__init__.py

from urllib.parse import quote
from time import time
from datetime import datetime
from queue import Queue, Empty
from threading import Thread

from curl_cffi.requests import post
class PhindResponse:
    # Wraps the raw Phind answer in an OpenAI-style completion object.

    class Completion:

        class Choices:
            def __init__(self, choice: dict) -> None:
                self.text = choice['text']
                self.content = self.text.encode()
                self.index = choice['index']
                self.logprobs = choice['logprobs']
                self.finish_reason = choice['finish_reason']

            def __repr__(self) -> str:
                return f'''<__main__.APIResponse.Completion.Choices(\n    text = {self.text.encode()},\n    index = {self.index},\n    logprobs = {self.logprobs},\n    finish_reason = {self.finish_reason})object at 0x1337>'''

        def __init__(self, choices: dict) -> None:
            self.choices = [self.Choices(choice) for choice in choices]

    class Usage:
        def __init__(self, usage_dict: dict) -> None:
            self.prompt_tokens = usage_dict['prompt_tokens']
            self.completion_tokens = usage_dict['completion_tokens']
            self.total_tokens = usage_dict['total_tokens']

        def __repr__(self):
            return f'''<__main__.APIResponse.Usage(\n    prompt_tokens = {self.prompt_tokens},\n    completion_tokens = {self.completion_tokens},\n    total_tokens = {self.total_tokens})object at 0x1337>'''

    def __init__(self, response_dict: dict) -> None:
        self.response_dict = response_dict
        self.id = response_dict['id']
        self.object = response_dict['object']
        self.created = response_dict['created']
        self.model = response_dict['model']
        self.completion = self.Completion(response_dict['choices'])
        self.usage = self.Usage(response_dict['usage'])

    def json(self) -> dict:
        return self.response_dict
class Search:
    @staticmethod
    def create(prompt: str, actualSearch: bool = True, language: str = 'en') -> dict:  # actualSearch=False -> no web search
        if not actualSearch:
            # Return an empty Bing-style search response so Completion.create can run without web results.
            return {
                '_type': 'SearchResponse',
                'queryContext': {
                    'originalQuery': prompt
                },
                'webPages': {
                    'webSearchUrl': f'https://www.bing.com/search?q={quote(prompt)}',
                    'totalEstimatedMatches': 0,
                    'value': []
                },
                'rankingResponse': {
                    'mainline': {
                        'items': []
                    }
                }
            }

        headers = {
            'authority': 'www.phind.com',
            'origin': 'https://www.phind.com',
            'referer': 'https://www.phind.com/search',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
        }

        return post('https://www.phind.com/api/bing/search', headers=headers, json={
            'q': prompt,
            'userRankList': {},
            'browserLanguage': language}).json()['rawBingResults']
class Completion:
    @staticmethod
    def create(
            model: str = 'gpt-4',
            prompt: str = '',
            results: dict = None,
            creative: bool = False,
            detailed: bool = False,
            codeContext: str = '',
            language: str = 'en') -> PhindResponse:

        if results is None:
            results = Search.create(prompt, actualSearch=True)

        if len(codeContext) > 2999:
            raise ValueError('codeContext must be less than 3000 characters')

        # Phind "skill" levels mapped onto familiar OpenAI model names.
        models = {
            'gpt-4': 'expert',
            'gpt-3.5-turbo': 'intermediate',
            'gpt-3.5': 'intermediate',
        }

        json_data = {
            'question': prompt,
            'bingResults': results,  # response.json()['rawBingResults']
            'codeContext': codeContext,
            'options': {
                'skill': models[model],
                'date': datetime.now().strftime("%d/%m/%Y"),
                'language': language,
                'detailed': detailed,
                'creative': creative
            }
        }

        headers = {
            'authority': 'www.phind.com',
            'origin': 'https://www.phind.com',
            'referer': f'https://www.phind.com/search?q={quote(prompt)}&c=&source=searchbox&init=true',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
        }

        completion = ''
        response = post('https://www.phind.com/api/infer/answer', headers=headers, json=json_data, timeout=99999)

        # The endpoint streams server-sent events; strip the 'data: ' framing and join the chunks.
        for line in response.text.split('\r\n\r\n'):
            completion += line.replace('data: ', '')

        return PhindResponse({
            'id': f'cmpl-1337-{int(time())}',
            'object': 'text_completion',
            'created': int(time()),
            'model': models[model],
            'choices': [{
                'text': completion,
                'index': 0,
                'logprobs': None,
                'finish_reason': 'stop'
            }],
            'usage': {
                'prompt_tokens': len(prompt),  # character counts, not real token counts
                'completion_tokens': len(completion),
                'total_tokens': len(prompt) + len(completion)
            }
        })
class StreamingCompletion:
    message_queue = Queue()
    stream_completed = False

    @staticmethod
    def request(model, prompt, results, creative, detailed, codeContext, language) -> None:
        models = {
            'gpt-4': 'expert',
            'gpt-3.5-turbo': 'intermediate',
            'gpt-3.5': 'intermediate',
        }

        json_data = {
            'question': prompt,
            'bingResults': results,
            'codeContext': codeContext,
            'options': {
                'skill': models[model],
                'date': datetime.now().strftime("%d/%m/%Y"),
                'language': language,
                'detailed': detailed,
                'creative': creative
            }
        }

        # content_callback pushes each raw chunk onto the queue as it arrives.
        post('https://www.phind.com/api/infer/answer', json=json_data, timeout=99999,
             content_callback=StreamingCompletion.handle_stream_response,
             headers={
                 'authority': 'www.phind.com',
                 'origin': 'https://www.phind.com',
                 'referer': f'https://www.phind.com/search?q={quote(prompt)}&c=&source=searchbox&init=true',
                 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
             })

        StreamingCompletion.stream_completed = True

    @staticmethod
    def create(
            model: str = 'gpt-4',
            prompt: str = '',
            results: dict = None,
            creative: bool = False,
            detailed: bool = False,
            codeContext: str = '',
            language: str = 'en'):

        if results is None:
            results = Search.create(prompt, actualSearch=True)

        if len(codeContext) > 2999:
            raise ValueError('codeContext must be less than 3000 characters')

        # Reset the flag so the generator can be used more than once per process.
        StreamingCompletion.stream_completed = False

        Thread(target=StreamingCompletion.request, args=[
            model, prompt, results, creative, detailed, codeContext, language]).start()

        # Drain the queue until the background request has finished and no chunks remain.
        while not StreamingCompletion.stream_completed or not StreamingCompletion.message_queue.empty():
            try:
                chunk = StreamingCompletion.message_queue.get(timeout=0)

                # Normalise the server-sent-event framing before stripping it.
                if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
                    chunk = b'data: \n\n\r\n\r\n'

                chunk = chunk.decode()
                chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
                chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\n\r\n\r\n')
                chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')

                yield PhindResponse({
                    'id': f'cmpl-1337-{int(time())}',
                    'object': 'text_completion',
                    'created': int(time()),
                    'model': model,
                    'choices': [{
                        'text': chunk,
                        'index': 0,
                        'logprobs': None,
                        'finish_reason': 'stop'
                    }],
                    'usage': {
                        'prompt_tokens': len(prompt),
                        'completion_tokens': len(chunk),
                        'total_tokens': len(prompt) + len(chunk)
                    }
                })

            except Empty:
                pass

    @staticmethod
    def handle_stream_response(response):
        StreamingCompletion.message_queue.put(response)
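

# Usage sketch (not part of the upstream module): a minimal, illustrative example of how
# these classes are meant to be called. It assumes network access to www.phind.com and that
# the endpoints above still accept this payload shape; prompts below are placeholders.
if __name__ == '__main__':
    # Blocking completion: Search.create fetches Bing results, then Completion.create asks for an answer.
    result = Completion.create(
        model='gpt-4',
        prompt='how do I reverse a linked list in python?')
    print(result.completion.choices[0].text)

    # Streaming completion: StreamingCompletion.create is a generator yielding PhindResponse chunks.
    for token in StreamingCompletion.create(
            model='gpt-4',
            prompt='explain big-O notation briefly'):
        print(token.completion.choices[0].text, end='', flush=True)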