Abhaykoul committed on
Commit 5021f5b · verified · 1 Parent(s): 9e7090f

Delete webscout/Provider

Files changed (43)
  1. webscout/Provider/BasedGPT.py +0 -226
  2. webscout/Provider/Berlin4h.py +0 -211
  3. webscout/Provider/Blackboxai.py +0 -440
  4. webscout/Provider/ChatGPTUK.py +0 -214
  5. webscout/Provider/Cohere.py +0 -223
  6. webscout/Provider/Gemini.py +0 -217
  7. webscout/Provider/Groq.py +0 -512
  8. webscout/Provider/Koboldai.py +0 -402
  9. webscout/Provider/Leo.py +0 -469
  10. webscout/Provider/Llama2.py +0 -437
  11. webscout/Provider/OpenGPT.py +0 -487
  12. webscout/Provider/Openai.py +0 -511
  13. webscout/Provider/Perplexity.py +0 -230
  14. webscout/Provider/Phind.py +0 -518
  15. webscout/Provider/Poe.py +0 -208
  16. webscout/Provider/Reka.py +0 -226
  17. webscout/Provider/ThinkAnyAI.py +0 -280
  18. webscout/Provider/Xjai.py +0 -230
  19. webscout/Provider/Yepchat.py +0 -478
  20. webscout/Provider/Youchat.py +0 -221
  21. webscout/Provider/__init__.py +0 -61
  22. webscout/Provider/__pycache__/BasedGPT.cpython-311.pyc +0 -0
  23. webscout/Provider/__pycache__/Berlin4h.cpython-311.pyc +0 -0
  24. webscout/Provider/__pycache__/Blackboxai.cpython-311.pyc +0 -0
  25. webscout/Provider/__pycache__/ChatGPTUK.cpython-311.pyc +0 -0
  26. webscout/Provider/__pycache__/ChatGPTlogin.cpython-311.pyc +0 -0
  27. webscout/Provider/__pycache__/Cohere.cpython-311.pyc +0 -0
  28. webscout/Provider/__pycache__/Gemini.cpython-311.pyc +0 -0
  29. webscout/Provider/__pycache__/Groq.cpython-311.pyc +0 -0
  30. webscout/Provider/__pycache__/Koboldai.cpython-311.pyc +0 -0
  31. webscout/Provider/__pycache__/Leo.cpython-311.pyc +0 -0
  32. webscout/Provider/__pycache__/Llama2.cpython-311.pyc +0 -0
  33. webscout/Provider/__pycache__/OpenGPT.cpython-311.pyc +0 -0
  34. webscout/Provider/__pycache__/Openai.cpython-311.pyc +0 -0
  35. webscout/Provider/__pycache__/Perplexity.cpython-311.pyc +0 -0
  36. webscout/Provider/__pycache__/Phind.cpython-311.pyc +0 -0
  37. webscout/Provider/__pycache__/Poe.cpython-311.pyc +0 -0
  38. webscout/Provider/__pycache__/Reka.cpython-311.pyc +0 -0
  39. webscout/Provider/__pycache__/ThinkAnyAI.cpython-311.pyc +0 -0
  40. webscout/Provider/__pycache__/Xjai.cpython-311.pyc +0 -0
  41. webscout/Provider/__pycache__/Yepchat.cpython-311.pyc +0 -0
  42. webscout/Provider/__pycache__/Youchat.cpython-311.pyc +0 -0
  43. webscout/Provider/__pycache__/__init__.cpython-311.pyc +0 -0
webscout/Provider/BasedGPT.py DELETED
@@ -1,226 +0,0 @@
- import time
- import uuid
- from selenium import webdriver
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.support.ui import WebDriverWait
- import click
- import requests
- from requests import get
- from uuid import uuid4
- from re import findall
- from requests.exceptions import RequestException
- from curl_cffi.requests import get, RequestsError
- import g4f
- from random import randint
- from PIL import Image
- import io
- import re
- import json
- import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
- import logging
- import httpx
-
- class BasedGPT(Provider):
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         system_prompt: str = "Be Helpful and Friendly",
-     ):
-         """Instantiates BasedGPT
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             system_prompt (str, optional): System prompt for BasedGPT. Defaults to "Be Helpful and Friendly".
-         """
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.chat_endpoint = "https://www.basedgpt.chat/api/chat"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.system_prompt = system_prompt
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(
-             {"Content-Type": "application/json"}
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
-             "object": "chat.completion",
-             "created": 1704623244,
-             "model": "gpt-3.5-turbo",
-             "usage": {
-                 "prompt_tokens": 0,
-                 "completion_tokens": 0,
-                 "total_tokens": 0
-             },
-             "choices": [
-                 {
-                     "message": {
-                         "role": "assistant",
-                         "content": "Hello! How can I assist you today?"
-                     },
-                     "finish_reason": "stop",
-                     "index": 0
-                 }
-             ]
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         payload = {
-             "messages": [
-                 {"role": "system", "content": self.system_prompt},
-                 {"role": "user", "content": conversation_prompt},
-             ],
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             message_load = ""
-             for value in response.iter_lines(
-                 decode_unicode=True,
-                 delimiter="",
-                 chunk_size=self.stream_chunk_size,
-             ):
-                 try:
-                     message_load += value
-                     yield value if raw else dict(text=message_load)
-                 except json.decoder.JSONDecodeError:
-                     pass
-             self.last_response.update(dict(text=message_load))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
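The deleted class implemented webscout's synchronous provider interface (`ask` → dict, `chat` → str or generator, `get_message` → str). For readers tracking what this commit removes, a minimal usage sketch against the pre-deletion layout (hypothetical, since the module is now gone):

```python
# Hypothetical usage of the removed provider; import path assumes the old package layout.
from webscout.Provider import BasedGPT

bot = BasedGPT(system_prompt="Be Helpful and Friendly")
print(bot.chat("Hello"))                 # non-stream: returns the full reply as a str
for text in bot.chat("Hello", stream=True):
    print(text)                          # stream: each yield is the accumulated reply so far
```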
webscout/Provider/Berlin4h.py DELETED
@@ -1,211 +0,0 @@
- import requests
- import json
- import uuid
- from typing import Any, Dict, Optional
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from webscout import exceptions
-
- class Berlin4h(Provider):
-     """
-     A class to interact with the Berlin4h AI API.
-     """
-
-     def __init__(
-         self,
-         api_token: str = "3bf369cd84339603f8a5361e964f9ebe",
-         api_endpoint: str = "https://ai.berlin4h.top/api/chat/completions",
-         model: str = "gpt-3.5-turbo",
-         temperature: float = 0.9,
-         presence_penalty: float = 0,
-         frequency_penalty: float = 0,
-         max_tokens: int = 4000,
-         is_conversation: bool = True,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ) -> None:
-         """
-         Initializes the Berlin4h API with given parameters.
-
-         Args:
-             api_token (str): The API token for authentication.
-             api_endpoint (str): The API endpoint to use for requests.
-             model (str): The AI model to use for text generation.
-             temperature (float): The temperature parameter for the model.
-             presence_penalty (float): The presence penalty parameter for the model.
-             frequency_penalty (float): The frequency penalty parameter for the model.
-             max_tokens (int): The maximum number of tokens to generate.
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.api_token = api_token
-         self.api_endpoint = api_endpoint
-         self.model = model
-         self.temperature = temperature
-         self.presence_penalty = presence_penalty
-         self.frequency_penalty = frequency_penalty
-         self.max_tokens = max_tokens
-         self.parent_message_id: Optional[str] = None
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.stream_chunk_size = 1
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {"Content-Type": "application/json", "Token": self.api_token}
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Dict[str, Any]:
-         """
-         Sends a prompt to the Berlin4h AI API and returns the response.
-
-         Args:
-             prompt: The text prompt to generate text from.
-             stream (bool, optional): Whether to stream the response. Defaults to False.
-             raw (bool, optional): Whether to return the raw response. Defaults to False.
-             optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-             conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-
-         Returns:
-             The response from the API.
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         payload: Dict[str, any] = {
-             "prompt": conversation_prompt,
-             "parentMessageId": self.parent_message_id or str(uuid.uuid4()),
-             "options": {
-                 "model": self.model,
-                 "temperature": self.temperature,
-                 "presence_penalty": self.presence_penalty,
-                 "frequency_penalty": self.frequency_penalty,
-                 "max_tokens": self.max_tokens,
-             },
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
-             )
-
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason})"
-                 )
-
-             streaming_response = ""
-             # Collect the entire line before processing
-             for line in response.iter_lines(decode_unicode=True):
-                 if line:
-                     try:
-                         json_data = json.loads(line)
-                         content = json_data['content']
-                         if ">" in content: break
-                         streaming_response += content
-                         yield content if raw else dict(text=streaming_response)  # Yield accumulated response
-                     except:
-                         continue
-             self.last_response.update(dict(text=streaming_response))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
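Berlin4h shipped with a hardcoded default `api_token`, so the deleted provider could be constructed with no arguments. A hedged sketch of how it was driven (old import path assumed):

```python
# Hypothetical usage of the removed Berlin4h provider (pre-deletion layout).
from webscout.Provider import Berlin4h

ai = Berlin4h(model="gpt-3.5-turbo", temperature=0.9)  # default api_token baked in
print(ai.chat("Summarise this commit in one line"))
```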
webscout/Provider/Blackboxai.py DELETED
@@ -1,440 +0,0 @@
- import time
- import uuid
- from selenium import webdriver
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.support.ui import WebDriverWait
- import click
- import requests
- from requests import get
- from uuid import uuid4
- from re import findall
- from requests.exceptions import RequestException
- from curl_cffi.requests import get, RequestsError
- import g4f
- from random import randint
- from PIL import Image
- import io
- import re
- import json
- import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from Helpingai_T2 import Perplexity
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
- import logging
- import httpx
-
- #------------------------------------------------------BLACKBOXAI--------------------------------------------------------
- class BLACKBOXAI:
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 8000,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = None,
-     ):
-         """Instantiates BLACKBOXAI
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): Model name. Defaults to "Phind Model".
-         """
-         self.session = requests.Session()
-         self.max_tokens_to_sample = max_tokens
-         self.is_conversation = is_conversation
-         self.chat_endpoint = "https://www.blackbox.ai/api/chat"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.previewToken: str = None
-         self.userId: str = ""
-         self.codeModelMode: bool = True
-         self.id: str = ""
-         self.agentMode: dict = {}
-         self.trendingAgentMode: dict = {}
-         self.isMicMode: bool = False
-
-         self.headers = {
-             "Content-Type": "application/json",
-             "User-Agent": "",
-             "Accept": "*/*",
-             "Accept-Encoding": "Identity",
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "text" : "print('How may I help you today?')"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         self.session.headers.update(self.headers)
-         payload = {
-             "messages": [
-                 # json.loads(prev_messages),
-                 {"content": conversation_prompt, "role": "user"}
-             ],
-             "id": self.id,
-             "previewToken": self.previewToken,
-             "userId": self.userId,
-             "codeModelMode": self.codeModelMode,
-             "agentMode": self.agentMode,
-             "trendingAgentMode": self.trendingAgentMode,
-             "isMicMode": self.isMicMode,
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-             if (
-                 not response.ok
-                 or not response.headers.get("Content-Type")
-                 == "text/plain; charset=utf-8"
-             ):
-                 raise Exception(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-             streaming_text = ""
-             for value in response.iter_lines(
-                 decode_unicode=True,
-                 chunk_size=self.stream_chunk_size,
-                 delimiter="\n",
-             ):
-                 try:
-                     if bool(value):
-                         streaming_text += value + ("\n" if stream else "")
-
-                         resp = dict(text=streaming_text)
-                         self.last_response.update(resp)
-                         yield value if raw else resp
-                 except json.decoder.JSONDecodeError:
-                     pass
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-     @staticmethod
-     def chat_cli(prompt):
-         """Sends a request to the BLACKBOXAI API and processes the response."""
-         blackbox_ai = BLACKBOXAI()  # Initialize a BLACKBOXAI instance
-         response = blackbox_ai.ask(prompt)  # Perform a chat with the given prompt
-         processed_response = blackbox_ai.get_message(response)  # Process the response
-         print(processed_response)
- class AsyncBLACKBOXAI(AsyncProvider):
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = None,
-     ):
-         """Instantiates BLACKBOXAI
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): Model name. Defaults to "Phind Model".
-         """
-         self.max_tokens_to_sample = max_tokens
-         self.is_conversation = is_conversation
-         self.chat_endpoint = "https://www.blackbox.ai/api/chat"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.previewToken: str = None
-         self.userId: str = ""
-         self.codeModelMode: bool = True
-         self.id: str = ""
-         self.agentMode: dict = {}
-         self.trendingAgentMode: dict = {}
-         self.isMicMode: bool = False
-
-         self.headers = {
-             "Content-Type": "application/json",
-             "User-Agent": "",
-             "Accept": "*/*",
-             "Accept-Encoding": "Identity",
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
-
-     async def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict | AsyncGenerator:
-         """Chat with AI asynchronously.
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict|AsyncGenerator : ai content
-         ```json
-         {
-             "text" : "print('How may I help you today?')"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         payload = {
-             "messages": [
-                 # json.loads(prev_messages),
-                 {"content": conversation_prompt, "role": "user"}
-             ],
-             "id": self.id,
-             "previewToken": self.previewToken,
-             "userId": self.userId,
-             "codeModelMode": self.codeModelMode,
-             "agentMode": self.agentMode,
-             "trendingAgentMode": self.trendingAgentMode,
-             "isMicMode": self.isMicMode,
-         }
-
-         async def for_stream():
-             async with self.session.stream(
-                 "POST", self.chat_endpoint, json=payload, timeout=self.timeout
-             ) as response:
-                 if (
-                     not response.is_success
-                     or not response.headers.get("Content-Type")
-                     == "text/plain; charset=utf-8"
-                 ):
-                     raise exceptions.FailedToGenerateResponseError(
-                         f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
-                     )
-                 streaming_text = ""
-                 async for value in response.aiter_lines():
-                     try:
-                         if bool(value):
-                             streaming_text += value + ("\n" if stream else "")
-                             resp = dict(text=streaming_text)
-                             self.last_response.update(resp)
-                             yield value if raw else resp
-                     except json.decoder.JSONDecodeError:
-                         pass
-             self.conversation.update_chat_history(
-                 prompt, await self.get_message(self.last_response)
-             )
-
-         async def for_non_stream():
-             async for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str | AsyncGenerator:
-         """Generate response `str` asynchronously.
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str|AsyncGenerator: Response generated
-         """
-
-         async def for_stream():
-             async_ask = await self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             )
-             async for response in async_ask:
-                 yield await self.get_message(response)
-
-         async def for_non_stream():
-             return await self.get_message(
-                 await self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
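This file carried both a sync class and the package's only async Blackbox client. A sketch of the async path as it existed (hypothetical, old package layout assumed):

```python
# Hypothetical usage of the removed AsyncBLACKBOXAI provider.
import asyncio
from webscout.Provider import AsyncBLACKBOXAI

async def main() -> None:
    ai = AsyncBLACKBOXAI()
    reply = await ai.chat("Explain recursion in one paragraph")  # non-stream awaits a full str
    print(reply)

asyncio.run(main())
```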
webscout/Provider/ChatGPTUK.py DELETED
@@ -1,214 +0,0 @@
- import requests
- from typing import Any, AsyncGenerator, Dict, Optional
- import json
- import re
-
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from webscout import exceptions
-
-
- class ChatGPTUK(Provider):
-     """
-     A class to interact with the ChatGPT UK API.
-     """
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         temperature: float = 0.9,
-         presence_penalty: float = 0,
-         frequency_penalty: float = 0,
-         top_p: float = 1,
-         model: str = "google-gemini-pro",
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ) -> None:
-         """
-         Initializes the ChatGPTUK API with given parameters.
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.9.
-             presence_penalty (float, optional): Chances of topic being repeated. Defaults to 0.
-             frequency_penalty (float, optional): Chances of word being repeated. Defaults to 0.
-             top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
-             model (str, optional): LLM model name. Defaults to "google-gemini-pro".
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://free.chatgpt.org.uk/api/openai/v1/chat/completions"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.temperature = temperature
-         self.presence_penalty = presence_penalty
-         self.frequency_penalty = frequency_penalty
-         self.top_p = top_p
-         self.headers = {"Content-Type": "application/json"}
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "text" : "How may I assist you today?"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         self.session.headers.update(self.headers)
-         payload = {
-             "messages": [
-                 {"role": "system", "content": "Keep your responses long and detailed"},
-                 {"role": "user", "content": conversation_prompt}
-             ],
-             "stream": True,
-             "model": self.model,
-             "temperature": self.temperature,
-             "presence_penalty": self.presence_penalty,
-             "frequency_penalty": self.frequency_penalty,
-             "top_p": self.top_p,
-             "max_tokens": self.max_tokens_to_sample
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.api_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             streaming_response = ""
-             for line in response.iter_lines(decode_unicode=True, chunk_size=1):
-                 if line:
-                     modified_line = re.sub("data:", "", line)
-                     try:
-                         json_data = json.loads(modified_line)
-                         content = json_data['choices'][0]['delta']['content']
-                         streaming_response += content
-                         yield content if raw else dict(text=streaming_response)
-                     except:
-                         continue
-             self.last_response.update(dict(text=streaming_response))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
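Despite its name, the deleted ChatGPTUK provider defaulted to the `google-gemini-pro` model and hardwired a "long and detailed" system message. Usage sketch (hypothetical, old layout):

```python
# Hypothetical usage of the removed ChatGPTUK provider.
from webscout.Provider import ChatGPTUK

ai = ChatGPTUK(model="google-gemini-pro", max_tokens=600)
print(ai.chat("Write a haiku about Python"))
```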
webscout/Provider/Cohere.py DELETED
@@ -1,223 +0,0 @@
- import time
- import uuid
- from selenium import webdriver
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.support.ui import WebDriverWait
- import click
- import requests
- from requests import get
- from uuid import uuid4
- from re import findall
- from requests.exceptions import RequestException
- from curl_cffi.requests import get, RequestsError
- import g4f
- from random import randint
- from PIL import Image
- import io
- import re
- import json
- import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from Helpingai_T2 import Perplexity
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
- import logging
- import httpx
- #-----------------------------------------------Cohere--------------------------------------------
- class Cohere(Provider):
-     def __init__(
-         self,
-         api_key: str,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         model: str = "command-r-plus",
-         temperature: float = 0.7,
-         system_prompt: str = "You are helpful AI",
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         top_k: int = -1,
-         top_p: float = 0.999,
-     ):
-         """Initializes Cohere
-
-         Args:
-             api_key (str): Cohere API key.
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             model (str, optional): Model to use for generating text. Defaults to "command-r-plus".
-             temperature (float, optional): Diversity of the generated text. Higher values produce more diverse outputs.
-                 Defaults to 0.7.
-             system_prompt (str, optional): A system_prompt or context to set the style or tone of the generated text.
-                 Defaults to "You are helpful AI".
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_key = api_key
-         self.model = model
-         self.temperature = temperature
-         self.system_prompt = system_prompt
-         self.chat_endpoint = "https://production.api.os.cohere.ai/coral/v1/chat"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {
-             "Content-Type": "application/json",
-             "Authorization": f"Bearer {self.api_key}",
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "text" : "How may I assist you today?"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-         self.session.headers.update(self.headers)
-         payload = {
-             "message": conversation_prompt,
-             "model": self.model,
-             "temperature": self.temperature,
-             "preamble": self.system_prompt,
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-             if not response.ok:
-                 raise Exception(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             for value in response.iter_lines(
-                 decode_unicode=True,
-                 chunk_size=self.stream_chunk_size,
-             ):
-                 try:
-                     resp = json.loads(value.strip().split("\n")[-1])
-                     self.last_response.update(resp)
-                     yield value if raw else resp
-                 except json.decoder.JSONDecodeError:
-                     pass
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             # let's make use of stream
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["result"]["chatStreamEndEvent"]["response"]["text"]
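Unlike the other providers removed here, Cohere required an `api_key` and extracted the reply from Cohere's `chatStreamEndEvent` payload. Sketch (hypothetical; `COHERE_API_KEY` is a placeholder, not a documented variable):

```python
# Hypothetical usage of the removed Cohere provider.
import os
from webscout.Provider import Cohere

ai = Cohere(api_key=os.environ["COHERE_API_KEY"], model="command-r-plus")
print(ai.chat("What is retrieval-augmented generation?"))
```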
webscout/Provider/Gemini.py DELETED
@@ -1,217 +0,0 @@
1
- import time
2
- import uuid
3
- from selenium import webdriver
4
- from selenium.webdriver.chrome.options import Options
5
- from selenium.webdriver.common.by import By
6
- from selenium.webdriver.support import expected_conditions as EC
7
- from selenium.webdriver.support.ui import WebDriverWait
8
- import click
9
- import requests
10
- from requests import get
11
- from uuid import uuid4
12
- from re import findall
13
- from requests.exceptions import RequestException
14
- from curl_cffi.requests import get, RequestsError
15
- import g4f
16
- from random import randint
17
- from PIL import Image
18
- import io
19
- import re
20
- import json
21
- import yaml
22
- from ..AIutel import Optimizers
23
- from ..AIutel import Conversation
24
- from ..AIutel import AwesomePrompts, sanitize_stream
25
- from ..AIbase import Provider, AsyncProvider
26
- from Helpingai_T2 import Perplexity
27
- from webscout import exceptions
28
- from typing import Any, AsyncGenerator, Dict
29
- import logging
30
- import httpx
31
- from Bard import Chatbot
32
- import logging
33
- from os import path
34
- from json import load
35
- from json import dumps
36
- import warnings
37
- logging.getLogger("httpx").setLevel(logging.ERROR)
38
- warnings.simplefilter("ignore", category=UserWarning)
39
- class GEMINI(Provider):
40
- def __init__(
41
- self,
42
- cookie_file: str,
43
- proxy: dict = {},
44
- timeout: int = 30,
45
- ):
46
- """Initializes GEMINI
47
-
48
- Args:
49
- cookie_file (str): Path to `bard.google.com.cookies.json` file
50
- proxy (dict, optional): Http request proxy. Defaults to {}.
51
- timeout (int, optional): Http request timeout. Defaults to 30.
52
- """
53
- self.conversation = Conversation(False)
54
- self.session_auth1 = None
55
- self.session_auth2 = None
56
- assert isinstance(
57
- cookie_file, str
58
- ), f"cookie_file should be of {str} only not '{type(cookie_file)}'"
59
- if path.isfile(cookie_file):
60
- # let's assume auth is a path to exported .json cookie-file
61
- with open(cookie_file) as fh:
62
- entries = load(fh)
63
- for entry in entries:
64
- if entry["name"] == "__Secure-1PSID":
65
- self.session_auth1 = entry["value"]
66
- elif entry["name"] == "__Secure-1PSIDTS":
67
- self.session_auth2 = entry["value"]
68
-
69
- assert all(
70
- [self.session_auth1, self.session_auth2]
71
- ), f"Failed to extract the required cookie value from file '{cookie_file}'"
72
- else:
73
- raise Exception(f"{cookie_file} is not a valid file path")
74
-
75
- self.session = Chatbot(self.session_auth1, self.session_auth2, proxy, timeout)
76
- self.last_response = {}
77
- self.__available_optimizers = (
78
- method
79
- for method in dir(Optimizers)
80
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
81
- )
82
-
83
- def ask(
84
- self,
85
- prompt: str,
86
- stream: bool = False,
87
- raw: bool = False,
88
- optimizer: str = None,
89
- conversationally: bool = False,
90
- ) -> dict:
91
- """Chat with AI
92
-
93
- Args:
94
- prompt (str): Prompt to be send.
95
- stream (bool, optional): Flag for streaming response. Defaults to False.
96
- raw (bool, optional): Stream back raw response as received. Defaults to False.
97
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defeaults to None
98
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
99
- Returns:
100
- dict : {}
101
- ```json
102
- {
103
- "content": "General Kenobi! \n\n(I couldn't help but respond with the iconic Star Wars greeting since you used it first. )\n\nIs there anything I can help you with today?\n[Image of Hello there General Kenobi]",
104
- "conversation_id": "c_f13f6217f9a997aa",
105
- "response_id": "r_d3665f95975c368f",
106
- "factualityQueries": null,
107
- "textQuery": [
108
- "hello there",
109
- 1
110
- ],
111
- "choices": [
112
- {
113
- "id": "rc_ea075c9671bfd8cb",
114
- "content": [
115
- "General Kenobi! \n\n(I couldn't help but respond with the iconic Star Wars greeting since you used it first. )\n\nIs there anything I can help you with today?\n[Image of Hello there General Kenobi]"
116
- ]
117
- },
118
- {
119
- "id": "rc_de6dd3fb793a5402",
120
- "content": [
121
- "General Kenobi! (or just a friendly hello, whichever you prefer!). \n\nI see you're a person of culture as well. *Star Wars* references are always appreciated. \n\nHow can I help you today?\n"
122
- ]
123
- },
124
- {
125
- "id": "rc_a672ac089caf32db",
126
- "content": [
127
- "General Kenobi! (or just a friendly hello if you're not a Star Wars fan!). \n\nHow can I help you today? Feel free to ask me anything, or tell me what you'd like to chat about. I'm here to assist in any way I can.\n[Image of Obi-Wan Kenobi saying hello there]"
128
- ]
129
- }
130
- ],
131
-
132
- "images": [
133
- "https://i.pinimg.com/originals/40/74/60/407460925c9e419d82b93313f0b42f71.jpg"
134
- ]
135
- }
136
-
137
- ```
138
- """
139
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
140
- if optimizer:
141
- if optimizer in self.__available_optimizers:
142
- conversation_prompt = getattr(Optimizers, optimizer)(
143
- conversation_prompt if conversationally else prompt
144
- )
145
- else:
146
- raise Exception(
147
- f"Optimizer is not one of {self.__available_optimizers}"
148
- )
149
-
150
- def for_stream():
151
- response = self.session.ask(prompt)
152
- self.last_response.update(response)
153
- self.conversation.update_chat_history(
154
- prompt, self.get_message(self.last_response)
155
- )
156
- yield dumps(response) if raw else response
157
-
158
- def for_non_stream():
159
- # let's make use of stream
160
- for _ in for_stream():
161
- pass
162
- return self.last_response
163
-
164
- return for_stream() if stream else for_non_stream()
165
-
166
- def chat(
167
- self,
168
- prompt: str,
169
- stream: bool = False,
170
- optimizer: str = None,
171
- conversationally: bool = False,
172
- ) -> str:
173
- """Generate response `str`
174
- Args:
175
- prompt (str): Prompt to be send.
176
- stream (bool, optional): Flag for streaming response. Defaults to False.
177
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
178
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
179
- Returns:
180
- str: Response generated
181
- """
182
-
183
- def for_stream():
184
- for response in self.ask(
185
- prompt, True, optimizer=optimizer, conversationally=conversationally
186
- ):
187
- yield self.get_message(response)
188
-
189
- def for_non_stream():
190
- return self.get_message(
191
- self.ask(
192
- prompt,
193
- False,
194
- optimizer=optimizer,
195
- conversationally=conversationally,
196
- )
197
- )
198
-
199
- return for_stream() if stream else for_non_stream()
200
-
201
- def get_message(self, response: dict) -> str:
202
- """Retrieves message only from response
203
-
204
- Args:
205
- response (dict): Response generated by `self.ask`
206
-
207
- Returns:
208
- str: Message extracted
209
- """
210
- assert isinstance(response, dict), "Response should be of dict data-type only"
211
- return response["content"]
212
-
213
- def reset(self):
214
- """Reset the current conversation"""
215
- self.session.async_chatbot.conversation_id = ""
216
- self.session.async_chatbot.response_id = ""
217
- self.session.async_chatbot.choice_id = ""
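
Every provider in this package dispatches between streaming and non-streaming through a pair of inner closures, as seen in `ask` above. Below is a distilled, dependency-free sketch of that pattern; the `Echo` class is hypothetical and stands in for any concrete provider.

```python
# Distilled illustration of the for_stream / for_non_stream dispatch
# pattern used by every provider in this package. Echo is hypothetical.
class Echo:
    def __init__(self):
        self.last_response = {}

    def ask(self, prompt: str, stream: bool = False):
        def for_stream():
            # A real provider would yield chunks parsed from an HTTP
            # stream here; we fake it by yielding one word at a time.
            for word in prompt.split():
                self.last_response = {"content": word}
                yield self.last_response

        def for_non_stream():
            # Non-streaming simply drains the generator and returns the
            # final accumulated response.
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()


if __name__ == "__main__":
    bot = Echo()
    print(bot.ask("hello world"))              # {'content': 'world'}
    for chunk in bot.ask("hello world", stream=True):
        print(chunk)
```
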
webscout/Provider/Groq.py DELETED
@@ -1,512 +0,0 @@
1
- import time
2
- import uuid
3
- from selenium import webdriver
4
- from selenium.webdriver.chrome.options import Options
5
- from selenium.webdriver.common.by import By
6
- from selenium.webdriver.support import expected_conditions as EC
7
- from selenium.webdriver.support.ui import WebDriverWait
8
- import click
9
- import requests
10
- from requests import get
11
- from uuid import uuid4
12
- from re import findall
13
- from requests.exceptions import RequestException
14
- from curl_cffi.requests import get, RequestsError
15
- import g4f
16
- from random import randint
17
- from PIL import Image
18
- import io
19
- import re
20
- import json
21
- import yaml
22
- from ..AIutel import Optimizers
23
- from ..AIutel import Conversation
24
- from ..AIutel import AwesomePrompts, sanitize_stream
25
- from ..AIbase import Provider, AsyncProvider
26
- from Helpingai_T2 import Perplexity
27
- from webscout import exceptions
28
- from typing import Any, AsyncGenerator, Dict
29
- import logging
30
- import httpx
31
-
32
- class GROQ(Provider):
33
- def __init__(
34
- self,
35
- api_key: str,
36
- is_conversation: bool = True,
37
- max_tokens: int = 600,
38
- temperature: float = 1,
39
- presence_penalty: int = 0,
40
- frequency_penalty: int = 0,
41
- top_p: float = 1,
42
- model: str = "mixtral-8x7b-32768",
43
- timeout: int = 30,
44
- intro: str = None,
45
- filepath: str = None,
46
- update_file: bool = True,
47
- proxies: dict = {},
48
- history_offset: int = 10250,
49
- act: str = None,
50
- ):
51
- """Instantiates GROQ
52
-
53
- Args:
54
- api_key (str): GROQ's API key.
55
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
56
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
57
- temperature (float, optional): Degree of randomness in the generated text. Defaults to 1.
58
- presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
59
- frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
60
- top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
61
- model (str, optional): LLM model name. Defaults to "mixtral-8x7b-32768".
62
- timeout (int, optional): Http request timeout. Defaults to 30.
63
- intro (str, optional): Conversation introductory prompt. Defaults to None.
64
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
65
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
66
- proxies (dict, optional): Http request proxies. Defaults to {}.
67
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
68
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
69
- """
70
- self.session = requests.Session()
71
- self.is_conversation = is_conversation
72
- self.max_tokens_to_sample = max_tokens
73
- self.api_key = api_key
74
- self.model = model
75
- self.temperature = temperature
76
- self.presence_penalty = presence_penalty
77
- self.frequency_penalty = frequency_penalty
78
- self.top_p = top_p
79
- self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
80
- self.stream_chunk_size = 64
81
- self.timeout = timeout
82
- self.last_response = {}
83
- self.headers = {
84
- "Content-Type": "application/json",
85
- "Authorization": f"Bearer {self.api_key}",
86
- }
87
-
88
- self.__available_optimizers = (
89
- method
90
- for method in dir(Optimizers)
91
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
92
- )
93
- self.session.headers.update(self.headers)
94
- Conversation.intro = (
95
- AwesomePrompts().get_act(
96
- act, raise_not_found=True, default=None, case_insensitive=True
97
- )
98
- if act
99
- else intro or Conversation.intro
100
- )
101
- self.conversation = Conversation(
102
- is_conversation, self.max_tokens_to_sample, filepath, update_file
103
- )
104
- self.conversation.history_offset = history_offset
105
- self.session.proxies = proxies
106
-
107
- def ask(
108
- self,
109
- prompt: str,
110
- stream: bool = False,
111
- raw: bool = False,
112
- optimizer: str = None,
113
- conversationally: bool = False,
114
- ) -> dict:
115
- """Chat with AI
116
-
117
- Args:
118
- prompt (str): Prompt to be sent.
119
- stream (bool, optional): Flag for streaming response. Defaults to False.
120
- raw (bool, optional): Stream back raw response as received. Defaults to False.
121
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
122
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
123
- Returns:
124
- dict : {}
125
- ```json
126
- {
127
- "id": "c0c8d139-d2b9-9909-8aa1-14948bc28404",
128
- "object": "chat.completion",
129
- "created": 1710852779,
130
- "model": "mixtral-8x7b-32768",
131
- "choices": [
132
- {
133
- "index": 0,
134
- "message": {
135
- "role": "assistant",
136
- "content": "Hello! How can I assist you today? I'm here to help answer your questions and engage in conversation on a wide variety of topics. Feel free to ask me anything!"
137
- },
138
- "logprobs": null,
139
- "finish_reason": "stop"
140
- }
141
- ],
142
- "usage": {
143
- "prompt_tokens": 47,
144
- "prompt_time": 0.03,
145
- "completion_tokens": 37,
146
- "completion_time": 0.069,
147
- "total_tokens": 84,
148
- "total_time": 0.099
149
- },
150
- "system_fingerprint": null
151
- }
152
- ```
153
- """
154
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
155
- if optimizer:
156
- if optimizer in self.__available_optimizers:
157
- conversation_prompt = getattr(Optimizers, optimizer)(
158
- conversation_prompt if conversationally else prompt
159
- )
160
- else:
161
- raise Exception(
162
- f"Optimizer is not one of {self.__available_optimizers}"
163
- )
164
- self.session.headers.update(self.headers)
165
- payload = {
166
- "frequency_penalty": self.frequency_penalty,
167
- "messages": [{"content": conversation_prompt, "role": "user"}],
168
- "model": self.model,
169
- "presence_penalty": self.presence_penalty,
170
- "stream": stream,
171
- "temperature": self.temperature,
172
- "top_p": self.top_p,
173
- }
174
-
175
- def for_stream():
176
- response = self.session.post(
177
- self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
178
- )
179
- if not response.ok:
180
- raise Exception(
181
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
182
- )
183
-
184
- message_load = ""
185
- for value in response.iter_lines(
186
- decode_unicode=True,
187
- delimiter="" if raw else "data:",
188
- chunk_size=self.stream_chunk_size,
189
- ):
190
- try:
191
- resp = json.loads(value)
192
- incomplete_message = self.get_message(resp)
193
- if incomplete_message:
194
- message_load += incomplete_message
195
- resp["choices"][0]["delta"]["content"] = message_load
196
- self.last_response.update(resp)
197
- yield value if raw else resp
198
- elif raw:
199
- yield value
200
- except json.decoder.JSONDecodeError:
201
- pass
202
- self.conversation.update_chat_history(
203
- prompt, self.get_message(self.last_response)
204
- )
205
-
206
- def for_non_stream():
207
- response = self.session.post(
208
- self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
209
- )
210
- if not response.ok:
211
- raise Exception(
212
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
213
- )
214
- resp = response.json()
215
- self.last_response.update(resp)
216
- self.conversation.update_chat_history(
217
- prompt, self.get_message(self.last_response)
218
- )
219
- return resp
220
-
221
- return for_stream() if stream else for_non_stream()
222
-
223
- def chat(
224
- self,
225
- prompt: str,
226
- stream: bool = False,
227
- optimizer: str = None,
228
- conversationally: bool = False,
229
- ) -> str:
230
- """Generate response `str`
231
- Args:
232
- prompt (str): Prompt to be sent.
233
- stream (bool, optional): Flag for streaming response. Defaults to False.
234
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
235
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
236
- Returns:
237
- str: Response generated
238
- """
239
-
240
- def for_stream():
241
- for response in self.ask(
242
- prompt, True, optimizer=optimizer, conversationally=conversationally
243
- ):
244
- yield self.get_message(response)
245
-
246
- def for_non_stream():
247
- return self.get_message(
248
- self.ask(
249
- prompt,
250
- False,
251
- optimizer=optimizer,
252
- conversationally=conversationally,
253
- )
254
- )
255
-
256
- return for_stream() if stream else for_non_stream()
257
-
258
- def get_message(self, response: dict) -> str:
259
- """Retrieves message only from response
260
-
261
- Args:
262
- response (dict): Response generated by `self.ask`
263
-
264
- Returns:
265
- str: Message extracted
266
- """
267
- assert isinstance(response, dict), "Response should be of dict data-type only"
268
- try:
269
- if response["choices"][0].get("delta"):
270
- return response["choices"][0]["delta"]["content"]
271
- return response["choices"][0]["message"]["content"]
272
- except KeyError:
273
- return ""
274
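
The sync `GROQ` class above and the `AsyncGROQ` variant that follows share the same `get_message` extraction logic: streaming chunks carry a `delta`, full completions carry a `message`. A minimal, dependency-free sketch of that dual path (the `extract` helper is hypothetical):

```python
# Minimal sketch of the delta/message extraction used by get_message above.
def extract(choice: dict) -> str:
    if choice.get("delta"):  # streaming chunk
        return choice["delta"].get("content", "")
    return choice.get("message", {}).get("content", "")  # full completion


print(extract({"delta": {"content": "Hi"}}))             # -> Hi
print(extract({"message": {"content": "Hello there"}}))  # -> Hello there
```
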
- class AsyncGROQ(AsyncProvider):
275
- def __init__(
276
- self,
277
- api_key: str,
278
- is_conversation: bool = True,
279
- max_tokens: int = 600,
280
- temperature: float = 1,
281
- presence_penalty: int = 0,
282
- frequency_penalty: int = 0,
283
- top_p: float = 1,
284
- model: str = "mixtral-8x7b-32768",
285
- timeout: int = 30,
286
- intro: str = None,
287
- filepath: str = None,
288
- update_file: bool = True,
289
- proxies: dict = {},
290
- history_offset: int = 10250,
291
- act: str = None,
292
- ):
293
- """Instantiates GROQ
294
-
295
- Args:
296
- api_key (str): GROQ's API key.
297
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
298
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
299
- temperature (float, optional): Degree of randomness in the generated text. Defaults to 1.
300
- presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
301
- frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
302
- top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
303
- model (str, optional): LLM model name. Defaults to "mixtral-8x7b-32768".
304
- timeout (int, optional): Http request timeout. Defaults to 30.
305
- intro (str, optional): Conversation introductory prompt. Defaults to None.
306
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
307
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
308
- proxies (dict, optional): Http request proxies. Defaults to {}.
309
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
310
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
311
- """
312
- self.is_conversation = is_conversation
313
- self.max_tokens_to_sample = max_tokens
314
- self.api_key = api_key
315
- self.model = model
316
- self.temperature = temperature
317
- self.presence_penalty = presence_penalty
318
- self.frequency_penalty = frequency_penalty
319
- self.top_p = top_p
320
- self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
321
- self.stream_chunk_size = 64
322
- self.timeout = timeout
323
- self.last_response = {}
324
- self.headers = {
325
- "Content-Type": "application/json",
326
- "Authorization": f"Bearer {self.api_key}",
327
- }
328
-
329
- self.__available_optimizers = (
330
- method
331
- for method in dir(Optimizers)
332
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
333
- )
334
- Conversation.intro = (
335
- AwesomePrompts().get_act(
336
- act, raise_not_found=True, default=None, case_insensitive=True
337
- )
338
- if act
339
- else intro or Conversation.intro
340
- )
341
- self.conversation = Conversation(
342
- is_conversation, self.max_tokens_to_sample, filepath, update_file
343
- )
344
- self.conversation.history_offset = history_offset
345
- self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
346
-
347
- async def ask(
348
- self,
349
- prompt: str,
350
- stream: bool = False,
351
- raw: bool = False,
352
- optimizer: str = None,
353
- conversationally: bool = False,
354
- ) -> dict | AsyncGenerator:
355
- """Chat with AI asynchronously.
356
-
357
- Args:
358
- prompt (str): Prompt to be sent.
359
- stream (bool, optional): Flag for streaming response. Defaults to False.
360
- raw (bool, optional): Stream back raw response as received. Defaults to False.
361
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
362
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
363
- Returns:
364
- dict|AsyncGenerator : ai content
365
- ```json
366
- {
367
- "id": "c0c8d139-d2b9-9909-8aa1-14948bc28404",
368
- "object": "chat.completion",
369
- "created": 1710852779,
370
- "model": "mixtral-8x7b-32768",
371
- "choices": [
372
- {
373
- "index": 0,
374
- "message": {
375
- "role": "assistant",
376
- "content": "Hello! How can I assist you today? I'm here to help answer your questions and engage in conversation on a wide variety of topics. Feel free to ask me anything!"
377
- },
378
- "logprobs": null,
379
- "finish_reason": "stop"
380
- }
381
- ],
382
- "usage": {
383
- "prompt_tokens": 47,
384
- "prompt_time": 0.03,
385
- "completion_tokens": 37,
386
- "completion_time": 0.069,
387
- "total_tokens": 84,
388
- "total_time": 0.099
389
- },
390
- "system_fingerprint": null
391
- }
392
- ```
393
- """
394
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
395
- if optimizer:
396
- if optimizer in self.__available_optimizers:
397
- conversation_prompt = getattr(Optimizers, optimizer)(
398
- conversation_prompt if conversationally else prompt
399
- )
400
- else:
401
- raise Exception(
402
- f"Optimizer is not one of {self.__available_optimizers}"
403
- )
404
- payload = {
405
- "frequency_penalty": self.frequency_penalty,
406
- "messages": [{"content": conversation_prompt, "role": "user"}],
407
- "model": self.model,
408
- "presence_penalty": self.presence_penalty,
409
- "stream": stream,
410
- "temperature": self.temperature,
411
- "top_p": self.top_p,
412
- }
413
-
414
- async def for_stream():
415
- async with self.session.stream(
416
- "POST", self.chat_endpoint, json=payload, timeout=self.timeout
417
- ) as response:
418
- if not response.is_success:
419
- raise Exception(
420
- f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
421
- )
422
-
423
- message_load = ""
424
- intro_value = "data:"
425
- async for value in response.aiter_lines():
426
- try:
427
- if value.startswith(intro_value):
428
- value = value[len(intro_value) :]
429
- resp = json.loads(value)
430
- incomplete_message = await self.get_message(resp)
431
- if incomplete_message:
432
- message_load += incomplete_message
433
- resp["choices"][0]["delta"]["content"] = message_load
434
- self.last_response.update(resp)
435
- yield value if raw else resp
436
- elif raw:
437
- yield value
438
- except json.decoder.JSONDecodeError:
439
- pass
440
- self.conversation.update_chat_history(
441
- prompt, await self.get_message(self.last_response)
442
- )
443
-
444
- async def for_non_stream():
445
- response = await self.session.post(
446
- self.chat_endpoint, json=payload, timeout=self.timeout
447
- )
448
- if not response.is_success:
449
- raise Exception(
450
- f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
451
- )
452
- resp = response.json()
453
- self.last_response.update(resp)
454
- self.conversation.update_chat_history(
455
- prompt, await self.get_message(self.last_response)
456
- )
457
- return resp
458
-
459
- return for_stream() if stream else await for_non_stream()
460
-
461
- async def chat(
462
- self,
463
- prompt: str,
464
- stream: bool = False,
465
- optimizer: str = None,
466
- conversationally: bool = False,
467
- ) -> str | AsyncGenerator:
468
- """Generate response `str` asynchronously.
469
- Args:
470
- prompt (str): Prompt to be sent.
471
- stream (bool, optional): Flag for streaming response. Defaults to False.
472
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
473
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
474
- Returns:
475
- str|AsyncGenerator: Response generated
476
- """
477
-
478
- async def for_stream():
479
- async_ask = await self.ask(
480
- prompt, True, optimizer=optimizer, conversationally=conversationally
481
- )
482
- async for response in async_ask:
483
- yield await self.get_message(response)
484
-
485
- async def for_non_stream():
486
- return await self.get_message(
487
- await self.ask(
488
- prompt,
489
- False,
490
- optimizer=optimizer,
491
- conversationally=conversationally,
492
- )
493
- )
494
-
495
- return for_stream() if stream else await for_non_stream()
496
-
497
- async def get_message(self, response: dict) -> str:
498
- """Retrieves message only from response
499
-
500
- Args:
501
- response (dict): Response generated by `self.ask`
502
-
503
- Returns:
504
- str: Message extracted
505
- """
506
- assert isinstance(response, dict), "Response should be of dict data-type only"
507
- try:
508
- if response["choices"][0].get("delta"):
509
- return response["choices"][0]["delta"]["content"]
510
- return response["choices"][0]["message"]["content"]
511
- except KeyError:
512
- return ""
webscout/Provider/Koboldai.py DELETED
@@ -1,402 +0,0 @@
1
- import time
2
- import uuid
3
- from selenium import webdriver
4
- from selenium.webdriver.chrome.options import Options
5
- from selenium.webdriver.common.by import By
6
- from selenium.webdriver.support import expected_conditions as EC
7
- from selenium.webdriver.support.ui import WebDriverWait
8
- import click
9
- import requests
10
- from requests import get
11
- from uuid import uuid4
12
- from re import findall
13
- from requests.exceptions import RequestException
14
- from curl_cffi.requests import get, RequestsError
15
- import g4f
16
- from random import randint
17
- from PIL import Image
18
- import io
19
- import re
20
- import json
21
- import yaml
22
- from ..AIutel import Optimizers
23
- from ..AIutel import Conversation
24
- from ..AIutel import AwesomePrompts, sanitize_stream
25
- from ..AIbase import Provider, AsyncProvider
26
- from Helpingai_T2 import Perplexity
27
- from webscout import exceptions
28
- from typing import Any, AsyncGenerator, Dict
29
- import logging
30
- import httpx
31
- #------------------------------------------------------KOBOLDAI-----------------------------------------------------------
32
- class KOBOLDAI(Provider):
33
- def __init__(
34
- self,
35
- is_conversation: bool = True,
36
- max_tokens: int = 600,
37
- temperature: float = 1,
38
- top_p: float = 1,
39
- timeout: int = 30,
40
- intro: str = None,
41
- filepath: str = None,
42
- update_file: bool = True,
43
- proxies: dict = {},
44
- history_offset: int = 10250,
45
- act: str = None,
46
- ):
47
- """Instantiate TGPT
48
-
49
- Args:
50
- is_conversation (str, optional): Flag for chatting conversationally. Defaults to True.
51
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
52
- temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.2.
53
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
54
- timeout (int, optional): Http requesting timeout. Defaults to 30
55
- intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
56
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
57
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
58
- proxies (dict, optional) : Http reqiuest proxies (socks). Defaults to {}.
59
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
60
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
61
- """
62
- self.session = requests.Session()
63
- self.is_conversation = is_conversation
64
- self.max_tokens_to_sample = max_tokens
65
- self.temperature = temperature
66
- self.top_p = top_p
67
- self.chat_endpoint = (
68
- "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream"
69
- )
70
- self.stream_chunk_size = 64
71
- self.timeout = timeout
72
- self.last_response = {}
73
- self.headers = {
74
- "Content-Type": "application/json",
75
- "Accept": "application/json",
76
- }
77
-
78
- self.__available_optimizers = (
79
- method
80
- for method in dir(Optimizers)
81
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
82
- )
83
- self.session.headers.update(self.headers)
84
- Conversation.intro = (
85
- AwesomePrompts().get_act(
86
- act, raise_not_found=True, default=None, case_insensitive=True
87
- )
88
- if act
89
- else intro or Conversation.intro
90
- )
91
- self.conversation = Conversation(
92
- is_conversation, self.max_tokens_to_sample, filepath, update_file
93
- )
94
- self.conversation.history_offset = history_offset
95
- self.session.proxies = proxies
96
-
97
- def ask(
98
- self,
99
- prompt: str,
100
- stream: bool = False,
101
- raw: bool = False,
102
- optimizer: str = None,
103
- conversationally: bool = False,
104
- ) -> dict:
105
- """Chat with AI
106
-
107
- Args:
108
- prompt (str): Prompt to be send.
109
- stream (bool, optional): Flag for streaming response. Defaults to False.
110
- raw (bool, optional): Stream back raw response as received. Defaults to False.
111
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
112
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
113
- Returns:
114
- dict : {}
115
- ```json
116
- {
117
- "token" : "How may I assist you today?"
118
- }
119
- ```
120
- """
121
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
122
- if optimizer:
123
- if optimizer in self.__available_optimizers:
124
- conversation_prompt = getattr(Optimizers, optimizer)(
125
- conversation_prompt if conversationally else prompt
126
- )
127
- else:
128
- raise Exception(
129
- f"Optimizer is not one of {self.__available_optimizers}"
130
- )
131
-
132
- self.session.headers.update(self.headers)
133
- payload = {
134
- "prompt": conversation_prompt,
135
- "temperature": self.temperature,
136
- "top_p": self.top_p,
137
- }
138
-
139
- def for_stream():
140
- response = self.session.post(
141
- self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
142
- )
143
- if not response.ok:
144
- raise Exception(
145
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
146
- )
147
-
148
- message_load = ""
149
- for value in response.iter_lines(
150
- decode_unicode=True,
151
- delimiter="" if raw else "event: message\ndata:",
152
- chunk_size=self.stream_chunk_size,
153
- ):
154
- try:
155
- resp = json.loads(value)
156
- message_load += self.get_message(resp)
157
- resp["token"] = message_load
158
- self.last_response.update(resp)
159
- yield value if raw else resp
160
- except json.decoder.JSONDecodeError:
161
- pass
162
- self.conversation.update_chat_history(
163
- prompt, self.get_message(self.last_response)
164
- )
165
-
166
- def for_non_stream():
167
- # let's make use of stream
168
- for _ in for_stream():
169
- pass
170
- return self.last_response
171
-
172
- return for_stream() if stream else for_non_stream()
173
-
174
- def chat(
175
- self,
176
- prompt: str,
177
- stream: bool = False,
178
- optimizer: str = None,
179
- conversationally: bool = False,
180
- ) -> str:
181
- """Generate response `str`
182
- Args:
183
- prompt (str): Prompt to be send.
184
- stream (bool, optional): Flag for streaming response. Defaults to False.
185
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
186
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
187
- Returns:
188
- str: Response generated
189
- """
190
-
191
- def for_stream():
192
- for response in self.ask(
193
- prompt, True, optimizer=optimizer, conversationally=conversationally
194
- ):
195
- yield self.get_message(response)
196
-
197
- def for_non_stream():
198
- return self.get_message(
199
- self.ask(
200
- prompt,
201
- False,
202
- optimizer=optimizer,
203
- conversationally=conversationally,
204
- )
205
- )
206
-
207
- return for_stream() if stream else for_non_stream()
208
-
209
- def get_message(self, response: dict) -> str:
210
- """Retrieves message only from response
211
-
212
- Args:
213
- response (dict): Response generated by `self.ask`
214
-
215
- Returns:
216
- str: Message extracted
217
- """
218
- assert isinstance(response, dict), "Response should be of dict data-type only"
219
- return response.get("token")
220
- class AsyncKOBOLDAI(AsyncProvider):
221
- def __init__(
222
- self,
223
- is_conversation: bool = True,
224
- max_tokens: int = 600,
225
- temperature: float = 1,
226
- top_p: float = 1,
227
- timeout: int = 30,
228
- intro: str = None,
229
- filepath: str = None,
230
- update_file: bool = True,
231
- proxies: dict = {},
232
- history_offset: int = 10250,
233
- act: str = None,
234
- ):
235
- """Instantiate TGPT
236
-
237
- Args:
238
- is_conversation (str, optional): Flag for chatting conversationally. Defaults to True.
239
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
240
- temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.2.
241
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
242
- timeout (int, optional): Http requesting timeout. Defaults to 30
243
- intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
244
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
245
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
246
- proxies (dict, optional) : Http reqiuest proxies (socks). Defaults to {}.
247
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
248
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
249
- """
250
- self.is_conversation = is_conversation
251
- self.max_tokens_to_sample = max_tokens
252
- self.temperature = temperature
253
- self.top_p = top_p
254
- self.chat_endpoint = (
255
- "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream"
256
- )
257
- self.stream_chunk_size = 64
258
- self.timeout = timeout
259
- self.last_response = {}
260
- self.headers = {
261
- "Content-Type": "application/json",
262
- "Accept": "application/json",
263
- }
264
-
265
- self.__available_optimizers = (
266
- method
267
- for method in dir(Optimizers)
268
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
269
- )
270
- Conversation.intro = (
271
- AwesomePrompts().get_act(
272
- act, raise_not_found=True, default=None, case_insensitive=True
273
- )
274
- if act
275
- else intro or Conversation.intro
276
- )
277
- self.conversation = Conversation(
278
- is_conversation, self.max_tokens_to_sample, filepath, update_file
279
- )
280
- self.conversation.history_offset = history_offset
281
- self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
282
-
283
- async def ask(
284
- self,
285
- prompt: str,
286
- stream: bool = False,
287
- raw: bool = False,
288
- optimizer: str = None,
289
- conversationally: bool = False,
290
- ) -> dict | AsyncGenerator:
291
- """Chat with AI asynchronously.
292
-
293
- Args:
294
- prompt (str): Prompt to be send.
295
- stream (bool, optional): Flag for streaming response. Defaults to False.
296
- raw (bool, optional): Stream back raw response as received. Defaults to False.
297
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
298
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
299
- Returns:
300
- dict|AsyncGenerator : ai content
301
- ```json
302
- {
303
- "token" : "How may I assist you today?"
304
- }
305
- ```
306
- """
307
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
308
- if optimizer:
309
- if optimizer in self.__available_optimizers:
310
- conversation_prompt = getattr(Optimizers, optimizer)(
311
- conversation_prompt if conversationally else prompt
312
- )
313
- else:
314
- raise Exception(
315
- f"Optimizer is not one of {self.__available_optimizers}"
316
- )
317
-
318
- payload = {
319
- "prompt": conversation_prompt,
320
- "temperature": self.temperature,
321
- "top_p": self.top_p,
322
- }
323
-
324
- async def for_stream():
325
- async with self.session.stream(
326
- "POST", self.chat_endpoint, json=payload, timeout=self.timeout
327
- ) as response:
328
- if not response.is_success:
329
- raise exceptions.FailedToGenerateResponseError(
330
- f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
331
- )
332
-
333
- message_load = ""
334
- async for value in response.aiter_lines():
335
- try:
336
- resp = sanitize_stream(value)
337
- message_load += await self.get_message(resp)
338
- resp["token"] = message_load
339
- self.last_response.update(resp)
340
- yield value if raw else resp
341
- except json.decoder.JSONDecodeError:
342
- pass
343
-
344
- self.conversation.update_chat_history(
345
- prompt, await self.get_message(self.last_response)
346
- )
347
-
348
- async def for_non_stream():
349
- # let's make use of stream
350
- async for _ in for_stream():
351
- pass
352
- return self.last_response
353
-
354
- return for_stream() if stream else await for_non_stream()
355
-
356
- async def chat(
357
- self,
358
- prompt: str,
359
- stream: bool = False,
360
- optimizer: str = None,
361
- conversationally: bool = False,
362
- ) -> str | AsyncGenerator:
363
- """Generate response `str` asynchronously.
364
- Args:
365
- prompt (str): Prompt to be send.
366
- stream (bool, optional): Flag for streaming response. Defaults to False.
367
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
368
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
369
- Returns:
370
- str: Response generated
371
- """
372
-
373
- async def for_stream():
374
- async_ask = await self.ask(
375
- prompt, True, optimizer=optimizer, conversationally=conversationally
376
- )
377
- async for response in async_ask:
378
- yield await self.get_message(response)
379
-
380
- async def for_non_stream():
381
- return await self.get_message(
382
- await self.ask(
383
- prompt,
384
- False,
385
- optimizer=optimizer,
386
- conversationally=conversationally,
387
- )
388
- )
389
-
390
- return for_stream() if stream else await for_non_stream()
391
-
392
- async def get_message(self, response: dict) -> str:
393
- """Retrieves message only from response
394
-
395
- Args:
396
- response (dict): Response generated by `self.ask`
397
-
398
- Returns:
399
- str: Message extracted
400
- """
401
- assert isinstance(response, dict), "Response should be of dict data-type only"
402
- return response.get("token")
webscout/Provider/Leo.py DELETED
@@ -1,469 +0,0 @@
1
- import time
2
- import uuid
3
- from selenium import webdriver
4
- from selenium.webdriver.chrome.options import Options
5
- from selenium.webdriver.common.by import By
6
- from selenium.webdriver.support import expected_conditions as EC
7
- from selenium.webdriver.support.ui import WebDriverWait
8
- import click
9
- import requests
10
- from requests import get
11
- from uuid import uuid4
12
- from re import findall
13
- from requests.exceptions import RequestException
14
- from curl_cffi.requests import get, RequestsError
15
- import g4f
16
- from random import randint
17
- from PIL import Image
18
- import io
19
- import re
20
- import json
21
- import yaml
22
- from ..AIutel import Optimizers
23
- from ..AIutel import Conversation
24
- from ..AIutel import AwesomePrompts, sanitize_stream
25
- from ..AIbase import Provider, AsyncProvider
26
- from Helpingai_T2 import Perplexity
27
- from webscout import exceptions
28
- from typing import Any, AsyncGenerator, Dict
29
- import logging
30
- import httpx
31
- #--------------------------------------LEO-----------------------------------------
32
- class LEO(Provider):
33
-
34
- def __init__(
35
- self,
36
- is_conversation: bool = True,
37
- max_tokens: int = 600,
38
- temperature: float = 0.2,
39
- top_k: int = -1,
40
- top_p: float = 0.999,
41
- model: str = "llama-2-13b-chat",
42
- brave_key: str = "qztbjzBqJueQZLFkwTTJrieu8Vw3789u",
43
- timeout: int = 30,
44
- intro: str = None,
45
- filepath: str = None,
46
- update_file: bool = True,
47
- proxies: dict = {},
48
- history_offset: int = 10250,
49
- act: str = None,
50
- ):
51
- """Instantiate TGPT
52
-
53
- Args:
54
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
55
- brave_key (str, optional): Brave API access key. Defaults to "qztbjzBqJueQZLFkwTTJrieu8Vw3789u".
56
- model (str, optional): Text generation model name. Defaults to "llama-2-13b-chat".
57
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
58
- temperature (float, optional): Degree of randomness in the generated text. Defaults to 0.2.
59
- top_k (int, optional): Chance of topic being repeated. Defaults to -1.
60
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
61
- timeout (int, optional): Http request timeout. Defaults to 30.
62
- intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
63
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
64
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
65
- proxies (dict, optional) : Http request proxies (socks). Defaults to {}.
66
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
67
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
68
- """
69
- self.session = requests.Session()
70
- self.is_conversation = is_conversation
71
- self.max_tokens_to_sample = max_tokens
72
- self.model = model
73
- self.stop_sequences = ["</response>", "</s>"]
74
- self.temperature = temperature
75
- self.top_k = top_k
76
- self.top_p = top_p
77
- self.chat_endpoint = "https://ai-chat.bsg.brave.com/v1/complete"
78
- self.stream_chunk_size = 64
79
- self.timeout = timeout
80
- self.last_response = {}
81
- self.headers = {
82
- "Content-Type": "application/json",
83
- "accept": "text/event-stream",
84
- "x-brave-key": brave_key,
85
- "accept-language": "en-US,en;q=0.9",
86
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:99.0) Gecko/20100101 Firefox/110.0",
87
- }
88
- self.__available_optimizers = (
89
- method
90
- for method in dir(Optimizers)
91
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
92
- )
93
- self.session.headers.update(self.headers)
94
- Conversation.intro = (
95
- AwesomePrompts().get_act(
96
- act, raise_not_found=True, default=None, case_insensitive=True
97
- )
98
- if act
99
- else intro or Conversation.intro
100
- )
101
- self.conversation = Conversation(
102
- is_conversation, self.max_tokens_to_sample, filepath, update_file
103
- )
104
- self.conversation.history_offset = history_offset
105
- self.session.proxies = proxies
106
- self.system_prompt = (
107
- "\n\nYour name is Leo, a helpful"
108
- "respectful and honest AI assistant created by the company Brave. You will be replying to a user of the Brave browser. "
109
- "Always respond in a neutral tone. Be polite and courteous. Answer concisely in no more than 50-80 words."
110
- "\n\nPlease ensure that your responses are socially unbiased and positive in nature."
111
- "If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. "
112
- "If you don't know the answer to a question, please don't share false information.\n"
113
- )
114
-
115
- def ask(
116
- self,
117
- prompt: str,
118
- stream: bool = False,
119
- raw: bool = False,
120
- optimizer: str = None,
121
- conversationally: bool = False,
122
- ) -> dict:
123
- """Chat with AI
124
-
125
- Args:
126
- prompt (str): Prompt to be sent.
127
- stream (bool, optional): Flag for streaming response. Defaults to False.
128
- raw (bool, optional): Stream back raw response as received. Defaults to False.
129
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
130
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
131
- Returns:
132
- dict : {}
133
- ```json
134
- {
135
- "completion": "\nNext: domestic cat breeds with short hair >>",
136
- "stop_reason": null,
137
- "truncated": false,
138
- "stop": null,
139
- "model": "llama-2-13b-chat",
140
- "log_id": "cmpl-3kYiYxSNDvgMShSzFooz6t",
141
- "exception": null
142
- }
143
- ```
144
- """
145
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
146
- if optimizer:
147
- if optimizer in self.__available_optimizers:
148
- conversation_prompt = getattr(Optimizers, optimizer)(
149
- conversation_prompt if conversationally else prompt
150
- )
151
- else:
152
- raise Exception(
153
- f"Optimizer is not one of {self.__available_optimizers}"
154
- )
155
-
156
- self.session.headers.update(self.headers)
157
- payload = {
158
- "max_tokens_to_sample": self.max_tokens_to_sample,
159
- "model": self.model,
160
- "prompt": f"<s>[INST] <<SYS>>{self.system_prompt}<</SYS>>{conversation_prompt} [/INST]",
161
- "self.stop_sequence": self.stop_sequences,
162
- "stream": stream,
163
- "top_k": self.top_k,
164
- "top_p": self.top_p,
165
- }
166
-
167
- def for_stream():
168
- response = self.session.post(
169
- self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
170
- )
171
- if (
172
- not response.ok
173
- or not response.headers.get("Content-Type")
174
- == "text/event-stream; charset=utf-8"
175
- ):
176
- raise Exception(
177
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
178
- )
179
-
180
- for value in response.iter_lines(
181
- decode_unicode=True,
182
- delimiter="" if raw else "data:",
183
- chunk_size=self.stream_chunk_size,
184
- ):
185
- try:
186
- resp = json.loads(value)
187
- self.last_response.update(resp)
188
- yield value if raw else resp
189
- except json.decoder.JSONDecodeError:
190
- pass
191
- self.conversation.update_chat_history(
192
- prompt, self.get_message(self.last_response)
193
- )
194
-
195
- def for_non_stream():
196
- response = self.session.post(
197
- self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
198
- )
199
- if (
200
- not response.ok
201
- or not response.headers.get("Content-Type", "") == "application/json"
202
- ):
203
- raise Exception(
204
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
205
- )
206
- resp = response.json()
207
- self.last_response.update(resp)
208
- self.conversation.update_chat_history(
209
- prompt, self.get_message(self.last_response)
210
- )
211
- return resp
212
-
213
- return for_stream() if stream else for_non_stream()
214
-
215
- def chat(
216
- self,
217
- prompt: str,
218
- stream: bool = False,
219
- optimizer: str = None,
220
- conversationally: bool = False,
221
- ) -> str:
222
- """Generate response `str`
223
- Args:
224
- prompt (str): Prompt to be send.
225
- stream (bool, optional): Flag for streaming response. Defaults to False.
226
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
227
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
228
- Returns:
229
- str: Response generated
230
- """
231
-
232
- def for_stream():
233
- for response in self.ask(
234
- prompt, True, optimizer=optimizer, conversationally=conversationally
235
- ):
236
- yield self.get_message(response)
237
-
238
- def for_non_stream():
239
- return self.get_message(
240
- self.ask(
241
- prompt,
242
- False,
243
- optimizer=optimizer,
244
- conversationally=conversationally,
245
- )
246
- )
247
-
248
- return for_stream() if stream else for_non_stream()
249
-
250
- def get_message(self, response: dict) -> str:
251
- """Retrieves message only from response
252
-
253
- Args:
254
- response (dict): Response generated by `self.ask`
255
-
256
- Returns:
257
- str: Message extracted
258
- """
259
- assert isinstance(response, dict), "Response should be of dict data-type only"
260
- return response.get("completion")
261
- class AsyncLEO(AsyncProvider):
262
- def __init__(
263
- self,
264
- is_conversation: bool = True,
265
- max_tokens: int = 600,
266
- temperature: float = 0.2,
267
- top_k: int = -1,
268
- top_p: float = 0.999,
269
- model: str = "llama-2-13b-chat",
270
- brave_key: str = "qztbjzBqJueQZLFkwTTJrieu8Vw3789u",
271
- timeout: int = 30,
272
- intro: str = None,
273
- filepath: str = None,
274
- update_file: bool = True,
275
- proxies: dict = {},
276
- history_offset: int = 10250,
277
- act: str = None,
278
- ):
279
- """Instantiate TGPT
280
-
281
- Args:
282
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
283
- brave_key (str, optional): Brave API access key. Defaults to "qztbjzBqJueQZLFkwTTJrieu8Vw3789u".
284
- model (str, optional): Text generation model name. Defaults to "llama-2-13b-chat".
285
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
286
- temperature (float, optional): Degree of randomness in the generated text. Defaults to 0.2.
287
- top_k (int, optional): Chance of topic being repeated. Defaults to -1.
288
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
289
- timeout (int, optional): Http request timeout. Defaults to 30.
290
- intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
291
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
292
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
293
- proxies (dict, optional) : Http request proxies (socks). Defaults to {}.
294
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
295
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
296
- """
297
- self.is_conversation = is_conversation
298
- self.max_tokens_to_sample = max_tokens
299
- self.model = model
300
- self.stop_sequences = ["</response>", "</s>"]
301
- self.temperature = temperature
302
- self.top_k = top_k
303
- self.top_p = top_p
304
- self.chat_endpoint = "https://ai-chat.bsg.brave.com/v1/complete"
305
- self.stream_chunk_size = 64
306
- self.timeout = timeout
307
- self.last_response = {}
308
- self.headers = {
309
- "Content-Type": "application/json",
310
- "accept": "text/event-stream",
311
- "x-brave-key": brave_key,
312
- "accept-language": "en-US,en;q=0.9",
313
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:99.0) Gecko/20100101 Firefox/110.0",
314
- }
315
- self.__available_optimizers = (
316
- method
317
- for method in dir(Optimizers)
318
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
319
- )
320
- Conversation.intro = (
321
- AwesomePrompts().get_act(
322
- act, raise_not_found=True, default=None, case_insensitive=True
323
- )
324
- if act
325
- else intro or Conversation.intro
326
- )
327
- self.conversation = Conversation(
328
- is_conversation, self.max_tokens_to_sample, filepath, update_file
329
- )
330
- self.conversation.history_offset = history_offset
331
- self.system_prompt = (
332
- "\n\nYour name is Leo, a helpful"
333
- "respectful and honest AI assistant created by the company Brave. You will be replying to a user of the Brave browser. "
334
- "Always respond in a neutral tone. Be polite and courteous. Answer concisely in no more than 50-80 words."
335
- "\n\nPlease ensure that your responses are socially unbiased and positive in nature."
336
- "If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. "
337
- "If you don't know the answer to a question, please don't share false information.\n"
338
- )
339
- self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
340
-
341
- async def ask(
342
- self,
343
- prompt: str,
344
- stream: bool = False,
345
- raw: bool = False,
346
- optimizer: str = None,
347
- conversationally: bool = False,
348
- ) -> dict | AsyncGenerator:
349
- """Chat with AI asynchronously.
350
-
351
- Args:
352
- prompt (str): Prompt to be sent.
353
- stream (bool, optional): Flag for streaming response. Defaults to False.
354
- raw (bool, optional): Stream back raw response as received. Defaults to False.
355
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
356
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
357
- Returns:
358
- dict|AsyncGenerator : ai content
359
- ```json
360
- {
361
- "completion": "\nNext: domestic cat breeds with short hair >>",
362
- "stop_reason": null,
363
- "truncated": false,
364
- "stop": null,
365
- "model": "llama-2-13b-chat",
366
- "log_id": "cmpl-3kYiYxSNDvgMShSzFooz6t",
367
- "exception": null
368
- }
369
- ```
370
- """
371
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
372
- if optimizer:
373
- if optimizer in self.__available_optimizers:
374
- conversation_prompt = getattr(Optimizers, optimizer)(
375
- conversation_prompt if conversationally else prompt
376
- )
377
- else:
378
- raise Exception(
379
- f"Optimizer is not one of {self.__available_optimizers}"
380
- )
381
-
382
- payload = {
383
- "max_tokens_to_sample": self.max_tokens_to_sample,
384
- "model": self.model,
385
- "prompt": f"<s>[INST] <<SYS>>{self.system_prompt}<</SYS>>{conversation_prompt} [/INST]",
386
- "self.stop_sequence": self.stop_sequences,
387
- "stream": stream,
388
- "top_k": self.top_k,
389
- "top_p": self.top_p,
390
- }
391
-
392
- async def for_stream():
393
- async with self.session.stream(
394
- "POST", self.chat_endpoint, json=payload, timeout=self.timeout
395
- ) as response:
396
- if (
397
- not response.is_success
398
- or not response.headers.get("Content-Type")
399
- == "text/event-stream; charset=utf-8"
400
- ):
401
- raise exceptions.FailedToGenerateResponseError(
402
- f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
403
- )
404
- async for value in response.aiter_lines():
405
- try:
406
- resp = sanitize_stream(value)
407
- self.last_response.update(resp)
408
- yield value if raw else resp
409
- except json.decoder.JSONDecodeError:
410
- pass
411
-
412
- self.conversation.update_chat_history(
413
- prompt, await self.get_message(self.last_response)
414
- )
415
-
416
- async def for_non_stream():
417
- async for _ in for_stream():
418
- pass
419
- return self.last_response
420
-
421
- return for_stream() if stream else await for_non_stream()
422
-
423
- async def chat(
424
- self,
425
- prompt: str,
426
- stream: bool = False,
427
- optimizer: str = None,
428
- conversationally: bool = False,
429
- ) -> str | AsyncGenerator:
430
- """Generate response `str` asynchronously.
431
- Args:
432
- prompt (str): Prompt to be sent.
433
- stream (bool, optional): Flag for streaming response. Defaults to False.
434
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
435
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
436
- Returns:
437
- str|AsyncGenerator: Response generated
438
- """
439
-
440
- async def for_stream():
441
- async_ask = await self.ask(
442
- prompt, True, optimizer=optimizer, conversationally=conversationally
443
- )
444
- async for response in async_ask:
445
- yield await self.get_message(response)
446
-
447
- async def for_non_stream():
448
- return await self.get_message(
449
- await self.ask(
450
- prompt,
451
- False,
452
- optimizer=optimizer,
453
- conversationally=conversationally,
454
- )
455
- )
456
-
457
- return for_stream() if stream else await for_non_stream()
458
-
459
- async def get_message(self, response: dict) -> str:
460
- """Retrieves message only from response
461
-
462
- Args:
463
- response (dict): Response generated by `self.ask`
464
-
465
- Returns:
466
- str: Message extracted
467
- """
468
- assert isinstance(response, dict), "Response should be of dict data-type only"
469
- return response.get("completion")
webscout/Provider/Llama2.py DELETED
@@ -1,437 +0,0 @@
1
- import time
2
- import uuid
3
- from selenium import webdriver
4
- from selenium.webdriver.chrome.options import Options
5
- from selenium.webdriver.common.by import By
6
- from selenium.webdriver.support import expected_conditions as EC
7
- from selenium.webdriver.support.ui import WebDriverWait
8
- import click
9
- import requests
10
- from requests import get
11
- from uuid import uuid4
12
- from re import findall
13
- from requests.exceptions import RequestException
14
- from curl_cffi.requests import get, RequestsError
15
- import g4f
16
- from random import randint
17
- from PIL import Image
18
- import io
19
- import re
20
- import json
21
- import yaml
22
- from ..AIutel import Optimizers
23
- from ..AIutel import Conversation
24
- from ..AIutel import AwesomePrompts, sanitize_stream
25
- from ..AIbase import Provider, AsyncProvider
26
- from Helpingai_T2 import Perplexity
27
- from webscout import exceptions
28
- from typing import Any, AsyncGenerator, Dict
29
- import logging
30
- import httpx
31
-
32
- class AsyncLLAMA2(AsyncProvider):
33
- def __init__(
34
- self,
35
- is_conversation: bool = True,
36
- max_tokens: int = 800,
37
- temperature: float = 0.75,
38
- presence_penalty: int = 0,
39
- frequency_penalty: int = 0,
40
- top_p: float = 0.9,
41
- model: str = "meta/meta-llama-3-70b-instruct",
42
- timeout: int = 30,
43
- intro: str = None,
44
- filepath: str = None,
45
- update_file: bool = True,
46
- proxies: dict = {},
47
- history_offset: int = 10250,
48
- act: str = None,
49
- ):
50
- """Instantiates LLAMA2
51
-
52
- Args:
53
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
54
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 800.
55
- temperature (float, optional): Degree of randomness in the generated text. Defaults to 0.75.
56
- presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
57
- frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
58
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.9.
59
- model (str, optional): LLM model name. Defaults to "meta/meta-llama-3-70b-instruct".
60
- timeout (int, optional): Http request timeout. Defaults to 30.
61
- intro (str, optional): Conversation introductory prompt. Defaults to None.
62
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
63
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
64
- proxies (dict, optional): Http request proxies. Defaults to {}.
65
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
66
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
67
- """
68
- self.is_conversation = is_conversation
69
- self.max_tokens_to_sample = max_tokens
70
- self.model = model
71
- self.temperature = temperature
72
- self.presence_penalty = presence_penalty
73
- self.frequency_penalty = frequency_penalty
74
- self.top_p = top_p
75
- self.chat_endpoint = "https://www.llama2.ai/api"
76
- self.stream_chunk_size = 64
77
- self.timeout = timeout
78
- self.last_response = {}
79
- self.headers = {
80
- "Content-Type": "application/json",
81
- "Referer": "https://www.llama2.ai/",
82
- "Content-Type": "text/plain;charset=UTF-8",
83
- "Origin": "https://www.llama2.ai",
84
- }
85
-
86
- self.__available_optimizers = (
87
- method
88
- for method in dir(Optimizers)
89
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
90
- )
91
- Conversation.intro = (
92
- AwesomePrompts().get_act(
93
- act, raise_not_found=True, default=None, case_insensitive=True
94
- )
95
- if act
96
- else intro or Conversation.intro
97
- )
98
- self.conversation = Conversation(
99
- is_conversation, self.max_tokens_to_sample, filepath, update_file
100
- )
101
- self.conversation.history_offset = history_offset
102
- self.session = httpx.AsyncClient(
103
- headers=self.headers,
104
- proxies=proxies,
105
- )
106
-
107
- async def ask(
108
- self,
109
- prompt: str,
110
- stream: bool = False,
111
- raw: bool = False,
112
- optimizer: str = None,
113
- conversationally: bool = False,
114
- ) -> dict | AsyncGenerator:
115
- """Chat with AI asynchronously.
116
-
117
- Args:
118
- prompt (str): Prompt to be send.
119
- stream (bool, optional): Flag for streaming response. Defaults to False.
120
- raw (bool, optional): Stream back raw response as received. Defaults to False.
121
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
122
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
123
- Returns:
124
- dict|AsyncGeneraror[dict] : ai content
125
- ```json
126
- {
127
- "text" : "How may I help you today?"
128
- }
129
- ```
130
- """
131
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
132
- if optimizer:
133
- if optimizer in self.__available_optimizers:
134
- conversation_prompt = getattr(Optimizers, optimizer)(
135
- conversation_prompt if conversationally else prompt
136
- )
137
- else:
138
- raise Exception(
139
- f"Optimizer is not one of {self.__available_optimizers}"
140
- )
141
-
142
- payload = {
143
- "prompt": f"{conversation_prompt}<s>[INST] {prompt} [/INST]",
144
- "model": self.model,
145
- "systemPrompt": "You are a helpful assistant.",
146
- "temperature": self.temperature,
147
- "topP": self.top_p,
148
- "maxTokens": self.max_tokens_to_sample,
149
- "image": None,
150
- "audio": None,
151
- }
152
-
153
- async def for_stream():
154
- async with self.session.stream(
155
- "POST", self.chat_endpoint, json=payload, timeout=self.timeout
156
- ) as response:
157
- if (
158
- not response.is_success
159
- or not response.headers.get("Content-Type")
160
- == "text/plain; charset=utf-8"
161
- ):
162
- raise exceptions.FailedToGenerateResponseError(
163
- f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
164
- )
165
- message_load: str = ""
166
- async for value in response.aiter_lines():
167
- try:
168
- if bool(value.strip()):
169
- message_load += value + "\n"
170
- resp: dict = dict(text=message_load)
171
- yield value if raw else resp
172
- self.last_response.update(resp)
173
- except json.decoder.JSONDecodeError:
174
- pass
175
- self.conversation.update_chat_history(
176
- prompt, await self.get_message(self.last_response)
177
- )
178
-
179
- async def for_non_stream():
180
- async for _ in for_stream():
181
- pass
182
- return self.last_response
183
-
184
- return for_stream() if stream else await for_non_stream()
185
-
186
- async def chat(
187
- self,
188
- prompt: str,
189
- stream: bool = False,
190
- optimizer: str = None,
191
- conversationally: bool = False,
192
- ) -> str | AsyncGenerator:
193
- """Generate response `str` asynchronously.
194
- Args:
195
- prompt (str): Prompt to be send.
196
- stream (bool, optional): Flag for streaming response. Defaults to False.
197
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
198
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
199
- Returns:
200
- str|AsyncGenerator: Response generated
201
- """
202
-
203
- async def for_stream():
204
- async_ask = await self.ask(
205
- prompt, True, optimizer=optimizer, conversationally=conversationally
206
- )
207
- async for response in async_ask:
208
- yield await self.get_message(response)
209
-
210
- async def for_non_stream():
211
- return await self.get_message(
212
- await self.ask(
213
- prompt,
214
- False,
215
- optimizer=optimizer,
216
- conversationally=conversationally,
217
- )
218
- )
219
-
220
- return for_stream() if stream else await for_non_stream()
221
-
222
- async def get_message(self, response: dict) -> str:
223
- """Retrieves message only from response
224
-
225
- Args:
226
- response (str): Response generated by `self.ask`
227
-
228
- Returns:
229
- str: Message extracted
230
- """
231
- assert isinstance(response, dict), "Response should be of dict data-type only"
232
- return response["text"]
233
- class LLAMA2(Provider):
234
- def __init__(
235
- self,
236
- is_conversation: bool = True,
237
- max_tokens: int = 800,
238
- temperature: float = 0.75,
239
- presence_penalty: int = 0,
240
- frequency_penalty: int = 0,
241
- top_p: float = 0.9,
242
- model: str = "meta/meta-llama-3-70b-instruct",
243
- timeout: int = 30,
244
- intro: str = None,
245
- filepath: str = None,
246
- update_file: bool = True,
247
- proxies: dict = {},
248
- history_offset: int = 10250,
249
- act: str = None,
250
- ):
251
- """Instantiates LLAMA2
252
-
253
- Args:
254
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
255
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 800.
256
- temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.75.
257
- presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
258
- frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
259
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.9.
260
- model (str, optional): LLM model name. Defaults to "meta/llama-2-70b-chat".
261
- timeout (int, optional): Http request timeout. Defaults to 30.
262
- intro (str, optional): Conversation introductory prompt. Defaults to None.
263
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
264
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
265
- proxies (dict, optional): Http request proxies. Defaults to {}.
266
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
267
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
268
- """
269
- self.session = requests.Session()
270
- self.is_conversation = is_conversation
271
- self.max_tokens_to_sample = max_tokens
272
- self.model = model
273
- self.temperature = temperature
274
- self.presence_penalty = presence_penalty
275
- self.frequency_penalty = frequency_penalty
276
- self.top_p = top_p
277
- self.chat_endpoint = "https://www.llama2.ai/api"
278
- self.stream_chunk_size = 64
279
- self.timeout = timeout
280
- self.last_response = {}
281
- self.headers = {
282
- "Content-Type": "application/json",
283
- "Referer": "https://www.llama2.ai/",
284
- "Content-Type": "text/plain;charset=UTF-8",
285
- "Origin": "https://www.llama2.ai",
286
- }
287
-
288
- self.__available_optimizers = (
289
- method
290
- for method in dir(Optimizers)
291
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
292
- )
293
- self.session.headers.update(self.headers)
294
- Conversation.intro = (
295
- AwesomePrompts().get_act(
296
- act, raise_not_found=True, default=None, case_insensitive=True
297
- )
298
- if act
299
- else intro or Conversation.intro
300
- )
301
- self.conversation = Conversation(
302
- is_conversation, self.max_tokens_to_sample, filepath, update_file
303
- )
304
- self.conversation.history_offset = history_offset
305
- self.session.proxies = proxies
306
-
307
- def ask(
308
- self,
309
- prompt: str,
310
- stream: bool = False,
311
- raw: bool = False,
312
- optimizer: str = None,
313
- conversationally: bool = False,
314
- ) -> dict:
315
- """Chat with AI
316
-
317
- Args:
318
- prompt (str): Prompt to be send.
319
- stream (bool, optional): Flag for streaming response. Defaults to False.
320
- raw (bool, optional): Stream back raw response as received. Defaults to False.
321
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
322
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
323
- Returns:
324
- dict : {}
325
- ```json
326
- {
327
- "text" : "How may I help you today?"
328
- }
329
- ```
330
- """
331
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
332
- if optimizer:
333
- if optimizer in self.__available_optimizers:
334
- conversation_prompt = getattr(Optimizers, optimizer)(
335
- conversation_prompt if conversationally else prompt
336
- )
337
- else:
338
- raise Exception(
339
- f"Optimizer is not one of {self.__available_optimizers}"
340
- )
341
- self.session.headers.update(self.headers)
342
-
343
- payload = {
344
- "prompt": f"{conversation_prompt}<s>[INST] {prompt} [/INST]",
345
- "model": self.model,
346
- "systemPrompt": "You are a helpful assistant.",
347
- "temperature": self.temperature,
348
- "topP": self.top_p,
349
- "maxTokens": self.max_tokens_to_sample,
350
- "image": None,
351
- "audio": None,
352
- }
353
-
354
- def for_stream():
355
- response = self.session.post(
356
- self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
357
- )
358
- if (
359
- not response.ok
360
- or not response.headers.get("Content-Type")
361
- == "text/plain; charset=utf-8"
362
- ):
363
- raise exceptions.FailedToGenerateResponseError(
364
- f"Failed to generate response - ({response.status_code}, {response.reason})"
365
- )
366
-
367
- message_load: str = ""
368
- for value in response.iter_lines(
369
- decode_unicode=True,
370
- delimiter="\n",
371
- chunk_size=self.stream_chunk_size,
372
- ):
373
- try:
374
- if bool(value.strip()):
375
- message_load += value + "\n"
376
- resp: dict = dict(text=message_load)
377
- yield value if raw else resp
378
- self.last_response.update(resp)
379
- except json.decoder.JSONDecodeError:
380
- pass
381
- self.conversation.update_chat_history(
382
- prompt, self.get_message(self.last_response)
383
- )
384
-
385
- def for_non_stream():
386
- for _ in for_stream():
387
- pass
388
- return self.last_response
389
-
390
- return for_stream() if stream else for_non_stream()
391
-
392
- def chat(
393
- self,
394
- prompt: str,
395
- stream: bool = False,
396
- optimizer: str = None,
397
- conversationally: bool = False,
398
- ) -> str:
399
- """Generate response `str`
400
- Args:
401
- prompt (str): Prompt to be send.
402
- stream (bool, optional): Flag for streaming response. Defaults to False.
403
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
404
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
405
- Returns:
406
- str: Response generated
407
- """
408
-
409
- def for_stream():
410
- for response in self.ask(
411
- prompt, True, optimizer=optimizer, conversationally=conversationally
412
- ):
413
- yield self.get_message(response)
414
-
415
- def for_non_stream():
416
- return self.get_message(
417
- self.ask(
418
- prompt,
419
- False,
420
- optimizer=optimizer,
421
- conversationally=conversationally,
422
- )
423
- )
424
-
425
- return for_stream() if stream else for_non_stream()
426
-
427
- def get_message(self, response: dict) -> str:
428
- """Retrieves message only from response
429
-
430
- Args:
431
- response (str): Response generated by `self.ask`
432
-
433
- Returns:
434
- str: Message extracted
435
- """
436
- assert isinstance(response, dict), "Response should be of dict data-type only"
437
- return response["text"]
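
For context, a minimal sketch of how the deleted `LLAMA2` provider was typically driven. The import path assumes the pre-deletion package layout; only the constructor defaults and the `chat`/`ask` signatures come from the deleted source above.

```python
# Minimal sketch of the deleted LLAMA2 provider's API (import path is an
# assumption based on the layout before this commit).
from webscout.Provider.Llama2 import LLAMA2

bot = LLAMA2(max_tokens=800, temperature=0.75)

# Non-streaming: chat() returns the whole reply as one str.
print(bot.chat("Summarize what an HTTP 429 status means."))

# Streaming: each yielded value is the accumulated reply so far, because
# ask() appends every received line to a single "text" buffer.
for text_so_far in bot.chat("Write a limerick about sockets.", stream=True):
    print(text_so_far)
```
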
webscout/Provider/OpenGPT.py DELETED
@@ -1,487 +0,0 @@
- import time
- import uuid
- from selenium import webdriver
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.support.ui import WebDriverWait
- import click
- import requests
- from requests import get
- from uuid import uuid4
- from re import findall
- from requests.exceptions import RequestException
- from curl_cffi.requests import get, RequestsError
- import g4f
- from random import randint
- from PIL import Image
- import io
- import re
- import json
- import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from Helpingai_T2 import Perplexity
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
- import logging
- import httpx
- #------------------------------------------------------OpenGPT-----------------------------------------------------------
- class OPENGPT:
-     def __init__(
-         self,
-         assistant_id,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Instantiates OPENGPT
-
-         Args:
-             assistant_id (str): Identifier of the OpenGPTs assistant to chat with.
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.session = requests.Session()
-         self.max_tokens_to_sample = max_tokens
-         self.is_conversation = is_conversation
-         self.chat_endpoint = (
-             "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
-         )
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.assistant_id = assistant_id
-         self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app"
-
-         self.headers = {
-             "authority": self.authority,
-             "accept": "text/event-stream",
-             "accept-language": "en-US,en;q=0.7",
-             "cache-control": "no-cache",
-             "content-type": "application/json",
-             "origin": "https://opengpts-example-vz4y4ooboq-uc.a.run.app",
-             "pragma": "no-cache",
-             "referer": "https://opengpts-example-vz4y4ooboq-uc.a.run.app/",
-             "sec-fetch-site": "same-origin",
-             "sec-gpc": "1",
-             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
-         }
-
-         self.__available_optimizers = tuple(
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-             ```json
-             {
-                 "messages": [
-                     {
-                         "content": "Hello there",
-                         "additional_kwargs": {},
-                         "type": "human",
-                         "example": false
-                     },
-                     {
-                         "content": "Hello! How can I assist you today?",
-                         "additional_kwargs": {
-                             "agent": {
-                                 "return_values": {
-                                     "output": "Hello! How can I assist you today?"
-                                 },
-                                 "log": "Hello! How can I assist you today?",
-                                 "type": "AgentFinish"
-                             }
-                         },
-                         "type": "ai",
-                         "example": false
-                     }
-                 ]
-             }
-             ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         self.session.headers.update(self.headers)
-         self.session.headers.update(
-             dict(
-                 cookie=f"opengpts_user_id={uuid4()}",
-             )
-         )
-         payload = {
-             "input": [
-                 {
-                     "content": conversation_prompt,
-                     "additional_kwargs": {},
-                     "type": "human",
-                     "example": False,
-                 },
-             ],
-             "assistant_id": self.assistant_id,
-             "thread_id": "",
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-             if (
-                 not response.ok
-                 or not response.headers.get("Content-Type")
-                 == "text/event-stream; charset=utf-8"
-             ):
-                 raise Exception(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             for value in response.iter_lines(
-                 decode_unicode=True,
-                 chunk_size=self.stream_chunk_size,
-             ):
-                 try:
-                     modified_value = re.sub("data:", "", value)
-                     resp = json.loads(modified_value)
-                     if len(resp) == 1:
-                         continue
-                     self.last_response.update(resp[1])
-                     yield value if raw else resp[1]
-                 except json.decoder.JSONDecodeError:
-                     pass
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["content"]
-
- class AsyncOPENGPT(AsyncProvider):
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Instantiates OPENGPT
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.max_tokens_to_sample = max_tokens
-         self.is_conversation = is_conversation
-         self.chat_endpoint = (
-             "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
-         )
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.assistant_id = "bca37014-6f97-4f2b-8928-81ea8d478d88"
-         self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app"
-
-         self.headers = {
-             "authority": self.authority,
-             "accept": "text/event-stream",
-             "accept-language": "en-US,en;q=0.7",
-             "cache-control": "no-cache",
-             "content-type": "application/json",
-             "origin": "https://opengpts-example-vz4y4ooboq-uc.a.run.app",
-             "pragma": "no-cache",
-             "referer": "https://opengpts-example-vz4y4ooboq-uc.a.run.app/",
-             "sec-fetch-site": "same-origin",
-             "sec-gpc": "1",
-             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
-         }
-
-         self.__available_optimizers = tuple(
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
-
-     async def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict | AsyncGenerator:
-         """Chat with AI asynchronously
-
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict|AsyncGenerator : ai content.
-             ```json
-             {
-                 "messages": [
-                     {
-                         "content": "Hello there",
-                         "additional_kwargs": {},
-                         "type": "human",
-                         "example": false
-                     },
-                     {
-                         "content": "Hello! How can I assist you today?",
-                         "additional_kwargs": {
-                             "agent": {
-                                 "return_values": {
-                                     "output": "Hello! How can I assist you today?"
-                                 },
-                                 "log": "Hello! How can I assist you today?",
-                                 "type": "AgentFinish"
-                             }
-                         },
-                         "type": "ai",
-                         "example": false
-                     }
-                 ]
-             }
-             ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-         self.headers.update(
-             dict(
-                 cookie=f"opengpts_user_id={uuid4()}",
-             )
-         )
-         payload = {
-             "input": [
-                 {
-                     "content": conversation_prompt,
-                     "additional_kwargs": {},
-                     "type": "human",
-                     "example": False,
-                 },
-             ],
-             "assistant_id": self.assistant_id,
-             "thread_id": "",
-         }
-
-         async def for_stream():
-             async with self.session.stream(
-                 "POST",
-                 self.chat_endpoint,
-                 json=payload,
-                 timeout=self.timeout,
-                 headers=self.headers,
-             ) as response:
-                 if (
-                     not response.is_success
-                     or not response.headers.get("Content-Type")
-                     == "text/event-stream; charset=utf-8"
-                 ):
-                     raise exceptions.FailedToGenerateResponseError(
-                         f"Failed to generate response - ({response.status_code}, {response.reason_phrase}) - {response.text}"
-                     )
-
-                 async for value in response.aiter_lines():
-                     try:
-                         modified_value = re.sub("data:", "", value)
-                         resp = json.loads(modified_value)
-                         if len(resp) == 1:
-                             continue
-                         self.last_response.update(resp[1])
-                         yield value if raw else resp[1]
-                     except json.decoder.JSONDecodeError:
-                         pass
-
-             self.conversation.update_chat_history(
-                 prompt, await self.get_message(self.last_response)
-             )
-
-         async def for_non_stream():
-             async for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str | AsyncGenerator:
-         """Generate response `str` asynchronously.
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str|AsyncGenerator: Response generated
-         """
-
-         async def for_stream():
-             async_ask = await self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             )
-             async for response in async_ask:
-                 yield await self.get_message(response)
-
-         async def for_non_stream():
-             return await self.get_message(
-                 await self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["content"]
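
Unlike the other providers removed in this commit, the sync `OPENGPT` class required an `assistant_id` at construction time, while `AsyncOPENGPT` hardcoded one. A minimal sketch follows; the import path and the reuse of the hardcoded id with the sync class are assumptions.

```python
# Minimal sketch of the deleted OPENGPT provider. The assistant_id below is
# the one AsyncOPENGPT hardcoded; reusing it here is an assumption.
from webscout.Provider.OpenGPT import OPENGPT

bot = OPENGPT(assistant_id="bca37014-6f97-4f2b-8928-81ea8d478d88")

# get_message() returns the "content" field of the last AI event parsed
# from the text/event-stream response.
print(bot.chat("Hello there"))
```
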
webscout/Provider/Openai.py DELETED
@@ -1,511 +0,0 @@
- import time
- import uuid
- from selenium import webdriver
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.support.ui import WebDriverWait
- import click
- import requests
- from requests import get
- from uuid import uuid4
- from re import findall
- from requests.exceptions import RequestException
- from curl_cffi.requests import get, RequestsError
- import g4f
- from random import randint
- from PIL import Image
- import io
- import re
- import json
- import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from Helpingai_T2 import Perplexity
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
- import logging
- import httpx
- #----------------------------------------------------------OpenAI-----------------------------------
- class OPENAI(Provider):
-     def __init__(
-         self,
-         api_key: str,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         temperature: float = 1,
-         presence_penalty: int = 0,
-         frequency_penalty: int = 0,
-         top_p: float = 1,
-         model: str = "gpt-3.5-turbo",
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Instantiates OPENAI
-
-         Args:
-             api_key (str): OpenAI's API key.
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             temperature (float, optional): Degree of randomness in the generated text. Defaults to 1.
-             presence_penalty (int, optional): Penalty applied when a topic is repeated. Defaults to 0.
-             frequency_penalty (int, optional): Penalty applied when a word is repeated. Defaults to 0.
-             top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
-             model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_key = api_key
-         self.model = model
-         self.temperature = temperature
-         self.presence_penalty = presence_penalty
-         self.frequency_penalty = frequency_penalty
-         self.top_p = top_p
-         self.chat_endpoint = "https://api.openai.com/v1/chat/completions"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {
-             "Content-Type": "application/json",
-             "Authorization": f"Bearer {self.api_key}",
-         }
-         # Required by the self.session calls below; the deleted file referenced
-         # self.session without ever creating it.
-         self.session = requests.Session()
-
-         self.__available_optimizers = tuple(
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-             ```json
-             {
-                 "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
-                 "object": "chat.completion",
-                 "created": 1704623244,
-                 "model": "gpt-3.5-turbo",
-                 "usage": {
-                     "prompt_tokens": 0,
-                     "completion_tokens": 0,
-                     "total_tokens": 0
-                 },
-                 "choices": [
-                     {
-                         "message": {
-                             "role": "assistant",
-                             "content": "Hello! How can I assist you today?"
-                         },
-                         "finish_reason": "stop",
-                         "index": 0
-                     }
-                 ]
-             }
-             ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-         self.session.headers.update(self.headers)
-         payload = {
-             "frequency_penalty": self.frequency_penalty,
-             "messages": [{"content": conversation_prompt, "role": "user"}],
-             "model": self.model,
-             "presence_penalty": self.presence_penalty,
-             "stream": stream,
-             "temperature": self.temperature,
-             "top_p": self.top_p,
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             message_load = ""
-             for value in response.iter_lines(
-                 decode_unicode=True,
-                 delimiter="" if raw else "data:",
-                 chunk_size=self.stream_chunk_size,
-             ):
-                 try:
-                     resp = json.loads(value)
-                     incomplete_message = self.get_message(resp)
-                     if incomplete_message:
-                         message_load += incomplete_message
-                         resp["choices"][0]["delta"]["content"] = message_load
-                         self.last_response.update(resp)
-                         yield value if raw else resp
-                     elif raw:
-                         yield value
-                 except json.decoder.JSONDecodeError:
-                     pass
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
-             )
-             if (
-                 not response.ok
-                 or not response.headers.get("Content-Type", "") == "application/json"
-             ):
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-             resp = response.json()
-             self.last_response.update(resp)
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-             return resp
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         try:
-             if response["choices"][0].get("delta"):
-                 return response["choices"][0]["delta"]["content"]
-             return response["choices"][0]["message"]["content"]
-         except KeyError:
-             return ""
-
- class AsyncOPENAI(AsyncProvider):
-     def __init__(
-         self,
-         api_key: str,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         temperature: float = 1,
-         presence_penalty: int = 0,
-         frequency_penalty: int = 0,
-         top_p: float = 1,
-         model: str = "gpt-3.5-turbo",
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Instantiates OPENAI
-
-         Args:
-             api_key (str): OpenAI's API key.
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             temperature (float, optional): Degree of randomness in the generated text. Defaults to 1.
-             presence_penalty (int, optional): Penalty applied when a topic is repeated. Defaults to 0.
-             frequency_penalty (int, optional): Penalty applied when a word is repeated. Defaults to 0.
-             top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
-             model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_key = api_key
-         self.model = model
-         self.temperature = temperature
-         self.presence_penalty = presence_penalty
-         self.frequency_penalty = frequency_penalty
-         self.top_p = top_p
-         self.chat_endpoint = "https://api.openai.com/v1/chat/completions"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {
-             "Content-Type": "application/json",
-             "Authorization": f"Bearer {self.api_key}",
-         }
-
-         self.__available_optimizers = tuple(
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session = httpx.AsyncClient(
-             headers=self.headers,
-             proxies=proxies,
-         )
-
-     async def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict | AsyncGenerator:
-         """Chat with AI asynchronously.
-
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict|AsyncGenerator : ai content.
-             ```json
-             {
-                 "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
-                 "object": "chat.completion",
-                 "created": 1704623244,
-                 "model": "gpt-3.5-turbo",
-                 "usage": {
-                     "prompt_tokens": 0,
-                     "completion_tokens": 0,
-                     "total_tokens": 0
-                 },
-                 "choices": [
-                     {
-                         "message": {
-                             "role": "assistant",
-                             "content": "Hello! How can I assist you today?"
-                         },
-                         "finish_reason": "stop",
-                         "index": 0
-                     }
-                 ]
-             }
-             ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-         payload = {
-             "frequency_penalty": self.frequency_penalty,
-             "messages": [{"content": conversation_prompt, "role": "user"}],
-             "model": self.model,
-             "presence_penalty": self.presence_penalty,
-             "stream": stream,
-             "temperature": self.temperature,
-             "top_p": self.top_p,
-         }
-
-         async def for_stream():
-             async with self.session.stream(
-                 "POST", self.chat_endpoint, json=payload, timeout=self.timeout
-             ) as response:
-                 if not response.is_success:
-                     raise Exception(
-                         f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
-                     )
-
-                 message_load = ""
-                 async for value in response.aiter_lines():
-                     try:
-                         resp = sanitize_stream(value)
-                         incomplete_message = await self.get_message(resp)
-                         if incomplete_message:
-                             message_load += incomplete_message
-                             resp["choices"][0]["delta"]["content"] = message_load
-                             self.last_response.update(resp)
-                             yield value if raw else resp
-                         elif raw:
-                             yield value
-                     except json.decoder.JSONDecodeError:
-                         pass
-             self.conversation.update_chat_history(
-                 prompt, await self.get_message(self.last_response)
-             )
-
-         async def for_non_stream():
-             response = httpx.post(
-                 self.chat_endpoint,
-                 json=payload,
-                 timeout=self.timeout,
-                 headers=self.headers,
-             )
-             if (
-                 not response.is_success
-                 or not response.headers.get("Content-Type", "") == "application/json"
-             ):
-                 raise Exception(
-                     f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
-                 )
-             resp = response.json()
-             self.last_response.update(resp)
-             self.conversation.update_chat_history(
-                 prompt, await self.get_message(self.last_response)
-             )
-             return resp
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str | AsyncGenerator:
-         """Generate response `str` asynchronously.
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str|AsyncGenerator: Response generated
-         """
-
-         async def for_stream():
-             async_ask = await self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             )
-             async for response in async_ask:
-                 yield await self.get_message(response)
-
-         async def for_non_stream():
-             return await self.get_message(
-                 await self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def get_message(self, response: dict) -> str:
-         """Retrieves message only from response asynchronously.
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         try:
-             if response["choices"][0].get("delta"):
-                 return response["choices"][0]["delta"]["content"]
-             return response["choices"][0]["message"]["content"]
-         except KeyError:
-             return ""
webscout/Provider/Perplexity.py DELETED
@@ -1,230 +0,0 @@
1
- import time
2
- import uuid
3
- from selenium import webdriver
4
- from selenium.webdriver.chrome.options import Options
5
- from selenium.webdriver.common.by import By
6
- from selenium.webdriver.support import expected_conditions as EC
7
- from selenium.webdriver.support.ui import WebDriverWait
8
- import click
9
- import requests
10
- from requests import get
11
- from uuid import uuid4
12
- from re import findall
13
- from requests.exceptions import RequestException
14
- from curl_cffi.requests import get, RequestsError
15
- import g4f
16
- from random import randint
17
- from PIL import Image
18
- import io
19
- import re
20
- import json
21
- import yaml
22
- from ..AIutel import Optimizers
23
- from ..AIutel import Conversation
24
- from ..AIutel import AwesomePrompts, sanitize_stream
25
- from ..AIbase import Provider, AsyncProvider
26
- from Helpingai_T2 import Perplexity
27
- from webscout import exceptions
28
- from typing import Any, AsyncGenerator, Dict
29
- import logging
30
- import httpx
31
- #------------------------------------------------------PERPLEXITY--------------------------------------------------------
32
- class PERPLEXITY(Provider):
33
- def __init__(
34
- self,
35
- is_conversation: bool = True,
36
- max_tokens: int = 600,
37
- timeout: int = 30,
38
- intro: str = None,
39
- filepath: str = None,
40
- update_file: bool = True,
41
- proxies: dict = {},
42
- history_offset: int = 10250,
43
- act: str = None,
44
- quiet: bool = False,
45
- ):
46
- """Instantiates PERPLEXITY
47
-
48
- Args:
49
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
50
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
51
- timeout (int, optional): Http request timeout. Defaults to 30.
52
- intro (str, optional): Conversation introductory prompt. Defaults to None.
53
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
54
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
55
- proxies (dict, optional): Http request proxies. Defaults to {}.
56
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
57
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
58
- quiet (bool, optional): Ignore web search-results and yield final response only. Defaults to False.
59
- """
60
- self.max_tokens_to_sample = max_tokens
61
- self.is_conversation = is_conversation
62
- self.last_response = {}
63
- self.web_results: dict = {}
64
- self.quiet = quiet
65
-
66
- self.__available_optimizers = (
67
- method
68
- for method in dir(Optimizers)
69
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
70
- )
71
- Conversation.intro = (
72
- AwesomePrompts().get_act(
73
- act, raise_not_found=True, default=None, case_insensitive=True
74
- )
75
- if act
76
- else intro or Conversation.intro
77
- )
78
- self.conversation = Conversation(
79
- is_conversation, self.max_tokens_to_sample, filepath, update_file
80
- )
81
- self.conversation.history_offset = history_offset
82
-
83
- def ask(
84
- self,
85
- prompt: str,
86
- stream: bool = False,
87
- raw: bool = False,
88
- optimizer: str = None,
89
- conversationally: bool = False,
90
- ) -> dict:
91
- """Chat with AI
92
-
93
- Args:
94
- prompt (str): Prompt to be send.
95
- stream (bool, optional): Flag for streaming response. Defaults to False.
96
- raw (bool, optional): Stream back raw response as received. Defaults to False.
97
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
98
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
99
- Returns:
100
- dict : {}
101
- ```json
102
- {
103
- "status": "pending",
104
- "uuid": "3604dfcc-611f-4b7d-989d-edca2a7233c7",
105
- "read_write_token": null,
106
- "frontend_context_uuid": "f6d43119-5231-481d-b692-f52e1f52d2c6",
107
- "final": false,
108
- "backend_uuid": "a6d6ec9e-da69-4841-af74-0de0409267a8",
109
- "media_items": [],
110
- "widget_data": [],
111
- "knowledge_cards": [],
112
- "expect_search_results": "false",
113
- "mode": "concise",
114
- "search_focus": "internet",
115
- "gpt4": false,
116
- "display_model": "turbo",
117
- "attachments": null,
118
- "answer": "",
119
- "web_results": [],
120
- "chunks": [],
121
- "extra_web_results": []
122
- }
123
- ```
124
- """
125
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
126
- if optimizer:
127
- if optimizer in self.__available_optimizers:
128
- conversation_prompt = getattr(Optimizers, optimizer)(
129
- conversation_prompt if conversationally else prompt
130
- )
131
- else:
132
- raise Exception(
133
- f"Optimizer is not one of {self.__available_optimizers}"
134
- )
135
-
136
- def for_stream():
137
- for response in Perplexity().generate_answer(conversation_prompt):
138
- yield json.dumps(response) if raw else response
139
- self.last_response.update(response)
140
-
141
- self.conversation.update_chat_history(
142
- prompt,
143
- self.get_message(self.last_response),
144
- )
145
-
146
- def for_non_stream():
147
- for _ in for_stream():
148
- pass
149
- return self.last_response
150
-
151
- return for_stream() if stream else for_non_stream()
152
-
153
- def chat(
154
- self,
155
- prompt: str,
156
- stream: bool = False,
157
- optimizer: str = None,
158
- conversationally: bool = False,
159
- ) -> str:
160
- """Generate response `str`
161
- Args:
162
- prompt (str): Prompt to be send.
163
- stream (bool, optional): Flag for streaming response. Defaults to False.
164
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
165
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
166
- Returns:
167
- str: Response generated
168
- """
169
-
170
- def for_stream():
171
- for response in self.ask(
172
- prompt, True, optimizer=optimizer, conversationally=conversationally
173
- ):
174
- yield self.get_message(response)
175
-
176
- def for_non_stream():
177
- return self.get_message(
178
- self.ask(
179
- prompt,
180
- False,
181
- optimizer=optimizer,
182
- conversationally=conversationally,
183
- )
184
- )
185
-
186
- return for_stream() if stream else for_non_stream()
187
-
188
- def get_message(self, response: dict) -> str:
189
- """Retrieves message only from response
190
-
191
- Args:
192
- response (dict): Response generated by `self.ask`
193
-
194
- Returns:
195
- str: Message extracted
196
- """
197
- assert isinstance(response, dict), "Response should be of dict data-type only"
198
- text_str: str = response.get("answer", "")
199
-
200
- def update_web_results(web_results: list) -> None:
201
- for index, results in enumerate(web_results, start=1):
202
- self.web_results[str(index) + ". " + results["name"]] = dict(
203
- url=results.get("url"), snippet=results.get("snippet")
204
- )
205
-
206
- if response.get("text"):
207
- # last chunk
208
- target: dict[str, Any] = json.loads(response.get("text"))
209
- text_str = target.get("answer")
210
- web_results: list[dict] = target.get("web_results")
211
- self.web_results.clear()
212
- update_web_results(web_results)
213
-
214
- return (
215
- text_str
216
- if self.quiet or not self.web_results
217
- else text_str + "\n\n# WEB-RESULTS\n\n" + yaml.dump(self.web_results)
218
- )
219
-
220
- else:
221
- if str(response.get("expect_search_results")).lower() == "true":
222
- return (
223
- text_str
224
- if self.quiet
225
- else text_str
226
- + "\n\n# WEB-RESULTS\n\n"
227
- + yaml.dump(response.get("web_results"))
228
- )
229
- else:
230
- return text_str
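For readers tracking this deletion: the methods above belong to the Perplexity-backed provider removed by this commit. A minimal usage sketch, assuming a pre-deletion checkout with the `Helpingai_T2` dependency installed; the export name `PERPLEXITY` and the `quiet` keyword are assumptions based on the package's naming pattern and the `self.quiet` attribute used above, not confirmed by this diff:

```python
# Hypothetical usage of the provider deleted above; the import path and
# class name only exist in pre-deletion checkouts.
from webscout.Provider.Perplexity import PERPLEXITY  # assumed export name

ai = PERPLEXITY()  # quiet=True (assumed kwarg) would suppress WEB-RESULTS

# Non-streaming: chat() drains ask() internally and returns the final text.
print(ai.chat("What is retrieval-augmented generation?"))

# Streaming: each yielded item is one parsed chunk's extracted message.
for piece in ai.chat("Explain DNS over HTTPS", stream=True):
    print(piece, end="", flush=True)
```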
 
webscout/Provider/Phind.py DELETED
@@ -1,518 +0,0 @@
- import time
- import uuid
- from selenium import webdriver
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.support.ui import WebDriverWait
- import click
- import requests
- from requests import get
- from uuid import uuid4
- from re import findall
- from requests.exceptions import RequestException
- from curl_cffi.requests import get, RequestsError
- import g4f
- from random import randint
- from PIL import Image
- import io
- import re
- import json
- import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from Helpingai_T2 import Perplexity
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
- import logging
- import httpx
-
- #------------------------------------------------------phind-------------------------------------------------------------
- class PhindSearch:
-     # default_model = "Phind Model"
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 8000,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "Phind Model",
-         quiet: bool = False,
-     ):
-         """Instantiates PHIND
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 8000.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): Model name. Defaults to "Phind Model".
-             quiet (bool, optional): Ignore web search-results and yield final response only. Defaults to False.
-         """
-         self.session = requests.Session()
-         self.max_tokens_to_sample = max_tokens
-         self.is_conversation = is_conversation
-         self.chat_endpoint = "https://https.extension.phind.com/agent/"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.quiet = quiet
-
-         self.headers = {
-             "Content-Type": "application/json",
-             "User-Agent": "",
-             "Accept": "*/*",
-             "Accept-Encoding": "Identity",
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "id": "chatcmpl-r0wujizf2i2xb60mjiwt",
-             "object": "chat.completion.chunk",
-             "created": 1706775384,
-             "model": "trt-llm-phind-model-serving",
-             "choices": [
-                 {
-                     "index": 0,
-                     "delta": {
-                         "content": "Hello! How can I assist you with your programming today?"
-                     },
-                     "finish_reason": null
-                 }
-             ]
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         self.session.headers.update(self.headers)
-         payload = {
-             "additional_extension_context": "",
-             "allow_magic_buttons": True,
-             "is_vscode_extension": True,
-             "message_history": [
-                 {"content": conversation_prompt, "metadata": {}, "role": "user"}
-             ],
-             "requested_model": self.model,
-             "user_input": prompt,
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-             if (
-                 not response.ok
-                 or response.headers.get("Content-Type")
-                 != "text/event-stream; charset=utf-8"
-             ):
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-             streaming_text = ""
-             for value in response.iter_lines(
-                 decode_unicode=True,
-                 chunk_size=self.stream_chunk_size,
-             ):
-                 try:
-                     modified_value = re.sub("data:", "", value)
-                     json_modified_value = json.loads(modified_value)
-                     retrieved_text = self.get_message(json_modified_value)
-                     if not retrieved_text:
-                         continue
-                     streaming_text += retrieved_text
-                     json_modified_value["choices"][0]["delta"][
-                         "content"
-                     ] = streaming_text
-                     self.last_response.update(json_modified_value)
-                     yield value if raw else json_modified_value
-                 except json.decoder.JSONDecodeError:
-                     pass
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         if response.get("type", "") == "metadata":
-             return ""
-
-         delta: dict = response["choices"][0]["delta"]
-
-         if not delta:
-             return ""
-
-         elif delta.get("function_call"):
-             if self.quiet:
-                 return ""
-
-             function_call: dict = delta["function_call"]
-             if function_call.get("name"):
-                 return function_call["name"]
-             elif function_call.get("arguments"):
-                 return function_call.get("arguments")
-
-         elif delta.get("metadata"):
-             if self.quiet:
-                 return ""
-             return yaml.dump(delta["metadata"])
-
-         else:
-             return (
-                 response["choices"][0]["delta"].get("content")
-                 if response["choices"][0].get("finish_reason") is None
-                 else ""
-             )
-
- class AsyncPhindSearch(AsyncProvider):
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "Phind Model",
-         quiet: bool = False,
-     ):
-         """Instantiates PHIND
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): Model name. Defaults to "Phind Model".
-             quiet (bool, optional): Ignore web search-results and yield final response only. Defaults to False.
-         """
-         self.max_tokens_to_sample = max_tokens
-         self.is_conversation = is_conversation
-         self.chat_endpoint = "https://https.extension.phind.com/agent/"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.quiet = quiet
-
-         self.headers = {
-             "Content-Type": "application/json",
-             "User-Agent": "",
-             "Accept": "*/*",
-             "Accept-Encoding": "Identity",
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
-
-     async def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         synchronous_generator=False,
-     ) -> dict | AsyncGenerator:
-         """Asynchronously Chat with AI
-
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict|AsyncGenerator : ai content.
-         ```json
-         {
-             "id": "chatcmpl-r0wujizf2i2xb60mjiwt",
-             "object": "chat.completion.chunk",
-             "created": 1706775384,
-             "model": "trt-llm-phind-model-serving",
-             "choices": [
-                 {
-                     "index": 0,
-                     "delta": {
-                         "content": "Hello! How can I assist you with your programming today?"
-                     },
-                     "finish_reason": null
-                 }
-             ]
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         payload = {
-             "additional_extension_context": "",
-             "allow_magic_buttons": True,
-             "is_vscode_extension": True,
-             "message_history": [
-                 {"content": conversation_prompt, "metadata": {}, "role": "user"}
-             ],
-             "requested_model": self.model,
-             "user_input": prompt,
-         }
-
-         async def for_stream():
-             async with self.session.stream(
-                 "POST",
-                 self.chat_endpoint,
-                 json=payload,
-                 timeout=self.timeout,
-             ) as response:
-                 if (
-                     not response.is_success
-                     or response.headers.get("Content-Type")
-                     != "text/event-stream; charset=utf-8"
-                 ):
-                     raise exceptions.FailedToGenerateResponseError(
-                         f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
-                     )
-                 streaming_text = ""
-                 async for value in response.aiter_lines():
-                     try:
-                         modified_value = re.sub("data:", "", value)
-                         json_modified_value = json.loads(modified_value)
-                         retrieved_text = await self.get_message(json_modified_value)
-                         if not retrieved_text:
-                             continue
-                         streaming_text += retrieved_text
-                         json_modified_value["choices"][0]["delta"][
-                             "content"
-                         ] = streaming_text
-                         self.last_response.update(json_modified_value)
-                         yield value if raw else json_modified_value
-                     except json.decoder.JSONDecodeError:
-                         pass
-             self.conversation.update_chat_history(
-                 prompt, await self.get_message(self.last_response)
-             )
-
-         async def for_non_stream():
-             async for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return (
-             for_stream()
-             if stream and not synchronous_generator
-             else await for_non_stream()
-         )
-
-     async def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str | AsyncGenerator:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str|AsyncGenerator: Response generated
-         """
-
-         async def for_stream():
-             ask_resp = await self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             )
-             async for response in ask_resp:
-                 yield await self.get_message(response)
-
-         async def for_non_stream():
-             return await self.get_message(
-                 await self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         if response.get("type", "") == "metadata":
-             return ""
-
-         delta: dict = response["choices"][0]["delta"]
-
-         if not delta:
-             return ""
-
-         elif delta.get("function_call"):
-             if self.quiet:
-                 return ""
-
-             function_call: dict = delta["function_call"]
-             if function_call.get("name"):
-                 return function_call["name"]
-             elif function_call.get("arguments"):
-                 return function_call.get("arguments")
-
-         elif delta.get("metadata"):
-             if self.quiet:
-                 return ""
-             return yaml.dump(delta["metadata"])
-
-         else:
-             return (
-                 response["choices"][0]["delta"].get("content")
-                 if response["choices"][0].get("finish_reason") is None
-                 else ""
-             )
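Both Phind classes above implement the package's standard `ask`/`chat`/`get_message` surface. A minimal sketch of driving them, assuming a pre-deletion checkout where this import path still resolves:

```python
import asyncio

# Pre-deletion import path; both classes are removed by this commit.
from webscout.Provider.Phind import PhindSearch, AsyncPhindSearch

ph = PhindSearch(quiet=True)  # quiet=True skips metadata/web-result chunks
print(ph.chat("Write a Python one-liner to reverse a string"))

async def main() -> None:
    aph = AsyncPhindSearch(quiet=True)
    # Streaming: each item is the running, accumulated answer text, because
    # ask() overwrites delta["content"] with the text gathered so far.
    async for text in await aph.chat("Explain the GIL in one paragraph", stream=True):
        print(text)

asyncio.run(main())
```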
 
webscout/Provider/Poe.py DELETED
@@ -1,208 +0,0 @@
- from poe_api_wrapper import PoeApi
- from poe_api_wrapper.api import BOTS_LIST
- from ..AIbase import Provider
- from ..AIutel import Conversation
- from ..AIutel import Optimizers
- from ..AIutel import AwesomePrompts
- from pathlib import Path
- from json import loads
- from json import dumps
- from loguru import logger
- import logging
-
- logger.remove()
-
-
- class POE(Provider):
-     def __init__(
-         self,
-         cookie: str,
-         model: str = "Assistant",
-         proxy: bool = False,
-         timeout: int = 30,
-         filepath: str = None,
-         update_file: bool = True,
-         intro: str = None,
-         act: str = None,
-         init: bool = True,
-     ):
-         """Initializes POE
-
-         Args:
-             cookie (str): Path to `poe.com.cookies.json` file or 'p-b' cookie-value.
-             model (str, optional): Model name. Defaults to "Assistant".
-             proxy (bool, optional): Flag for Httpx request proxy. Defaults to False.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             filepath (str, optional): Path to save the chat history. Defaults to None.
-             update_file (bool, optional): Flag for controlling chat history updates. Defaults to True.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             init (bool, optional): Resend the intro prompt. Defaults to True.
-         """
-         assert isinstance(
-             cookie, str
-         ), f"Cookie must be of {str} datatype only not {type(cookie)}"
-         assert (
-             model in BOTS_LIST.keys()
-         ), f"model name '{model}' is not one of {', '.join(list(BOTS_LIST.keys()))}"
-         cookie_path = Path(cookie)
-
-         if cookie_path.exists() or any(["/" in cookie, ".json" in cookie]):
-             cookie = None
-             all_cookies = loads(cookie_path.read_text())
-             for entry in all_cookies:
-                 if entry["name"] == "p-b":
-                     cookie = entry["value"]
-             assert (
-                 cookie
-             ), f'Required cookie value cannot be retrieved from the path "{cookie_path.as_posix()}"'
-
-         if proxy:
-             import poe_api_wrapper.proxies as proxies
-
-             proxies.PROXY = True
-
-         self.bot = BOTS_LIST[model]
-         self.session = PoeApi(cookie)
-         self.last_response = {}
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             status=False, filepath=filepath, update_file=update_file
-         )
-         if init:
-             self.ask(self.conversation.intro)  # Init
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "id": "TWVzc2FnZToxMTU0MzgyNDQ1ODU=",
-             "messageId": 115438244585,
-             "creationTime": 1707777376544407,
-             "clientNonce": null,
-             "state": "complete",
-             "text": "Hello! How can I assist you today?",
-             "author": "capybara",
-             "contentType": "text_markdown",
-             "sourceType": "chat_input",
-             "attachmentTruncationState": "not_truncated",
-             "attachments": [],
-             "vote": null,
-             "suggestedReplies": [],
-             "hasCitations": false,
-             "__isNode": "Message",
-             "textLengthOnCancellation": null,
-             "chatCode": "21a2jn0yrq9phxiy478",
-             "chatId": 328236777,
-             "title": null,
-             "response": ""
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         def for_stream():
-             for response in self.session.send_message(self.bot, conversation_prompt):
-                 if raw:
-                     yield dumps(response)
-                 else:
-                     yield response
-
-                 self.last_response.update(response)
-
-             self.conversation.update_chat_history(
-                 prompt,
-                 self.get_message(self.last_response),
-                 force=True,
-             )
-
-         def for_non_stream():
-             # let's make use of stream
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
 
webscout/Provider/Reka.py DELETED
@@ -1,226 +0,0 @@
- import time
- import uuid
- from selenium import webdriver
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.support.ui import WebDriverWait
- import click
- import requests
- from requests import get
- from uuid import uuid4
- from re import findall
- from requests.exceptions import RequestException
- from curl_cffi.requests import get, RequestsError
- import g4f
- from random import randint
- from PIL import Image
- import io
- import re
- import json
- import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from Helpingai_T2 import Perplexity
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
- import logging
- import httpx
- #-----------------------------------------------REKA-----------------------------------------------
- class REKA(Provider):
-     def __init__(
-         self,
-         api_key: str,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "reka-core",
-         system_prompt: str = "Be Helpful and Friendly. Keep your response straightforward, short and concise",
-         use_search_engine: bool = False,
-         use_code_interpreter: bool = False,
-     ):
-         """Instantiates REKA
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): REKA model name. Defaults to "reka-core".
-             system_prompt (str, optional): System prompt for REKA. Defaults to "Be Helpful and Friendly. Keep your response straightforward, short and concise".
-             use_search_engine (bool, optional): Whether to use the search engine. Defaults to False.
-             use_code_interpreter (bool, optional): Whether to use the code interpreter. Defaults to False.
-         """
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://chat.reka.ai/api/chat"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.system_prompt = system_prompt
-         self.use_search_engine = use_search_engine
-         self.use_code_interpreter = use_code_interpreter
-         self.access_token = api_key
-         self.headers = {
-             "Authorization": f"Bearer {self.access_token}",
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "text" : "How may I assist you today?"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         self.session.headers.update(self.headers)
-         payload = {
-             "conversation_history": [
-                 {"type": "human", "text": f"## SYSTEM PROMPT: {self.system_prompt}\n\n## QUERY: {conversation_prompt}"},
-             ],
-             "stream": stream,
-             "use_search_engine": self.use_search_engine,
-             "use_code_interpreter": self.use_code_interpreter,
-             "model_name": self.model,
-             # "model_name": "reka-flash",
-             # "model_name": "reka-edge",
-         }
-
-         def for_stream():
-             response = self.session.post(self.api_endpoint, json=payload, stream=True, timeout=self.timeout)
-             if not response.ok:
-                 raise Exception(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             for value in response.iter_lines(
-                 decode_unicode=True,
-                 chunk_size=self.stream_chunk_size,
-             ):
-                 try:
-                     resp = json.loads(value)
-                     self.last_response.update(resp)
-                     yield value if raw else resp
-                 except json.decoder.JSONDecodeError:
-                     pass
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             # let's make use of stream
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response.get("text")
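The deleted `REKA` class calls chat.reka.ai directly with a bearer token. A usage sketch under the same pre-deletion assumption; the token string below is a placeholder, not a working credential, and the model names come from the commented payload above:

```python
# Pre-deletion import path; api_key is the Bearer token the class sends
# to https://chat.reka.ai/api/chat (placeholder value shown).
from webscout.Provider.Reka import REKA

ai = REKA(api_key="YOUR_REKA_TOKEN", model="reka-core")
print(ai.chat("Give one practical use of a Bloom filter"))

# Streaming mirrors the other providers: each yield is the "text" field
# of one parsed JSON chunk.
for chunk in ai.chat("And one drawback?", stream=True):
    print(chunk, end="", flush=True)
```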
 
webscout/Provider/ThinkAnyAI.py DELETED
@@ -1,280 +0,0 @@
- import time
- import uuid
- from selenium import webdriver
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.support.ui import WebDriverWait
- import click
- import requests
- from requests import get
- from uuid import uuid4
- from re import findall
- from requests.exceptions import RequestException
- from curl_cffi.requests import get, RequestsError
- import g4f
- from random import randint
- from PIL import Image
- import io
- import re
- import json
- import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from Helpingai_T2 import Perplexity
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
- import logging
- import httpx
- #------------------------------------ThinkAnyAI------------
- class ThinkAnyAI(Provider):
-     def __init__(
-         self,
-         model: str = "claude-3-haiku",
-         locale: str = "en",
-         web_search: bool = False,
-         chunk_size: int = 1,
-         streaming: bool = True,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Initializes ThinkAnyAI
-
-         Args:
-             model (str): The AI model to be used for generating responses. Defaults to "claude-3-haiku".
-             locale (str): The language locale. Defaults to "en" (English).
-             web_search (bool): Whether to include web search results in the response. Defaults to False.
-             chunk_size (int): The size of data chunks when streaming responses. Defaults to 1.
-             streaming (bool): Whether to stream response data. Defaults to True.
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.base_url = "https://thinkany.ai/api"
-         self.model = model
-         self.locale = locale
-         self.web_search = web_search
-         self.chunk_size = chunk_size
-         self.streaming = streaming
-         self.last_response = {}
-         self.session = requests.Session()
-         self.session.proxies = proxies
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, max_tokens, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict | AsyncGenerator:
-         """Chat with AI.
-
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "content": "General Kenobi! \n\n(I couldn't help but respond with the iconic Star Wars greeting since you used it first. )\n\nIs there anything I can help you with today?\n[Image of Hello there General Kenobi]",
-             "conversation_id": "c_f13f6217f9a997aa",
-             "response_id": "r_d3665f95975c368f",
-             "factualityQueries": null,
-             "textQuery": [
-                 "hello there",
-                 1
-             ],
-             "choices": [
-                 {
-                     "id": "rc_ea075c9671bfd8cb",
-                     "content": [
-                         "General Kenobi! \n\n(I couldn't help but respond with the iconic Star Wars greeting since you used it first. )\n\nIs there anything I can help you with today?\n[Image of Hello there General Kenobi]"
-                     ]
-                 },
-                 {
-                     "id": "rc_de6dd3fb793a5402",
-                     "content": [
-                         "General Kenobi! (or just a friendly hello, whichever you prefer!). \n\nI see you're a person of culture as well. *Star Wars* references are always appreciated. \n\nHow can I help you today?\n"
-                     ]
-                 },
-                 {
-                     "id": "rc_a672ac089caf32db",
-                     "content": [
-                         "General Kenobi! (or just a friendly hello if you're not a Star Wars fan!). \n\nHow can I help you today? Feel free to ask me anything, or tell me what you'd like to chat about. I'm here to assist in any way I can.\n[Image of Obi-Wan Kenobi saying hello there]"
-                     ]
-                 }
-             ],
-             "images": [
-                 "https://i.pinimg.com/originals/40/74/60/407460925c9e419d82b93313f0b42f71.jpg"
-             ]
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         def initiate_conversation(query: str) -> str:
-             """
-             Initiates a new conversation with the ThinkAny AI API.
-
-             Args:
-                 query (str): The initial query to start the conversation.
-
-             Returns:
-                 str: The UUID (Unique Identifier) of the conversation.
-             """
-             url = f"{self.base_url}/new-conversation"
-             payload = {
-                 "content": query,
-                 "locale": self.locale,
-                 "mode": "search" if self.web_search else "chat",
-                 "model": self.model,
-                 "source": "all",
-             }
-             response = self.session.post(url, json=payload)
-             return response.json().get("data", {}).get("uuid", "DevsDoCode")
-
-         def RAG_search(uuid: str) -> tuple[bool, list]:
-             """
-             Performs a web search using the Retrieve And Generate (RAG) model.
-
-             Args:
-                 uuid (str): The UUID of the conversation.
-
-             Returns:
-                 tuple: A tuple containing a boolean indicating the success of the search
-                        and a list of search result links.
-             """
-             if not self.web_search:
-                 return True, []
-             url = f"{self.base_url}/rag-search"
-             payload = {"conv_uuid": uuid}
-             response = self.session.post(url, json=payload)
-             links = [source["link"] for source in response.json().get("data", [])]
-             return response.json().get("message", "").strip(), links
-
-         def for_stream():
-             conversation_uuid = initiate_conversation(conversation_prompt)
-             web_search_result, links = RAG_search(conversation_uuid)
-             if not web_search_result:
-                 print("Failed to generate WEB response. Making normal Query...")
-
-             url = f"{self.base_url}/chat"
-             payload = {
-                 "role": "user",
-                 "content": prompt,
-                 "conv_uuid": conversation_uuid,
-                 "model": self.model,
-             }
-             response = self.session.post(url, json=payload, stream=True)
-             complete_content = ""
-             for content in response.iter_content(
-                 decode_unicode=True, chunk_size=self.chunk_size
-             ):
-                 complete_content += content
-                 yield content if raw else dict(text=complete_content)
-             self.last_response.update(dict(text=complete_content, links=links))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: Dict[str, Any]) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
 
webscout/Provider/Xjai.py DELETED
@@ -1,230 +0,0 @@
- import time
- import uuid
- from selenium import webdriver
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.support.ui import WebDriverWait
- import click
- import requests
- from requests import get
- from uuid import uuid4
- from re import findall
- from requests.exceptions import RequestException
- from curl_cffi.requests import get, RequestsError
- import g4f
- from random import randint
- from PIL import Image
- import io
- import re
- import json
- import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
- import logging
- import httpx
- #-----------------------------------------------xjai-------------------------------------------
- class Xjai(Provider):
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         temperature: float = 0.8,
-         top_p: float = 1,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """
-         Initializes the Xjai class for interacting with the Xjai AI chat API.
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             temperature (float, optional): The creativity level of the AI's response. Defaults to 0.8.
-             top_p (float, optional): The probability threshold for token selection. Defaults to 1.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.temperature = temperature
-         self.top_p = top_p
-         self.chat_endpoint = "https://p1api.xjai.pro/freeapi/chat-process"
-         self.stream_chunk_size = 1  # Process response line by line
-         self.timeout = timeout
-         self.last_response = {}
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Any:
-         """
-         Sends a chat request to the Xjai AI chat API and returns the response.
-
-         Args:
-             prompt (str): The query to send to the AI.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-
-         Returns:
-             Any: The response from the AI, either as a dictionary or a generator
-                  depending on the `stream` and `raw` parameters.
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         headers = {
-             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
-             "(KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"
-         }
-
-         payload = {
-             "prompt": conversation_prompt + "\n\nReply in English Only",
-             "systemMessage": "Reply in English Only",
-             "temperature": self.temperature,
-             "top_p": self.top_p
-         }
-
-         def generate_response():
-             response = self.session.post(
-                 self.chat_endpoint, headers=headers, json=payload, stream=True, timeout=self.timeout
-             )
-             output = ""
-             print_next = False
-
-             for line in response.iter_lines(decode_unicode=True, chunk_size=self.stream_chunk_size):
-                 # decode_unicode=True already yields str; only decode raw bytes.
-                 line_content = line.decode("utf-8") if isinstance(line, bytes) else line
-                 # Filter out irrelevant content
-                 if '[ChatAI](https://srv.aiflarepro.com/#/?cid=4111)' in line_content:
-                     continue
-                 if '&KFw6loC9Qvy&' in line_content:
-                     parts = line_content.split('&KFw6loC9Qvy&')
-                     if print_next:
-                         output += parts[0]
-                         print_next = False
-                     else:
-                         output += parts[1]
-                         print_next = True
-                     if len(parts) > 2:
-                         print_next = False
-                 elif print_next:
-                     output += line_content + '\n'
-
-             # Update chat history
-             self.conversation.update_chat_history(prompt, output)
-
-             return output
-
-         def for_stream():
-             response = generate_response()
-             for line in response.splitlines():
-                 yield line if raw else dict(text=line)
-
-         def for_non_stream():
-             response = generate_response()
-             return response if raw else dict(text=response)
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Any:
-         """
-         Generates a response from the Xjai AI chat API.
-
-         Args:
-             prompt (str): The query to send to the AI.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-
-         Returns:
-             Any: The response from the AI, either as a string or a generator
-                  depending on the `stream` parameter.
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: Any) -> str:
-         """
-         Retrieves the message from the AI's response.
-
-         Args:
-             response (Any): The response from the AI, either a dictionary
-                             or a raw string.
-
-         Returns:
-             str: The extracted message from the AI's response.
-         """
-         if isinstance(response, dict):
-             return response["text"]
-         else:  # Assume raw string
-             return response
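A sketch for the deleted `Xjai` provider. Note that `ask()` collects the whole upstream stream inside `generate_response()` before yielding, so `stream=True` only replays the finished answer line by line (pre-deletion import path assumed):

```python
# Pre-deletion import path; hits the free p1api.xjai.pro endpoint.
from webscout.Provider.Xjai import Xjai

ai = Xjai(temperature=0.5, top_p=0.9)
print(ai.chat("Name three uses of content-addressable storage"))
```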
 
webscout/Provider/Yepchat.py DELETED
@@ -1,478 +0,0 @@
1
- import time
2
- import uuid
3
- from selenium import webdriver
4
- from selenium.webdriver.chrome.options import Options
5
- from selenium.webdriver.common.by import By
6
- from selenium.webdriver.support import expected_conditions as EC
7
- from selenium.webdriver.support.ui import WebDriverWait
8
- import click
9
- import requests
10
- from requests import get
11
- from uuid import uuid4
12
- from re import findall
13
- from requests.exceptions import RequestException
14
- from curl_cffi.requests import get, RequestsError
15
- import g4f
16
- from random import randint
17
- from PIL import Image
18
- import io
19
- import re
20
- import json
21
- import yaml
22
- from ..AIutel import Optimizers
23
- from ..AIutel import Conversation
24
- from ..AIutel import AwesomePrompts, sanitize_stream
25
- from ..AIbase import Provider, AsyncProvider
26
- from Helpingai_T2 import Perplexity
27
- from webscout import exceptions
28
- from typing import Any, AsyncGenerator, Dict
29
- import logging
30
- import httpx
31
-
32
- #-------------------------------------------------------yep.com--------------------------------------------------------
33
- class YEPCHAT(Provider):
34
- def __init__(
35
- self,
36
- is_conversation: bool = True,
37
- max_tokens: int = 600,
38
- temperature: float = 0.6,
39
- presence_penalty: int = 0,
40
- frequency_penalty: int = 0,
41
- top_p: float = 0.7,
42
- model: str = "Mixtral-8x7B-Instruct-v0.1",
43
- timeout: int = 30,
44
- intro: str = None,
45
- filepath: str = None,
46
- update_file: bool = True,
47
- proxies: dict = {},
48
- history_offset: int = 10250,
49
- act: str = None,
50
- ):
51
- """Instantiates YEPCHAT
52
-
53
- Args:
54
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
55
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
56
- temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.6.
57
- presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
58
- frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
59
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.7.
60
- model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
61
- timeout (int, optional): Http request timeout. Defaults to 30.
62
- intro (str, optional): Conversation introductory prompt. Defaults to None.
63
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
64
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
65
- proxies (dict, optional): Http request proxies. Defaults to {}.
66
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
67
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
68
- """
69
- self.session = requests.Session()
70
- self.is_conversation = is_conversation
71
- self.max_tokens_to_sample = max_tokens
72
- self.model = model
73
- self.temperature = temperature
74
- self.presence_penalty = presence_penalty
75
- self.frequency_penalty = frequency_penalty
76
- self.top_p = top_p
77
- self.chat_endpoint = "https://api.yep.com/v1/chat/completions"
78
- self.stream_chunk_size = 64
79
- self.timeout = timeout
80
- self.last_response = {}
81
- self.headers = {
82
- "Accept": "*/*",
83
- "Accept-Encoding": "gzip, deflate",
84
- "Accept-Language": "en-US,en;q=0.9",
85
- "Content-Type": "application/json; charset=utf-8",
86
- "Origin": "https://yep.com",
87
- "Referer": "https://yep.com/",
88
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
89
- }
90
-
91
- self.__available_optimizers = tuple(
92
- method
93
- for method in dir(Optimizers)
94
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
95
- )
96
- self.session.headers.update(self.headers)
97
- Conversation.intro = (
98
- AwesomePrompts().get_act(
99
- act, raise_not_found=True, default=None, case_insensitive=True
100
- )
101
- if act
102
- else intro or Conversation.intro
103
- )
104
- self.conversation = Conversation(
105
- is_conversation, self.max_tokens_to_sample, filepath, update_file
106
- )
107
- self.conversation.history_offset = history_offset
108
- self.session.proxies = proxies
109
-
110
- def ask(
111
- self,
112
- prompt: str,
113
- stream: bool = False,
114
- raw: bool = False,
115
- optimizer: str = None,
116
- conversationally: bool = False,
117
- ) -> dict:
118
- """Chat with AI
119
-
120
- Args:
121
- prompt (str): Prompt to be sent.
122
- stream (bool, optional): Flag for streaming response. Defaults to False.
123
- raw (bool, optional): Stream back raw response as received. Defaults to False.
124
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
125
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
126
- Returns:
127
- dict : {}
128
- ```json
129
- {
130
- "id": "cmpl-c61c1c88de4e4ad3a79134775d17ea0c",
131
- "object": "chat.completion.chunk",
132
- "created": 1713876886,
133
- "model": "Mixtral-8x7B-Instruct-v0.1",
134
- "choices": [
135
- {
136
- "index": 0,
137
- "delta": {
138
- "role": null,
139
- "content": " Sure, I can help with that. Are you looking for information on how to start coding, or do you need help with a specific coding problem? We can discuss various programming languages like Python, JavaScript, Java, C++, or others. Please provide more details so I can assist you better."
140
- },
141
- "finish_reason": null
142
- }
143
- ]
144
- }
145
- ```
146
- """
147
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
148
- if optimizer:
149
- if optimizer in self.__available_optimizers:
150
- conversation_prompt = getattr(Optimizers, optimizer)(
151
- conversation_prompt if conversationally else prompt
152
- )
153
- else:
154
- raise Exception(
155
- f"Optimizer is not one of {self.__available_optimizers}"
156
- )
157
- self.session.headers.update(self.headers)
158
- payload = {
159
- "stream": True,
160
- "max_tokens": 1280,
161
- "top_p": self.top_p,
162
- "temperature": self.temperature,
163
- "messages": [{"content": conversation_prompt, "role": "user"}],
164
- "model": self.model,
165
- }
166
-
167
- def for_stream():
168
- response = self.session.post(
169
- self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
170
- )
171
- if not response.ok:
172
- raise exceptions.FailedToGenerateResponseError(
173
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
174
- )
175
-
176
- message_load = ""
177
- for value in response.iter_lines(
178
- decode_unicode=True,
179
- delimiter="" if raw else "data:",
180
- chunk_size=self.stream_chunk_size,
181
- ):
182
- try:
183
- resp = json.loads(value)
184
- incomplete_message = self.get_message(resp)
185
- if incomplete_message:
186
- message_load += incomplete_message
187
- resp["choices"][0]["delta"]["content"] = message_load
188
- self.last_response.update(resp)
189
- yield value if raw else resp
190
- elif raw:
191
- yield value
192
- except json.decoder.JSONDecodeError:
193
- pass
194
- self.conversation.update_chat_history(
195
- prompt, self.get_message(self.last_response)
196
- )
197
-
198
- def for_non_stream():
199
- for _ in for_stream():
200
- pass
201
- return self.last_response
202
-
203
- return for_stream() if stream else for_non_stream()
204
-
205
- def chat(
206
- self,
207
- prompt: str,
208
- stream: bool = False,
209
- optimizer: str = None,
210
- conversationally: bool = False,
211
- ) -> str:
212
- """Generate response `str`
213
- Args:
214
- prompt (str): Prompt to be sent.
215
- stream (bool, optional): Flag for streaming response. Defaults to False.
216
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
217
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
218
- Returns:
219
- str: Response generated
220
- """
221
-
222
- def for_stream():
223
- for response in self.ask(
224
- prompt, True, optimizer=optimizer, conversationally=conversationally
225
- ):
226
- yield self.get_message(response)
227
-
228
- def for_non_stream():
229
- return self.get_message(
230
- self.ask(
231
- prompt,
232
- False,
233
- optimizer=optimizer,
234
- conversationally=conversationally,
235
- )
236
- )
237
-
238
- return for_stream() if stream else for_non_stream()
239
-
240
- def get_message(self, response: dict) -> str:
241
- """Retrieves message only from response
242
-
243
- Args:
244
- response (dict): Response generated by `self.ask`
245
-
246
- Returns:
247
- str: Message extracted
248
- """
249
- assert isinstance(response, dict), "Response should be of dict data-type only"
250
- try:
251
- if response["choices"][0].get("delta"):
252
- return response["choices"][0]["delta"]["content"]
253
- return response["choices"][0]["message"]["content"]
254
- except KeyError:
255
- return ""
256
- class AsyncYEPCHAT(AsyncProvider):
257
- def __init__(
258
- self,
259
- is_conversation: bool = True,
260
- max_tokens: int = 600,
261
- temperature: float = 0.6,
262
- presence_penalty: int = 0,
263
- frequency_penalty: int = 0,
264
- top_p: float = 0.7,
265
- model: str = "Mixtral-8x7B-Instruct-v0.1",
266
- timeout: int = 30,
267
- intro: str = None,
268
- filepath: str = None,
269
- update_file: bool = True,
270
- proxies: dict = {},
271
- history_offset: int = 10250,
272
- act: str = None,
273
- ):
274
- """Instantiates YEPCHAT
275
-
276
- Args:
277
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
278
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
279
- temperature (float, optional): Degree of randomness in the generated text. Defaults to 0.6.
280
- presence_penalty (int, optional): Penalty applied to tokens already present in the text. Defaults to 0.
281
- frequency_penalty (int, optional): Penalty applied to frequently repeated tokens. Defaults to 0.
282
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.7.
283
- model (str, optional): LLM model name. Defaults to "Mixtral-8x7B-Instruct-v0.1".
284
- timeout (int, optional): Http request timeout. Defaults to 30.
285
- intro (str, optional): Conversation introductory prompt. Defaults to None.
286
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
287
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
288
- proxies (dict, optional): Http request proxies. Defaults to {}.
289
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
290
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
291
- """
292
- self.session = requests.Session()
293
- self.is_conversation = is_conversation
294
- self.max_tokens_to_sample = max_tokens
295
- self.model = model
296
- self.temperature = temperature
297
- self.presence_penalty = presence_penalty
298
- self.frequency_penalty = frequency_penalty
299
- self.top_p = top_p
300
- self.chat_endpoint = "https://api.yep.com/v1/chat/completions"
301
- self.stream_chunk_size = 64
302
- self.timeout = timeout
303
- self.last_response = {}
304
- self.headers = {
305
- "Accept": "*/*",
306
- "Accept-Encoding": "gzip, deflate",
307
- "Accept-Language": "en-US,en;q=0.9",
308
- "Content-Type": "application/json; charset=utf-8",
309
- "Origin": "https://yep.com",
310
- "Referer": "https://yep.com/",
311
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
312
- }
313
-
314
- self.__available_optimizers = tuple(
315
- method
316
- for method in dir(Optimizers)
317
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
318
- )
319
- Conversation.intro = (
320
- AwesomePrompts().get_act(
321
- act, raise_not_found=True, default=None, case_insensitive=True
322
- )
323
- if act
324
- else intro or Conversation.intro
325
- )
326
- self.conversation = Conversation(
327
- is_conversation, self.max_tokens_to_sample, filepath, update_file
328
- )
329
- self.conversation.history_offset = history_offset
330
- self.session = httpx.AsyncClient(
331
- headers=self.headers,
332
- proxies=proxies,
333
- )
334
-
335
- async def ask(
336
- self,
337
- prompt: str,
338
- stream: bool = False,
339
- raw: bool = False,
340
- optimizer: str = None,
341
- conversationally: bool = False,
342
- ) -> dict:
343
- """Chat with AI asynchronously.
344
-
345
- Args:
346
- prompt (str): Prompt to be sent.
347
- stream (bool, optional): Flag for streaming response. Defaults to False.
348
- raw (bool, optional): Stream back raw response as received. Defaults to False.
349
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
350
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
351
- Returns:
352
- dict : {}
353
- ```json
354
- {
355
- "id": "cmpl-c61c1c88de4e4ad3a79134775d17ea0c",
356
- "object": "chat.completion.chunk",
357
- "created": 1713876886,
358
- "model": "Mixtral-8x7B-Instruct-v0.1",
359
- "choices": [
360
- {
361
- "index": 0,
362
- "delta": {
363
- "role": null,
364
- "content": " Sure, I can help with that. Are you looking for information on how to start coding, or do you need help with a specific coding problem? We can discuss various programming languages like Python, JavaScript, Java, C++, or others. Please provide more details so I can assist you better."
365
- },
366
- "finish_reason": null
367
- }
368
- ]
369
- }
370
- ```
371
- """
372
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
373
- if optimizer:
374
- if optimizer in self.__available_optimizers:
375
- conversation_prompt = getattr(Optimizers, optimizer)(
376
- conversation_prompt if conversationally else prompt
377
- )
378
- else:
379
- raise Exception(
380
- f"Optimizer is not one of {self.__available_optimizers}"
381
- )
382
- payload = {
383
- "stream": True,
384
- "max_tokens": 1280,
385
- "top_p": self.top_p,
386
- "temperature": self.temperature,
387
- "messages": [{"content": conversation_prompt, "role": "user"}],
388
- "model": self.model,
389
- }
390
-
391
- async def for_stream():
392
- async with self.session.stream(
393
- "POST", self.chat_endpoint, json=payload, timeout=self.timeout
394
- ) as response:
395
- if not response.is_success:
396
- raise exceptions.FailedToGenerateResponseError(
397
- f"Failed to generate response - ({response.status_code}, {response.reason_phrase}) - {response.text}"
398
- )
399
-
400
- message_load = ""
401
- async for value in response.aiter_lines():
402
- try:
403
- resp = sanitize_stream(value)
404
- incomplete_message = await self.get_message(resp)
405
- if incomplete_message:
406
- message_load += incomplete_message
407
- resp["choices"][0]["delta"]["content"] = message_load
408
- self.last_response.update(resp)
409
- yield value if raw else resp
410
- elif raw:
411
- yield value
412
- except json.decoder.JSONDecodeError:
413
- pass
414
-
415
- self.conversation.update_chat_history(
416
- prompt, await self.get_message(self.last_response)
417
- )
418
-
419
- async def for_non_stream():
420
- async for _ in for_stream():
421
- pass
422
- return self.last_response
423
-
424
- return for_stream() if stream else await for_non_stream()
425
-
426
- async def chat(
427
- self,
428
- prompt: str,
429
- stream: bool = False,
430
- optimizer: str = None,
431
- conversationally: bool = False,
432
- ) -> str:
433
- """Generate response `str` asynchronously.
434
- Args:
435
- prompt (str): Prompt to be sent.
436
- stream (bool, optional): Flag for streaming response. Defaults to False.
437
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
438
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
439
- Returns:
440
- str: Response generated
441
- """
442
-
443
- async def for_stream():
444
- async_ask = await self.ask(
445
- prompt, True, optimizer=optimizer, conversationally=conversationally
446
- )
447
-
448
- async for response in async_ask:
449
- yield await self.get_message(response)
450
-
451
- async def for_non_stream():
452
- return await self.get_message(
453
- await self.ask(
454
- prompt,
455
- False,
456
- optimizer=optimizer,
457
- conversationally=conversationally,
458
- )
459
- )
460
-
461
- return for_stream() if stream else await for_non_stream()
462
-
463
- async def get_message(self, response: dict) -> str:
464
- """Retrieves message only from response
465
-
466
- Args:
467
- response (dict): Response generated by `self.ask`
468
-
469
- Returns:
470
- str: Message extracted
471
- """
472
- assert isinstance(response, dict), "Response should be of dict data-type only"
473
- try:
474
- if response["choices"][0].get("delta"):
475
- return response["choices"][0]["delta"]["content"]
476
- return response["choices"][0]["message"]["content"]
477
- except KeyError:
478
- return ""
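
For reference, a minimal usage sketch of the deleted `YEPCHAT`/`AsyncYEPCHAT` providers. This assumes the `webscout` package as of the parent commit (9e7090f); the imports no longer resolve after this deletion, and the model name is simply the class default shown above.

```python
# Hypothetical sketch: drives the deleted YEPCHAT provider against api.yep.com.
import asyncio

from webscout.Provider import YEPCHAT, AsyncYEPCHAT  # gone after this commit

bot = YEPCHAT(model="Mixtral-8x7B-Instruct-v0.1", timeout=30)

# Non-streaming: chat() drains the SSE stream internally and returns one string.
print(bot.chat("How do I reverse a list in Python?"))

# Streaming: each yielded value is a *cumulative* snapshot, because ask() folds
# every delta into message_load before handing the chunk back.
seen = ""
for snapshot in bot.chat("Explain list slicing.", stream=True):
    print(snapshot[len(seen):], end="", flush=True)  # print only the new tail
    seen = snapshot

async def main() -> None:
    abot = AsyncYEPCHAT(timeout=30)
    print(await abot.chat("Name three Python web frameworks."))

asyncio.run(main())
```
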
webscout/Provider/Youchat.py DELETED
@@ -1,221 +0,0 @@
1
- import time
2
- import uuid
3
- from selenium import webdriver
4
- from selenium.webdriver.chrome.options import Options
5
- from selenium.webdriver.common.by import By
6
- from selenium.webdriver.support import expected_conditions as EC
7
- from selenium.webdriver.support.ui import WebDriverWait
8
- import click
9
- import requests
10
- from requests import get
11
- from uuid import uuid4
12
- from re import findall
13
- from requests.exceptions import RequestException
14
- from curl_cffi.requests import get, RequestsError
15
- import g4f
16
- from random import randint
17
- from PIL import Image
18
- import io
19
- import re
20
- import json
21
- import yaml
22
- from ..AIutel import Optimizers
23
- from ..AIutel import Conversation
24
- from ..AIutel import AwesomePrompts, sanitize_stream
25
- from ..AIbase import Provider, AsyncProvider
26
- from Helpingai_T2 import Perplexity
27
- from webscout import exceptions
28
- from typing import Any, AsyncGenerator, Dict
29
- import logging
30
- import httpx
31
-
32
- #-------------------------------------------------------youchat--------------------------------------------------------
33
- class YouChat(Provider):
34
- def __init__(
35
- self,
36
- is_conversation: bool = True,
37
- max_tokens: int = 600,
38
- timeout: int = 30,
39
- intro: str = None,
40
- filepath: str = None,
41
- update_file: bool = True,
42
- proxies: dict = {},
43
- history_offset: int = 10250,
44
- act: str = None,
45
- ):
46
- self.session = requests.Session()
47
- self.is_conversation = is_conversation
48
- self.max_tokens_to_sample = max_tokens
49
- self.chat_endpoint = "https://you.com/api/streamingSearch"
50
- self.stream_chunk_size = 64
51
- self.timeout = timeout
52
- self.last_response = {}
53
-
54
- self.payload = {
55
- "q": "",
56
- "page": 1,
57
- "count": 10,
58
- "safeSearch": "Off",
59
- "onShoppingPage": False,
60
- "mkt": "",
61
- "responseFilter": "WebPages,Translations,TimeZone,Computation,RelatedSearches",
62
- "domain": "youchat",
63
- "queryTraceId": uuid.uuid4(),
64
- "conversationTurnId": uuid.uuid4(),
65
- "pastChatLength": 0,
66
- "selectedChatMode": "default",
67
- "chat": "[]",
68
- }
69
-
70
- self.headers = {
71
- "cache-control": "no-cache",
72
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
73
- 'Referer': f'https://you.com/search?q={self.payload["q"]}&fromSearchBar=true&tbm=youchat&chatMode=default'
74
- }
75
-
76
- self.__available_optimizers = tuple(
77
- method
78
- for method in dir(Optimizers)
79
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
80
- )
81
- self.session.headers.update(self.headers)
82
- Conversation.intro = (
83
- AwesomePrompts().get_act(
84
- act, raise_not_found=True, default=None, case_insensitive=True
85
- )
86
- if act
87
- else intro or Conversation.intro
88
- )
89
- self.conversation = Conversation(
90
- is_conversation, self.max_tokens_to_sample, filepath, update_file
91
- )
92
- self.conversation.history_offset = history_offset
93
- self.session.proxies = proxies
94
-
95
- def ask(
96
- self,
97
- prompt: str,
98
- stream: bool = False,
99
- raw: bool = False,
100
- optimizer: str = None,
101
- conversationally: bool = False,
102
- ) -> dict:
103
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
104
- if optimizer:
105
- if optimizer in self.__available_optimizers:
106
- conversation_prompt = getattr(Optimizers, optimizer)(
107
- conversation_prompt if conversationally else prompt
108
- )
109
- else:
110
- raise Exception(
111
- f"Optimizer is not one of {self.__available_optimizers}"
112
- )
113
- self.session.headers.update(self.headers)
114
- self.session.headers.update(
115
- dict(
116
- cookie=f"safesearch_guest=Off; uuid_guest={str(uuid4())}",
117
- )
118
- )
119
- self.payload["q"] = prompt
120
-
121
- def for_stream():
122
- response = self.session.get(
123
- self.chat_endpoint,
124
- params=self.payload,
125
- stream=True,
126
- timeout=self.timeout,
127
- )
128
-
129
- if not response.ok:
130
- raise exceptions.FailedToGenerateResponseError(
131
- f"Failed to generate response - ({response.status_code}, {response.reason})"
132
- )
133
-
134
- streaming_response = ""
135
- for line in response.iter_lines(decode_unicode=True, chunk_size=64):
136
- if line:
137
- modified_value = re.sub("data:", "", line)
138
- try:
139
- json_modified_value = json.loads(modified_value)
140
- if "youChatToken" in json_modified_value:
141
- streaming_response += json_modified_value["youChatToken"]
142
- if print:
143
- print(json_modified_value["youChatToken"], end="")
144
- except:
145
- continue
146
- self.last_response.update(dict(text=streaming_response))
147
- self.conversation.update_chat_history(
148
- prompt, self.get_message(self.last_response)
149
- )
150
- return streaming_response
151
-
152
- def for_non_stream():
153
- for _ in for_stream():
154
- pass
155
- return self.last_response
156
-
157
- return for_stream() if stream else for_non_stream()
158
-
159
- def chat(
160
- self,
161
- prompt: str,
162
- stream: bool = False,
163
- optimizer: str = None,
164
- conversationally: bool = False,
165
- ) -> str:
166
- """Generate response `str`
167
- Args:
168
- prompt (str): Prompt to be send.
169
- stream (bool, optional): Flag for streaming response. Defaults to False.
170
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
171
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
172
- Returns:
173
- str: Response generated
174
- """
175
-
176
- def chat(
177
- self,
178
- prompt: str,
179
- stream: bool = False,
180
- optimizer: str = None,
181
- conversationally: bool = False,
182
- ) -> str:
183
- """Generate response `str`
184
- Args:
185
- prompt (str): Prompt to be send.
186
- stream (bool, optional): Flag for streaming response. Defaults to False.
187
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
188
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
189
- Returns:
190
- str: Response generated
191
- """
192
-
193
- def for_stream():
194
- for response in self.ask(
195
- prompt, True, optimizer=optimizer, conversationally=conversationally
196
- ):
197
- yield self.get_message(response)
198
-
199
- def for_non_stream():
200
- return self.get_message(
201
- self.ask(
202
- prompt,
203
- False,
204
- optimizer=optimizer,
205
- conversationally=conversationally,
206
- )
207
- )
208
-
209
- return for_stream() if stream else for_non_stream()
210
-
211
- def get_message(self, response: dict) -> str:
212
- """Retrieves message only from response
213
-
214
- Args:
215
- response (dict): Response generated by `self.ask`
216
-
217
- Returns:
218
- str: Message extracted
219
- """
220
- assert isinstance(response, dict), "Response should be of dict data-type only"
221
- return response["text"]
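
Likewise, a hedged sketch of the deleted `YouChat` provider (same parent-commit assumption). Only the default non-streaming path is shown: `ask()` consumes you.com's `streamingSearch` SSE feed, concatenates every `youChatToken` field into `last_response["text"]`, and `get_message()` reads it back out.

```python
# Hypothetical sketch: drives the deleted YouChat provider.
from webscout.Provider import YouChat  # gone after this commit

bot = YouChat(timeout=30)

# ask() streams https://you.com/api/streamingSearch under the hood and returns
# last_response, a dict of the form {"text": "<assembled answer>"}.
response = bot.ask("What is the capital of France?")
print(bot.get_message(response))  # prints the assembled answer
```
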
webscout/Provider/__init__.py DELETED
@@ -1,61 +0,0 @@
1
- # webscout/providers/__init__.py
2
-
3
- from .ThinkAnyAI import ThinkAnyAI
4
- from .Xjai import Xjai
5
- from .Llama2 import LLAMA2
6
- from .Llama2 import AsyncLLAMA2
7
- from .Cohere import Cohere
8
- from .Reka import REKA
9
- from .Groq import GROQ
10
- from .Groq import AsyncGROQ
11
- from .Openai import OPENAI
12
- from .Openai import AsyncOPENAI
13
- from .Leo import LEO
14
- from .Leo import AsyncLEO
15
- from .Koboldai import KOBOLDAI
16
- from .Koboldai import AsyncKOBOLDAI
17
- from .OpenGPT import OPENGPT
18
- from .OpenGPT import AsyncOPENGPT
19
- from .Perplexity import PERPLEXITY
20
- from .Blackboxai import BLACKBOXAI
21
- from .Blackboxai import AsyncBLACKBOXAI
22
- from .Phind import PhindSearch
23
- from .Phind import AsyncPhindSearch
24
- from .Yepchat import YEPCHAT
25
- from .Yepchat import AsyncYEPCHAT
26
- from .Youchat import YouChat
27
- from .Gemini import GEMINI
28
- from .Berlin4h import Berlin4h
29
- from .ChatGPTUK import ChatGPTUK
30
- from .Poe import POE
31
- from .BasedGPT import BasedGPT
32
- __all__ = [
33
- 'ThinkAnyAI',
34
- 'Xjai',
35
- 'LLAMA2',
36
- 'AsyncLLAMA2',
37
- 'Cohere',
38
- 'REKA',
39
- 'GROQ',
40
- 'AsyncGROQ',
41
- 'OPENAI',
42
- 'AsyncOPENAI',
43
- 'LEO',
44
- 'AsyncLEO',
45
- 'KOBOLDAI',
46
- 'AsyncKOBOLDAI',
47
- 'OPENGPT',
48
- 'AsyncOPENGPT',
49
- 'PERPLEXITY',
50
- 'BLACKBOXAI',
51
- 'AsyncBLACKBOXAI',
52
- 'PhindSearch',
53
- 'AsyncPhindSearch',
54
- 'YEPCHAT',
55
- 'AsyncYEPCHAT',
56
- 'YouChat',
57
- 'GEMINI',
58
- 'Berlin4h',
59
- 'ChatGPTUK',
60
- 'POE', 'BasedGPT'
61
- ]
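
Before this commit, the subpackage `__init__.py` above re-exported every provider, so downstream code imported from `webscout.Provider` directly rather than from the individual modules. A sketch of that (now-broken) import pattern:

```python
# Hypothetical pre-deletion usage; resolves only at the parent commit 9e7090f.
from webscout.Provider import YEPCHAT, YouChat, PhindSearch

providers = {"yep": YEPCHAT, "you": YouChat, "phind": PhindSearch}
bot = providers["yep"]()  # every name in __all__ was importable this way
print(bot.chat("hello"))
```
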
webscout/Provider/__pycache__/BasedGPT.cpython-311.pyc DELETED
Binary file (11 kB)
 
webscout/Provider/__pycache__/Berlin4h.cpython-311.pyc DELETED
Binary file (10.6 kB)
 
webscout/Provider/__pycache__/Blackboxai.cpython-311.pyc DELETED
Binary file (19.7 kB)
 
webscout/Provider/__pycache__/ChatGPTUK.cpython-311.pyc DELETED
Binary file (10.8 kB)
 
webscout/Provider/__pycache__/ChatGPTlogin.cpython-311.pyc DELETED
Binary file (11.7 kB)
 
webscout/Provider/__pycache__/Cohere.cpython-311.pyc DELETED
Binary file (11.5 kB)
 
webscout/Provider/__pycache__/Gemini.cpython-311.pyc DELETED
Binary file (11.6 kB)
 
webscout/Provider/__pycache__/Groq.cpython-311.pyc DELETED
Binary file (23.6 kB)
 
webscout/Provider/__pycache__/Koboldai.cpython-311.pyc DELETED
Binary file (18.4 kB)
 
webscout/Provider/__pycache__/Leo.cpython-311.pyc DELETED
Binary file (21.5 kB)
 
webscout/Provider/__pycache__/Llama2.cpython-311.pyc DELETED
Binary file (19.6 kB)
 
webscout/Provider/__pycache__/OpenGPT.cpython-311.pyc DELETED
Binary file (21.2 kB)
 
webscout/Provider/__pycache__/Openai.cpython-311.pyc DELETED
Binary file (23.1 kB)
 
webscout/Provider/__pycache__/Perplexity.cpython-311.pyc DELETED
Binary file (11.8 kB)
 
webscout/Provider/__pycache__/Phind.cpython-311.pyc DELETED
Binary file (22.1 kB)
 
webscout/Provider/__pycache__/Poe.cpython-311.pyc DELETED
Binary file (10.2 kB)
 
webscout/Provider/__pycache__/Reka.cpython-311.pyc DELETED
Binary file (11.3 kB)
 
webscout/Provider/__pycache__/ThinkAnyAI.cpython-311.pyc DELETED
Binary file (15 kB)
 
webscout/Provider/__pycache__/Xjai.cpython-311.pyc DELETED
Binary file (11.1 kB)
 
webscout/Provider/__pycache__/Yepchat.cpython-311.pyc DELETED
Binary file (21.8 kB)
 
webscout/Provider/__pycache__/Youchat.cpython-311.pyc DELETED
Binary file (10.1 kB)
 
webscout/Provider/__pycache__/__init__.cpython-311.pyc DELETED
Binary file (1.82 kB)