he
Browse files- g4f/.v1/requirements.txt +6 -2
- g4f/Provider/Provider.py +1 -0
- g4f/Provider/Providers/AItianhu.py +4 -2
- g4f/Provider/Providers/Acytoo.py +1 -0
- g4f/Provider/Providers/AiService.py +41 -0
- g4f/Provider/Providers/Aichat.py +1 -0
- g4f/Provider/Providers/Ails.py +4 -2
- g4f/Provider/Providers/Bard.py +2 -0
- g4f/Provider/Providers/Bing.py +1 -0
- g4f/Provider/Providers/BingHuan.py +28 -0
- g4f/Provider/Providers/ChatgptAi.py +3 -2
- g4f/Provider/Providers/ChatgptLogin.py +2 -2
- g4f/Provider/Providers/DFEHub.py +42 -30
- g4f/Provider/Providers/DeepAi.py +2 -0
- g4f/Provider/Providers/EasyChat.py +36 -27
- g4f/Provider/Providers/Forefront.py +2 -0
- g4f/Provider/Providers/GetGpt.py +2 -0
- g4f/Provider/Providers/H2o.py +93 -105
- g4f/Provider/Providers/Liaobots.py +1 -0
- g4f/Provider/Providers/Lockchat.py +2 -1
- g4f/Provider/Providers/Theb.py +2 -1
- g4f/Provider/Providers/Vercel.py +1 -2
- g4f/Provider/Providers/Wewordle.py +73 -0
- g4f/Provider/Providers/You.py +1 -0
- g4f/Provider/Providers/Yqcloud.py +1 -0
- g4f/Provider/Providers/__init__.py +0 -0
- g4f/Provider/Providers/helpers/binghuan.py +221 -0
- g4f/Provider/__init__.py +5 -1
- g4f/__init__.py +4 -1
- g4f/models.py +31 -0
- testing/readme_table.py +23 -4
- testing/test.py +12 -0
g4f/.v1/requirements.txt
CHANGED
@@ -5,11 +5,13 @@ pypasser
|
|
5 |
names
|
6 |
colorama
|
7 |
curl_cffi
|
|
|
|
|
|
|
8 |
streamlit
|
9 |
selenium
|
10 |
fake-useragent
|
11 |
twocaptcha
|
12 |
-
https://github.com/AI-Yash/st-chat/archive/refs/pull/24/head.zip
|
13 |
pydantic
|
14 |
pymailtm
|
15 |
Levenshtein
|
@@ -18,4 +20,6 @@ mailgw_temporary_email
|
|
18 |
pycryptodome
|
19 |
random-password-generator
|
20 |
numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability
|
21 |
-
tornado>=6.3.2 # not directly required, pinned by Snyk to avoid a vulnerability
|
|
|
|
|
|
5 |
names
|
6 |
colorama
|
7 |
curl_cffi
|
8 |
+
aiohttp
|
9 |
+
flask
|
10 |
+
flask_cors
|
11 |
streamlit
|
12 |
selenium
|
13 |
fake-useragent
|
14 |
twocaptcha
|
|
|
15 |
pydantic
|
16 |
pymailtm
|
17 |
Levenshtein
|
|
|
20 |
pycryptodome
|
21 |
random-password-generator
|
22 |
numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability
|
23 |
+
tornado>=6.3.2 # not directly required, pinned by Snyk to avoid a vulnerability
|
24 |
+
PyExecJS
|
25 |
+
browser_cookie3
|
g4f/Provider/Provider.py
CHANGED
@@ -5,6 +5,7 @@ url = None
|
|
5 |
model = None
|
6 |
supports_stream = False
|
7 |
needs_auth = False
|
|
|
8 |
|
9 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
10 |
return
|
|
|
5 |
model = None
|
6 |
supports_stream = False
|
7 |
needs_auth = False
|
8 |
+
working = False
|
9 |
|
10 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
11 |
return
|
g4f/Provider/Providers/AItianhu.py
CHANGED
@@ -6,6 +6,8 @@ url = "https://www.aitianhu.com/api/chat-process"
|
|
6 |
model = ['gpt-3.5-turbo']
|
7 |
supports_stream = False
|
8 |
needs_auth = False
|
|
|
|
|
9 |
|
10 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
11 |
base = ''
|
@@ -20,8 +22,8 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
|
20 |
"prompt": base,
|
21 |
"options": {},
|
22 |
"systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
|
23 |
-
"temperature": 0.8,
|
24 |
-
"top_p": 1
|
25 |
}
|
26 |
response = requests.post(url, headers=headers, json=data)
|
27 |
if response.status_code == 200:
|
|
|
6 |
model = ['gpt-3.5-turbo']
|
7 |
supports_stream = False
|
8 |
needs_auth = False
|
9 |
+
working = True
|
10 |
+
|
11 |
|
12 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
13 |
base = ''
|
|
|
22 |
"prompt": base,
|
23 |
"options": {},
|
24 |
"systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
|
25 |
+
"temperature": kwargs.get("temperature", 0.8),
|
26 |
+
"top_p": kwargs.get("top_p", 1)
|
27 |
}
|
28 |
response = requests.post(url, headers=headers, json=data)
|
29 |
if response.status_code == 200:
|
g4f/Provider/Providers/Acytoo.py
CHANGED
@@ -6,6 +6,7 @@ url = "https://chat.acytoo.com/api/completions"
|
|
6 |
model = ['gpt-3.5-turbo']
|
7 |
supports_stream = False
|
8 |
needs_auth = False
|
|
|
9 |
|
10 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
11 |
base = ''
|
|
|
6 |
model = ['gpt-3.5-turbo']
|
7 |
supports_stream = False
|
8 |
needs_auth = False
|
9 |
+
working = False
|
10 |
|
11 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
12 |
base = ''
|
g4f/Provider/Providers/AiService.py
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os,sys
|
2 |
+
import requests
|
3 |
+
from ...typing import get_type_hints
|
4 |
+
|
5 |
+
url = "https://aiservice.vercel.app/api/chat/answer"
|
6 |
+
model = ['gpt-3.5-turbo']
|
7 |
+
supports_stream = False
|
8 |
+
needs_auth = False
|
9 |
+
working = True
|
10 |
+
|
11 |
+
|
12 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
13 |
+
base = ''
|
14 |
+
for message in messages:
|
15 |
+
base += '%s: %s\n' % (message['role'], message['content'])
|
16 |
+
base += 'assistant:'
|
17 |
+
|
18 |
+
headers = {
|
19 |
+
"accept": "*/*",
|
20 |
+
"content-type": "text/plain;charset=UTF-8",
|
21 |
+
"sec-fetch-dest": "empty",
|
22 |
+
"sec-fetch-mode": "cors",
|
23 |
+
"sec-fetch-site": "same-origin",
|
24 |
+
"Referer": "https://aiservice.vercel.app/chat",
|
25 |
+
}
|
26 |
+
data = {
|
27 |
+
"input": base
|
28 |
+
}
|
29 |
+
response = requests.post(url, headers=headers, json=data)
|
30 |
+
if response.status_code == 200:
|
31 |
+
_json = response.json()
|
32 |
+
yield _json['data']
|
33 |
+
else:
|
34 |
+
print(f"Error Occurred::{response.status_code}")
|
35 |
+
return None
|
36 |
+
|
37 |
+
|
38 |
+
|
39 |
+
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
40 |
+
'(%s)' % ', '.join(
|
41 |
+
[f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Aichat.py
CHANGED
@@ -5,6 +5,7 @@ url = 'https://chat-gpt.org/chat'
|
|
5 |
model = ['gpt-3.5-turbo']
|
6 |
supports_stream = False
|
7 |
needs_auth = False
|
|
|
8 |
|
9 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
10 |
base = ''
|
|
|
5 |
model = ['gpt-3.5-turbo']
|
6 |
supports_stream = False
|
7 |
needs_auth = False
|
8 |
+
working = True
|
9 |
|
10 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
11 |
base = ''
|
g4f/Provider/Providers/Ails.py
CHANGED
@@ -13,6 +13,8 @@ url: str = 'https://ai.ls'
|
|
13 |
model: str = 'gpt-3.5-turbo'
|
14 |
supports_stream = True
|
15 |
needs_auth = False
|
|
|
|
|
16 |
|
17 |
class Utils:
|
18 |
def hash(json_data: Dict[str, str]) -> sha256:
|
@@ -45,7 +47,7 @@ def _create_completion(model: str, messages: list, temperature: float = 0.6, str
|
|
45 |
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
|
46 |
'authorization': 'Bearer free',
|
47 |
'client-id': str(uuid.uuid4()),
|
48 |
-
'client-v': '0.1.
|
49 |
'content-type': 'application/json',
|
50 |
'origin': 'https://ai.ls',
|
51 |
'referer': 'https://ai.ls/',
|
@@ -73,7 +75,7 @@ def _create_completion(model: str, messages: list, temperature: float = 0.6, str
|
|
73 |
|
74 |
json_data = json.dumps(separators=(',', ':'), obj={
|
75 |
'model': 'gpt-3.5-turbo',
|
76 |
-
'temperature':
|
77 |
'stream': True,
|
78 |
'messages': messages} | sig)
|
79 |
|
|
|
13 |
model: str = 'gpt-3.5-turbo'
|
14 |
supports_stream = True
|
15 |
needs_auth = False
|
16 |
+
working = True
|
17 |
+
|
18 |
|
19 |
class Utils:
|
20 |
def hash(json_data: Dict[str, str]) -> sha256:
|
|
|
47 |
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
|
48 |
'authorization': 'Bearer free',
|
49 |
'client-id': str(uuid.uuid4()),
|
50 |
+
'client-v': '0.1.249',
|
51 |
'content-type': 'application/json',
|
52 |
'origin': 'https://ai.ls',
|
53 |
'referer': 'https://ai.ls/',
|
|
|
75 |
|
76 |
json_data = json.dumps(separators=(',', ':'), obj={
|
77 |
'model': 'gpt-3.5-turbo',
|
78 |
+
'temperature': temperature,
|
79 |
'stream': True,
|
80 |
'messages': messages} | sig)
|
81 |
|
g4f/Provider/Providers/Bard.py
CHANGED
@@ -5,6 +5,8 @@ url = 'https://bard.google.com'
|
|
5 |
model = ['Palm2']
|
6 |
supports_stream = False
|
7 |
needs_auth = True
|
|
|
|
|
8 |
|
9 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
10 |
psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
|
|
|
5 |
model = ['Palm2']
|
6 |
supports_stream = False
|
7 |
needs_auth = True
|
8 |
+
working = True
|
9 |
+
|
10 |
|
11 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
12 |
psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
|
g4f/Provider/Providers/Bing.py
CHANGED
@@ -16,6 +16,7 @@ url = 'https://bing.com/chat'
|
|
16 |
model = ['gpt-4']
|
17 |
supports_stream = True
|
18 |
needs_auth = False
|
|
|
19 |
|
20 |
ssl_context = ssl.create_default_context()
|
21 |
ssl_context.load_verify_locations(certifi.where())
|
|
|
16 |
model = ['gpt-4']
|
17 |
supports_stream = True
|
18 |
needs_auth = False
|
19 |
+
working = True
|
20 |
|
21 |
ssl_context = ssl.create_default_context()
|
22 |
ssl_context.load_verify_locations(certifi.where())
|
g4f/Provider/Providers/BingHuan.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os,sys
|
2 |
+
import json
|
3 |
+
import subprocess
|
4 |
+
from ...typing import sha256, Dict, get_type_hints
|
5 |
+
|
6 |
+
url = 'https://b.ai-huan.xyz'
|
7 |
+
model = ['gpt-3.5-turbo', 'gpt-4']
|
8 |
+
supports_stream = True
|
9 |
+
needs_auth = False
|
10 |
+
working = False
|
11 |
+
|
12 |
+
|
13 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
14 |
+
path = os.path.dirname(os.path.realpath(__file__))
|
15 |
+
config = json.dumps({
|
16 |
+
'messages': messages,
|
17 |
+
'model': model}, separators=(',', ':'))
|
18 |
+
cmd = ['python', f'{path}/helpers/binghuan.py', config]
|
19 |
+
|
20 |
+
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
21 |
+
|
22 |
+
for line in iter(p.stdout.readline, b''):
|
23 |
+
yield line.decode('cp1252')
|
24 |
+
|
25 |
+
|
26 |
+
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
27 |
+
'(%s)' % ', '.join(
|
28 |
+
[f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/ChatgptAi.py
CHANGED
@@ -6,6 +6,8 @@ url = 'https://chatgpt.ai/gpt-4/'
|
|
6 |
model = ['gpt-4']
|
7 |
supports_stream = False
|
8 |
needs_auth = False
|
|
|
|
|
9 |
|
10 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
11 |
chat = ''
|
@@ -13,8 +15,7 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
|
13 |
chat += '%s: %s\n' % (message['role'], message['content'])
|
14 |
chat += 'assistant: '
|
15 |
|
16 |
-
response = requests.get('https://chatgpt.ai/
|
17 |
-
|
18 |
nonce, post_id, _, bot_id = re.findall(r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width', response.text)[0]
|
19 |
|
20 |
headers = {
|
|
|
6 |
model = ['gpt-4']
|
7 |
supports_stream = False
|
8 |
needs_auth = False
|
9 |
+
working = True
|
10 |
+
|
11 |
|
12 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
13 |
chat = ''
|
|
|
15 |
chat += '%s: %s\n' % (message['role'], message['content'])
|
16 |
chat += 'assistant: '
|
17 |
|
18 |
+
response = requests.get('https://chatgpt.ai/')
|
|
|
19 |
nonce, post_id, _, bot_id = re.findall(r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width', response.text)[0]
|
20 |
|
21 |
headers = {
|
g4f/Provider/Providers/ChatgptLogin.py
CHANGED
@@ -8,7 +8,7 @@ url = 'https://chatgptlogin.ac'
|
|
8 |
model = ['gpt-3.5-turbo']
|
9 |
supports_stream = False
|
10 |
needs_auth = False
|
11 |
-
|
12 |
|
13 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
14 |
def get_nonce():
|
@@ -75,7 +75,7 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
|
75 |
'userName': '<div class="mwai-name-text">User:</div>',
|
76 |
'aiName': '<div class="mwai-name-text">AI:</div>',
|
77 |
'model': 'gpt-3.5-turbo',
|
78 |
-
'temperature': 0.8,
|
79 |
'maxTokens': 1024,
|
80 |
'maxResults': 1,
|
81 |
'apiKey': '',
|
|
|
8 |
model = ['gpt-3.5-turbo']
|
9 |
supports_stream = False
|
10 |
needs_auth = False
|
11 |
+
working = False
|
12 |
|
13 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
14 |
def get_nonce():
|
|
|
75 |
'userName': '<div class="mwai-name-text">User:</div>',
|
76 |
'aiName': '<div class="mwai-name-text">AI:</div>',
|
77 |
'model': 'gpt-3.5-turbo',
|
78 |
+
'temperature': kwargs.get('temperature', 0.8),
|
79 |
'maxTokens': 1024,
|
80 |
'maxResults': 1,
|
81 |
'apiKey': '',
|
g4f/Provider/Providers/DFEHub.py
CHANGED
@@ -1,44 +1,56 @@
|
|
1 |
import os, requests
|
2 |
from ...typing import sha256, Dict, get_type_hints
|
3 |
import json
|
|
|
|
|
4 |
|
5 |
url = "https://chat.dfehub.com/api/chat"
|
6 |
model = ['gpt-3.5-turbo']
|
7 |
-
supports_stream =
|
8 |
needs_auth = False
|
|
|
9 |
|
10 |
-
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
11 |
-
base = ''
|
12 |
-
for message in messages:
|
13 |
-
base += '%s: %s\n' % (message['role'], message['content'])
|
14 |
-
base += 'assistant:'
|
15 |
|
|
|
16 |
headers = {
|
17 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
}
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
"role": "user",
|
29 |
-
"content": base
|
30 |
-
}
|
31 |
-
],
|
32 |
-
"key": "",
|
33 |
-
"prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
|
34 |
-
"temperature": 1
|
35 |
}
|
36 |
-
response = requests.post(
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
42 |
|
43 |
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
44 |
-
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
|
|
1 |
import os, requests
|
2 |
from ...typing import sha256, Dict, get_type_hints
|
3 |
import json
|
4 |
+
import re
|
5 |
+
import time
|
6 |
|
7 |
url = "https://chat.dfehub.com/api/chat"
|
8 |
model = ['gpt-3.5-turbo']
|
9 |
+
supports_stream = True
|
10 |
needs_auth = False
|
11 |
+
working = True
|
12 |
|
|
|
|
|
|
|
|
|
|
|
13 |
|
14 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
15 |
headers = {
|
16 |
+
'authority': 'chat.dfehub.com',
|
17 |
+
'accept': '*/*',
|
18 |
+
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
|
19 |
+
'content-type': 'application/json',
|
20 |
+
'origin': 'https://chat.dfehub.com',
|
21 |
+
'referer': 'https://chat.dfehub.com/',
|
22 |
+
'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
|
23 |
+
'sec-ch-ua-mobile': '?0',
|
24 |
+
'sec-ch-ua-platform': '"macOS"',
|
25 |
+
'sec-fetch-dest': 'empty',
|
26 |
+
'sec-fetch-mode': 'cors',
|
27 |
+
'sec-fetch-site': 'same-origin',
|
28 |
+
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
|
29 |
+
'x-requested-with': 'XMLHttpRequest',
|
30 |
}
|
31 |
+
|
32 |
+
json_data = {
|
33 |
+
'messages': messages,
|
34 |
+
'model': 'gpt-3.5-turbo',
|
35 |
+
'temperature': kwargs.get('temperature', 0.5),
|
36 |
+
'presence_penalty': kwargs.get('presence_penalty', 0),
|
37 |
+
'frequency_penalty': kwargs.get('frequency_penalty', 0),
|
38 |
+
'top_p': kwargs.get('top_p', 1),
|
39 |
+
"stream": True,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
}
|
41 |
+
response = requests.post('https://chat.dfehub.com/api/openai/v1/chat/completions',
|
42 |
+
headers=headers, json=json_data)
|
43 |
+
|
44 |
+
for chunk in response.iter_lines():
|
45 |
+
if b'detail' in chunk:
|
46 |
+
delay = re.findall(r"\d+\.\d+", chunk.decode())
|
47 |
+
delay = float(delay[-1])
|
48 |
+
print(f"Provider.DfeHub::Rate Limit Reached::Waiting {delay} seconds")
|
49 |
+
time.sleep(delay)
|
50 |
+
yield from _create_completion(model, messages, stream, **kwargs)
|
51 |
+
if b'content' in chunk:
|
52 |
+
data = json.loads(chunk.decode().split('data: ')[1])
|
53 |
+
yield (data['choices'][0]['delta']['content'])
|
54 |
|
55 |
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
56 |
+
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/DeepAi.py
CHANGED
@@ -10,6 +10,8 @@ url = 'https://deepai.org'
|
|
10 |
model = ['gpt-3.5-turbo']
|
11 |
supports_stream = True
|
12 |
needs_auth = False
|
|
|
|
|
13 |
|
14 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
15 |
def md5(text: str) -> str:
|
|
|
10 |
model = ['gpt-3.5-turbo']
|
11 |
supports_stream = True
|
12 |
needs_auth = False
|
13 |
+
working = True
|
14 |
+
|
15 |
|
16 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
17 |
def md5(text: str) -> str:
|
g4f/Provider/Providers/EasyChat.py
CHANGED
@@ -2,42 +2,51 @@ import os, requests
|
|
2 |
from ...typing import sha256, Dict, get_type_hints
|
3 |
import json
|
4 |
|
5 |
-
url = "https://free.easychat.work
|
6 |
model = ['gpt-3.5-turbo']
|
7 |
-
supports_stream =
|
8 |
needs_auth = False
|
|
|
|
|
9 |
|
10 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
11 |
-
''' limited to 240 messages/hour'''
|
12 |
-
base = ''
|
13 |
-
for message in messages:
|
14 |
-
base += '%s: %s\n' % (message['role'], message['content'])
|
15 |
-
base += 'assistant:'
|
16 |
-
|
17 |
headers = {
|
18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
19 |
}
|
20 |
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
"presence_penalty": 0,
|
30 |
-
"frequency_penalty": 0,
|
31 |
-
"top_p": 1
|
32 |
}
|
33 |
|
34 |
-
response = requests.post(
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
|
42 |
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
43 |
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
|
|
2 |
from ...typing import sha256, Dict, get_type_hints
|
3 |
import json
|
4 |
|
5 |
+
url = "https://free.easychat.work"
|
6 |
model = ['gpt-3.5-turbo']
|
7 |
+
supports_stream = True
|
8 |
needs_auth = False
|
9 |
+
working = True
|
10 |
+
|
11 |
|
12 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
headers = {
|
14 |
+
'authority': 'free.easychat.work',
|
15 |
+
'accept': 'text/event-stream',
|
16 |
+
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
|
17 |
+
'content-type': 'application/json',
|
18 |
+
'endpoint': '',
|
19 |
+
'origin': 'https://free.easychat.work',
|
20 |
+
'plugins': '0',
|
21 |
+
'referer': 'https://free.easychat.work/',
|
22 |
+
'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
|
23 |
+
'sec-ch-ua-mobile': '?0',
|
24 |
+
'sec-ch-ua-platform': '"macOS"',
|
25 |
+
'sec-fetch-dest': 'empty',
|
26 |
+
'sec-fetch-mode': 'cors',
|
27 |
+
'sec-fetch-site': 'same-origin',
|
28 |
+
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
|
29 |
+
'usesearch': 'false',
|
30 |
+
'x-requested-with': 'XMLHttpRequest',
|
31 |
}
|
32 |
|
33 |
+
json_data = {
|
34 |
+
'messages': messages,
|
35 |
+
'stream': True,
|
36 |
+
'model': model,
|
37 |
+
'temperature': kwargs.get('temperature', 0.5),
|
38 |
+
'presence_penalty': kwargs.get('presence_penalty', 0),
|
39 |
+
'frequency_penalty': kwargs.get('frequency_penalty', 0),
|
40 |
+
'top_p': kwargs.get('top_p', 1),
|
|
|
|
|
|
|
41 |
}
|
42 |
|
43 |
+
response = requests.post('https://free.easychat.work/api/openai/v1/chat/completions',
|
44 |
+
headers=headers, json=json_data)
|
45 |
+
|
46 |
+
for chunk in response.iter_lines():
|
47 |
+
if b'content' in chunk:
|
48 |
+
data = json.loads(chunk.decode().split('data: ')[1])
|
49 |
+
yield (data['choices'][0]['delta']['content'])
|
50 |
|
51 |
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
52 |
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Forefront.py
CHANGED
@@ -7,6 +7,8 @@ url = 'https://forefront.com'
|
|
7 |
model = ['gpt-3.5-turbo']
|
8 |
supports_stream = True
|
9 |
needs_auth = False
|
|
|
|
|
10 |
|
11 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
12 |
json_data = {
|
|
|
7 |
model = ['gpt-3.5-turbo']
|
8 |
supports_stream = True
|
9 |
needs_auth = False
|
10 |
+
working = False
|
11 |
+
|
12 |
|
13 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
14 |
json_data = {
|
g4f/Provider/Providers/GetGpt.py
CHANGED
@@ -9,6 +9,8 @@ url = 'https://chat.getgpt.world/'
|
|
9 |
model = ['gpt-3.5-turbo']
|
10 |
supports_stream = True
|
11 |
needs_auth = False
|
|
|
|
|
12 |
|
13 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
14 |
def encrypt(e):
|
|
|
9 |
model = ['gpt-3.5-turbo']
|
10 |
supports_stream = True
|
11 |
needs_auth = False
|
12 |
+
working = True
|
13 |
+
|
14 |
|
15 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
16 |
def encrypt(e):
|
g4f/Provider/Providers/H2o.py
CHANGED
@@ -1,106 +1,94 @@
|
|
1 |
-
from requests import Session
|
2 |
-
from uuid import uuid4
|
3 |
-
from json import loads
|
4 |
-
import os
|
5 |
-
import json
|
6 |
-
import requests
|
7 |
-
from ...typing import sha256, Dict, get_type_hints
|
8 |
-
|
9 |
-
url = 'https://gpt-gm.h2o.ai'
|
10 |
-
model = ['falcon-40b', 'falcon-7b', 'llama-13b']
|
11 |
-
supports_stream = True
|
12 |
-
needs_auth = False
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
'falcon-
|
17 |
-
'
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
}
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
}
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
}
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
for line in completion.iter_lines():
|
96 |
-
if b'data' in line:
|
97 |
-
line = loads(line.decode('utf-8').replace('data:', ''))
|
98 |
-
token = line['token']['text']
|
99 |
-
|
100 |
-
if token == '<|endoftext|>':
|
101 |
-
break
|
102 |
-
else:
|
103 |
-
yield (token)
|
104 |
-
|
105 |
-
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
106 |
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
|
|
1 |
+
from requests import Session
|
2 |
+
from uuid import uuid4
|
3 |
+
from json import loads
|
4 |
+
import os
|
5 |
+
import json
|
6 |
+
import requests
|
7 |
+
from ...typing import sha256, Dict, get_type_hints
|
8 |
+
|
9 |
+
url = 'https://gpt-gm.h2o.ai'
|
10 |
+
model = ['falcon-40b', 'falcon-7b', 'llama-13b']
|
11 |
+
supports_stream = True
|
12 |
+
needs_auth = False
|
13 |
+
working = True
|
14 |
+
|
15 |
+
models = {
|
16 |
+
'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
|
17 |
+
'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
|
18 |
+
'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b'
|
19 |
+
}
|
20 |
+
|
21 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
22 |
+
|
23 |
+
conversation = ''
|
24 |
+
for message in messages:
|
25 |
+
conversation += '%s: %s\n' % (message['role'], message['content'])
|
26 |
+
|
27 |
+
conversation += 'assistant: '
|
28 |
+
session = requests.Session()
|
29 |
+
|
30 |
+
response = session.get("https://gpt-gm.h2o.ai/")
|
31 |
+
headers = {
|
32 |
+
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
|
33 |
+
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
|
34 |
+
"Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
|
35 |
+
"Content-Type": "application/x-www-form-urlencoded",
|
36 |
+
"Upgrade-Insecure-Requests": "1",
|
37 |
+
"Sec-Fetch-Dest": "document",
|
38 |
+
"Sec-Fetch-Mode": "navigate",
|
39 |
+
"Sec-Fetch-Site": "same-origin",
|
40 |
+
"Sec-Fetch-User": "?1",
|
41 |
+
"Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"
|
42 |
+
}
|
43 |
+
data = {
|
44 |
+
"ethicsModalAccepted": "true",
|
45 |
+
"shareConversationsWithModelAuthors": "true",
|
46 |
+
"ethicsModalAcceptedAt": "",
|
47 |
+
"activeModel": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
|
48 |
+
"searchEnabled": "true"
|
49 |
+
}
|
50 |
+
response = session.post("https://gpt-gm.h2o.ai/settings", headers=headers, data=data)
|
51 |
+
|
52 |
+
headers = {
|
53 |
+
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
|
54 |
+
"Accept": "*/*",
|
55 |
+
"Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
|
56 |
+
"Content-Type": "application/json",
|
57 |
+
"Sec-Fetch-Dest": "empty",
|
58 |
+
"Sec-Fetch-Mode": "cors",
|
59 |
+
"Sec-Fetch-Site": "same-origin",
|
60 |
+
"Referer": "https://gpt-gm.h2o.ai/"
|
61 |
+
}
|
62 |
+
data = {
|
63 |
+
"model": models[model]
|
64 |
+
}
|
65 |
+
|
66 |
+
conversation_id = session.post("https://gpt-gm.h2o.ai/conversation", headers=headers, json=data)
|
67 |
+
data = {
|
68 |
+
"inputs": conversation,
|
69 |
+
"parameters": {
|
70 |
+
"temperature": kwargs.get('temperature', 0.4),
|
71 |
+
"truncate": kwargs.get('truncate', 2048),
|
72 |
+
"max_new_tokens": kwargs.get('max_new_tokens', 1024),
|
73 |
+
"do_sample": kwargs.get('do_sample', True),
|
74 |
+
"repetition_penalty": kwargs.get('repetition_penalty', 1.2),
|
75 |
+
"return_full_text": kwargs.get('return_full_text', False)
|
76 |
+
},
|
77 |
+
"stream": True,
|
78 |
+
"options": {
|
79 |
+
"id": kwargs.get('id', str(uuid4())),
|
80 |
+
"response_id": kwargs.get('response_id', str(uuid4())),
|
81 |
+
"is_retry": False,
|
82 |
+
"use_cache": False,
|
83 |
+
"web_search_id": ""
|
84 |
+
}
|
85 |
+
}
|
86 |
+
|
87 |
+
response = session.post(f"https://gpt-gm.h2o.ai/conversation/{conversation_id.json()['conversationId']}", headers=headers, json=data)
|
88 |
+
generated_text = response.text.replace("\n", "").split("data:")
|
89 |
+
generated_text = json.loads(generated_text[-1])
|
90 |
+
|
91 |
+
return generated_text["generated_text"]
|
92 |
+
|
93 |
+
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
94 |
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Liaobots.py
CHANGED
@@ -5,6 +5,7 @@ url = 'https://liaobots.com'
|
|
5 |
model = ['gpt-3.5-turbo', 'gpt-4']
|
6 |
supports_stream = True
|
7 |
needs_auth = True
|
|
|
8 |
|
9 |
models = {
|
10 |
'gpt-4': {
|
|
|
5 |
model = ['gpt-3.5-turbo', 'gpt-4']
|
6 |
supports_stream = True
|
7 |
needs_auth = True
|
8 |
+
working = False
|
9 |
|
10 |
models = {
|
11 |
'gpt-4': {
|
g4f/Provider/Providers/Lockchat.py
CHANGED
@@ -6,11 +6,12 @@ url = 'http://supertest.lockchat.app'
|
|
6 |
model = ['gpt-4', 'gpt-3.5-turbo']
|
7 |
supports_stream = True
|
8 |
needs_auth = False
|
|
|
9 |
|
10 |
def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
|
11 |
|
12 |
payload = {
|
13 |
-
"temperature":
|
14 |
"messages": messages,
|
15 |
"model": model,
|
16 |
"stream": True,
|
|
|
6 |
model = ['gpt-4', 'gpt-3.5-turbo']
|
7 |
supports_stream = True
|
8 |
needs_auth = False
|
9 |
+
working = False
|
10 |
|
11 |
def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
|
12 |
|
13 |
payload = {
|
14 |
+
"temperature": temperature,
|
15 |
"messages": messages,
|
16 |
"model": model,
|
17 |
"stream": True,
|
g4f/Provider/Providers/Theb.py
CHANGED
@@ -9,6 +9,7 @@ url = 'https://theb.ai'
|
|
9 |
model = ['gpt-3.5-turbo']
|
10 |
supports_stream = True
|
11 |
needs_auth = False
|
|
|
12 |
|
13 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
14 |
|
@@ -20,7 +21,7 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
|
20 |
cmd = ['python3', f'{path}/helpers/theb.py', config]
|
21 |
|
22 |
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
23 |
-
|
24 |
for line in iter(p.stdout.readline, b''):
|
25 |
yield line.decode('utf-8')
|
26 |
|
|
|
9 |
model = ['gpt-3.5-turbo']
|
10 |
supports_stream = True
|
11 |
needs_auth = False
|
12 |
+
working = False
|
13 |
|
14 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
15 |
|
|
|
21 |
cmd = ['python3', f'{path}/helpers/theb.py', config]
|
22 |
|
23 |
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
24 |
+
|
25 |
for line in iter(p.stdout.readline, b''):
|
26 |
yield line.decode('utf-8')
|
27 |
|
g4f/Provider/Providers/Vercel.py
CHANGED
@@ -11,6 +11,7 @@ from ...typing import sha256, Dict, get_type_hints
|
|
11 |
url = 'https://play.vercel.ai'
|
12 |
supports_stream = True
|
13 |
needs_auth = False
|
|
|
14 |
|
15 |
models = {
|
16 |
'claude-instant-v1': 'anthropic:claude-instant-v1',
|
@@ -143,8 +144,6 @@ class Client:
|
|
143 |
index = len(lines) - 1
|
144 |
|
145 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
146 |
-
yield 'Vercel is currently not working.'
|
147 |
-
return
|
148 |
|
149 |
conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
|
150 |
|
|
|
11 |
url = 'https://play.vercel.ai'
|
12 |
supports_stream = True
|
13 |
needs_auth = False
|
14 |
+
working = False
|
15 |
|
16 |
models = {
|
17 |
'claude-instant-v1': 'anthropic:claude-instant-v1',
|
|
|
144 |
index = len(lines) - 1
|
145 |
|
146 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
|
|
|
|
147 |
|
148 |
conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
|
149 |
|
g4f/Provider/Providers/Wewordle.py
ADDED
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os,sys
|
2 |
+
import requests
|
3 |
+
import json
|
4 |
+
import random
|
5 |
+
import time
|
6 |
+
import string
|
7 |
+
from ...typing import sha256, Dict, get_type_hints
|
8 |
+
|
9 |
+
url = "https://wewordle.org/gptapi/v1/android/turbo"
|
10 |
+
model = ['gpt-3.5-turbo']
|
11 |
+
supports_stream = False
|
12 |
+
needs_auth = False
|
13 |
+
working = False
|
14 |
+
|
15 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Yield a single completion from wewordle.org's Android GPT proxy.

    The endpoint only handles one user message, so the whole chat history
    is flattened into a single "role: content" transcript first.
    Streaming is not supported by this provider; exactly one chunk is yielded.
    """
    # Flatten the chat history into one transcript the endpoint can consume.
    base = ''
    for message in messages:
        base += '%s: %s\n' % (message['role'], message['content'])
    base += 'assistant:'
    # Randomize user id and app id so every request looks like a fresh install.
    _user_id = ''.join(random.choices(f'{string.ascii_lowercase}{string.digits}', k=16))
    _app_id = ''.join(random.choices(f'{string.ascii_lowercase}{string.digits}', k=31))
    # Current date/time in the UTC ISO-8601 form the API expects.
    _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
    headers = {
        'accept': '*/*',
        'pragma': 'no-cache',
        'Content-Type': 'application/json',
        'Connection': 'keep-alive'
        # user agent android client
        # 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 10; SM-G975F Build/QP1A.190711.020)',
    }
    data = {
        "user": _user_id,
        "messages": [
            {"role": "user", "content": base}
        ],
        # Fake RevenueCat subscriber payload mimicking the Android client;
        # all purchase fields are empty, i.e. an anonymous free user.
        "subscriber": {
            "originalPurchaseDate": None,
            "originalApplicationVersion": None,
            "allPurchaseDatesMillis": {},
            "entitlements": {
                "active": {},
                "all": {}
            },
            "allPurchaseDates": {},
            "allExpirationDatesMillis": {},
            "allExpirationDates": {},
            "originalAppUserId": f"$RCAnonymousID:{_app_id}",
            "latestExpirationDate": None,
            "requestDate": _request_date,
            "latestExpirationDateMillis": None,
            "nonSubscriptionTransactions": [],
            "originalPurchaseDateMillis": None,
            "managementURL": None,
            "allPurchasedProductIdentifiers": [],
            "firstSeen": _request_date,
            "activeSubscriptions": []
        }
    }
    # Bug fix: add a timeout so a stalled endpoint cannot hang the caller
    # indefinitely (the original request had no timeout at all).
    response = requests.post(url, headers=headers, data=json.dumps(data), timeout=30)
    if response.status_code == 200:
        _json = response.json()
        if 'message' in _json:
            yield _json['message']['content']
        else:
            # Bug fix: a 200 response without a 'message' key used to fail
            # silently; surface it for easier debugging.
            print("Error Occurred::200 response without 'message' field")
    else:
        print(f"Error Occurred::{response.status_code}")
        return None
|
70 |
+
|
71 |
+
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
72 |
+
'(%s)' % ', '.join(
|
73 |
+
[f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/You.py
CHANGED
@@ -9,6 +9,7 @@ url = 'https://you.com'
|
|
9 |
model = 'gpt-3.5-turbo'
|
10 |
supports_stream = True
|
11 |
needs_auth = False
|
|
|
12 |
|
13 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
14 |
|
|
|
9 |
model = 'gpt-3.5-turbo'
|
10 |
supports_stream = True
|
11 |
needs_auth = False
|
12 |
+
working = False
|
13 |
|
14 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
15 |
|
g4f/Provider/Providers/Yqcloud.py
CHANGED
@@ -9,6 +9,7 @@ model = [
|
|
9 |
]
|
10 |
supports_stream = True
|
11 |
needs_auth = False
|
|
|
12 |
|
13 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
14 |
|
|
|
9 |
]
|
10 |
supports_stream = True
|
11 |
needs_auth = False
|
12 |
+
working = False
|
13 |
|
14 |
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
15 |
|
g4f/Provider/Providers/__init__.py
ADDED
File without changes
|
g4f/Provider/Providers/helpers/binghuan.py
ADDED
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Original Code From : https://gitler.moe/g4f/gpt4free
|
2 |
+
# https://gitler.moe/g4f/gpt4free/src/branch/main/g4f/Provider/Providers/helpers/bing.py
|
3 |
+
import sys
|
4 |
+
import ssl
|
5 |
+
import uuid
|
6 |
+
import json
|
7 |
+
import time
|
8 |
+
import random
|
9 |
+
import asyncio
|
10 |
+
import certifi
|
11 |
+
# import requests
|
12 |
+
from curl_cffi import requests
|
13 |
+
import websockets
|
14 |
+
import browser_cookie3
|
15 |
+
|
16 |
+
config = json.loads(sys.argv[1])
|
17 |
+
|
18 |
+
ssl_context = ssl.create_default_context()
|
19 |
+
ssl_context.load_verify_locations(certifi.where())
|
20 |
+
|
21 |
+
|
22 |
+
|
23 |
+
conversationstyles = {
|
24 |
+
'gpt-4': [ #'precise'
|
25 |
+
"nlu_direct_response_filter",
|
26 |
+
"deepleo",
|
27 |
+
"disable_emoji_spoken_text",
|
28 |
+
"responsible_ai_policy_235",
|
29 |
+
"enablemm",
|
30 |
+
"h3precise",
|
31 |
+
"rcsprtsalwlst",
|
32 |
+
"dv3sugg",
|
33 |
+
"autosave",
|
34 |
+
"clgalileo",
|
35 |
+
"gencontentv3"
|
36 |
+
],
|
37 |
+
'balanced': [
|
38 |
+
"nlu_direct_response_filter",
|
39 |
+
"deepleo",
|
40 |
+
"disable_emoji_spoken_text",
|
41 |
+
"responsible_ai_policy_235",
|
42 |
+
"enablemm",
|
43 |
+
"harmonyv3",
|
44 |
+
"rcsprtsalwlst",
|
45 |
+
"dv3sugg",
|
46 |
+
"autosave"
|
47 |
+
],
|
48 |
+
'gpt-3.5-turbo': [ #'precise'
|
49 |
+
"nlu_direct_response_filter",
|
50 |
+
"deepleo",
|
51 |
+
"disable_emoji_spoken_text",
|
52 |
+
"responsible_ai_policy_235",
|
53 |
+
"enablemm",
|
54 |
+
"h3imaginative",
|
55 |
+
"rcsprtsalwlst",
|
56 |
+
"dv3sugg",
|
57 |
+
"autosave",
|
58 |
+
"gencontentv3"
|
59 |
+
]
|
60 |
+
}
|
61 |
+
|
62 |
+
def format(msg: dict) -> str:
    """Serialize *msg* as JSON terminated by the SignalR record separator (0x1e)."""
    payload = json.dumps(msg)
    return '%s\x1e' % payload
|
64 |
+
|
65 |
+
def get_token():
    """Return a Bing '_U' auth cookie, or None.

    Auth is currently disabled upstream ("No auth required anymore"), so this
    returns None immediately. The Edge-cookie extraction below is intentionally
    unreachable and kept only for reference should auth come back.
    """
    return None

    # --- dead code: legacy Edge cookie extraction (unreachable) ---
    try:
        cookies = {c.name: c.value for c in browser_cookie3.edge(domain_name='bing.com')}
        return cookies['_U']
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit etc.
        print('Error: could not find bing _U cookie in edge browser.')
        exit(1)
|
74 |
+
|
75 |
+
class AsyncCompletion:
    """Async client for the BingHuan relay (b.ai-huan.xyz) of Bing Chat."""

    @staticmethod  # defined without `self` and always invoked as AsyncCompletion.create(...)
    async def create(
            prompt: str = None,
            optionSets: list = None,
            token: str = None):  # No auth required anymore
        """Create a conversation and yield response text deltas.

        Retries conversation creation up to 5 times, then streams the chat
        over the Sydney websocket, yielding only the newly appended text of
        each partial message.
        """
        # --- 1. create a conversation (retry up to 5 times) -----------------
        create = None
        for _ in range(5):
            try:
                create = requests.get(
                    'https://b.ai-huan.xyz/turing/conversation/create',
                    headers={
                        'host': 'b.ai-huan.xyz',
                        'accept-encoding': 'gzip, deflate, br',
                        'connection': 'keep-alive',
                        'authority': 'b.ai-huan.xyz',
                        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                        'accept-language': 'en-US,en;q=0.9',
                        'cache-control': 'max-age=0',
                        'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                        'sec-ch-ua-arch': '"x86"',
                        'sec-ch-ua-bitness': '"64"',
                        'sec-ch-ua-full-version': '"110.0.1587.69"',
                        'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
                        'sec-ch-ua-mobile': '?0',
                        'sec-ch-ua-model': '""',
                        'sec-ch-ua-platform': '"Windows"',
                        'sec-ch-ua-platform-version': '"15.0.0"',
                        'sec-fetch-dest': 'document',
                        'sec-fetch-mode': 'navigate',
                        'sec-fetch-site': 'none',
                        'sec-fetch-user': '?1',
                        'upgrade-insecure-requests': '1',
                        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
                        'x-edge-shopping-flag': '1',
                        # spoofed datacenter source address so the relay accepts us
                        'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
                    }
                )

                conversationId = create.json()['conversationId']
                clientId = create.json()['clientId']
                conversationSignature = create.json()['conversationSignature']
                # Bug fix: stop retrying once creation succeeded; previously the
                # loop re-created the conversation on every one of the 5 passes.
                break

            except Exception:
                time.sleep(0.5)
                continue

        if create is None:  # bug fix: identity comparison, not `== None`
            raise Exception('Failed to create conversation.')

        # --- 2. open the Sydney chat websocket ------------------------------
        wss = await websockets.connect(
            'wss://sydney.vcanbb.chat/sydney/ChatHub', max_size=None, ssl=ssl_context,
            extra_headers={
                'accept': 'application/json',
                'accept-language': 'en-US,en;q=0.9',
                'content-type': 'application/json',
                'sec-ch-ua': '"Not_A Brand";v="99", Microsoft Edge";v="110", "Chromium";v="110"',
                'sec-ch-ua-arch': '"x86"',
                'sec-ch-ua-bitness': '"64"',
                'sec-ch-ua-full-version': '"109.0.1518.78"',
                'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-model': "",
                'sec-ch-ua-platform': '"Windows"',
                'sec-ch-ua-platform-version': '"15.0.0"',
                'sec-fetch-dest': 'empty',
                'sec-fetch-mode': 'cors',
                'sec-fetch-site': 'same-origin',
                'x-ms-client-request-id': str(uuid.uuid4()),
                'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
                'Referer': 'https://b.ai-huan.xyz/search?q=Bing+AI&showconv=1&FORM=hpcodx',
                'Referrer-Policy': 'origin-when-cross-origin',
                'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
            }
        )

        # SignalR protocol handshake.
        await wss.send(format({'protocol': 'json', 'version': 1}))
        await wss.recv()

        struct = {
            'arguments': [
                {
                    'source': 'cib',
                    'optionsSets': optionSets,
                    'isStartOfSession': True,
                    'message': {
                        'author': 'user',
                        'inputMethod': 'Keyboard',
                        'text': prompt,
                        'messageType': 'Chat'
                    },
                    'conversationSignature': conversationSignature,
                    'participant': {
                        'id': clientId
                    },
                    'conversationId': conversationId
                }
            ],
            'invocationId': '0',
            'target': 'chat',
            'type': 4
        }

        await wss.send(format(struct))

        # --- 3. stream partial messages until a type-2 (final) frame --------
        base_string = ''
        final = False
        while not final:
            objects = str(await wss.recv()).split('\x1e')
            for obj in objects:
                if obj is None or obj == '':
                    continue

                response = json.loads(obj)
                if response.get('type') == 1 and response['arguments'][0].get('messages',):
                    response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')

                    # Each frame carries the full text so far; yield only the
                    # newly appended tail.
                    yield (response_text.replace(base_string, ''))
                    base_string = response_text

                elif response.get('type') == 2:
                    final = True

        await wss.close()
|
200 |
+
|
201 |
+
# NOTE: Bing doesn't really seem to understand multi-message histories
# (based on the prompt template), so they are flattened into one prompt.
|
202 |
+
def convert(messages):
    """Render a chat history as Bing-style '[role](#message)' markdown blocks."""
    blocks = [
        "[%s](#message)\n%s\n\n" % (entry['role'], entry['content'])
        for entry in messages
    ]
    return "".join(blocks)
|
208 |
+
|
209 |
+
async def run(optionSets, messages):
    """Stream a completion for *messages* to stdout using *optionSets*."""
    # Multi-message histories get flattened into the prompt-template form;
    # a single message is sent verbatim.
    if len(messages) > 1:
        prompt = convert(messages)
    else:
        prompt = messages[-1]['content']

    async for value in AsyncCompletion.create(prompt=prompt, optionSets=optionSets):
        try:
            print(value, flush=True, end='')
        except UnicodeEncodeError:
            # Console may not handle emoji; fall back to raw UTF-8 bytes.
            print(value.encode('utf-8'), flush=True, end='')
|
219 |
+
|
220 |
+
optionSet = conversationstyles[config['model']]
|
221 |
+
asyncio.run(run(optionSet, config['messages']))
|
g4f/Provider/__init__.py
CHANGED
@@ -18,7 +18,11 @@ from .Providers import (
|
|
18 |
AItianhu,
|
19 |
EasyChat,
|
20 |
Acytoo,
|
21 |
-
|
|
|
|
|
|
|
|
|
22 |
)
|
23 |
|
24 |
Palm = Bard
|
|
|
18 |
AItianhu,
|
19 |
EasyChat,
|
20 |
Acytoo,
|
21 |
+
DfeHub,
|
22 |
+
AiService,
|
23 |
+
BingHuan,
|
24 |
+
Wewordle,
|
25 |
+
ChatgptAi,
|
26 |
)
|
27 |
|
28 |
Palm = Bard
|
g4f/__init__.py
CHANGED
@@ -2,11 +2,14 @@ import sys
|
|
2 |
from . import Provider
|
3 |
from g4f.models import Model, ModelUtils
|
4 |
|
|
|
5 |
|
6 |
class ChatCompletion:
|
7 |
@staticmethod
|
8 |
def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
|
9 |
kwargs['auth'] = auth
|
|
|
|
|
10 |
|
11 |
if provider and provider.needs_auth and not auth:
|
12 |
print(
|
@@ -27,7 +30,7 @@ class ChatCompletion:
|
|
27 |
f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr)
|
28 |
sys.exit(1)
|
29 |
|
30 |
-
print(f'Using {engine.__name__} provider')
|
31 |
|
32 |
return (engine._create_completion(model.name, messages, stream, **kwargs)
|
33 |
if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs)))
|
|
|
2 |
from . import Provider
|
3 |
from g4f.models import Model, ModelUtils
|
4 |
|
5 |
+
logging = False
|
6 |
|
7 |
class ChatCompletion:
|
8 |
@staticmethod
|
9 |
def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
|
10 |
kwargs['auth'] = auth
|
11 |
+
if provider and provider.working == False:
|
12 |
+
return f'{provider.__name__} is not working'
|
13 |
|
14 |
if provider and provider.needs_auth and not auth:
|
15 |
print(
|
|
|
30 |
f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr)
|
31 |
sys.exit(1)
|
32 |
|
33 |
+
if logging: print(f'Using {engine.__name__} provider')
|
34 |
|
35 |
return (engine._create_completion(model.name, messages, stream, **kwargs)
|
36 |
if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs)))
|
g4f/models.py
CHANGED
@@ -152,11 +152,42 @@ class Model:
|
|
152 |
name: str = 'llama-13b'
|
153 |
base_provider: str = 'huggingface'
|
154 |
best_provider: Provider.Provider = Provider.H2o
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
155 |
|
156 |
class ModelUtils:
|
157 |
convert: dict = {
|
158 |
'gpt-3.5-turbo': Model.gpt_35_turbo,
|
|
|
|
|
|
|
|
|
159 |
'gpt-4': Model.gpt_4,
|
|
|
|
|
160 |
|
161 |
'claude-instant-v1-100k': Model.claude_instant_v1_100k,
|
162 |
'claude-v1-100k': Model.claude_v1_100k,
|
|
|
152 |
name: str = 'llama-13b'
|
153 |
base_provider: str = 'huggingface'
|
154 |
best_provider: Provider.Provider = Provider.H2o
|
155 |
+
|
156 |
+
class gpt_35_turbo_16k:
|
157 |
+
name: str = 'gpt-3.5-turbo-16k'
|
158 |
+
base_provider: str = 'openai'
|
159 |
+
best_provider: Provider.Provider = Provider.EasyChat
|
160 |
+
|
161 |
+
class gpt_35_turbo_0613:
|
162 |
+
name: str = 'gpt-3.5-turbo-0613'
|
163 |
+
base_provider: str = 'openai'
|
164 |
+
best_provider: Provider.Provider = Provider.EasyChat
|
165 |
+
|
166 |
+
class gpt_35_turbo_16k_0613:
|
167 |
+
name: str = 'gpt-3.5-turbo-16k-0613'
|
168 |
+
base_provider: str = 'openai'
|
169 |
+
best_provider: Provider.Provider = Provider.EasyChat
|
170 |
+
|
171 |
+
class gpt_4_32k:
|
172 |
+
name: str = 'gpt-4-32k'
|
173 |
+
base_provider: str = 'openai'
|
174 |
+
best_provider = None
|
175 |
+
|
176 |
+
class gpt_4_0613:
|
177 |
+
name: str = 'gpt-4-0613'
|
178 |
+
base_provider: str = 'openai'
|
179 |
+
best_provider = None
|
180 |
|
181 |
class ModelUtils:
|
182 |
convert: dict = {
|
183 |
'gpt-3.5-turbo': Model.gpt_35_turbo,
|
184 |
+
'gpt-3.5-turbo-16k': Model.gpt_35_turbo_16k,
|
185 |
+
'gpt-3.5-turbo-0613': Model.gpt_35_turbo_0613,
|
186 |
+
'gpt-3.5-turbo-16k-0613': Model.gpt_35_turbo_16k_0613,
|
187 |
+
|
188 |
'gpt-4': Model.gpt_4,
|
189 |
+
'gpt-4-32k': Model.gpt_4_32k,
|
190 |
+
'gpt-4-0613': Model.gpt_4_0613,
|
191 |
|
192 |
'claude-instant-v1-100k': Model.claude_instant_v1_100k,
|
193 |
'claude-v1-100k': Model.claude_v1_100k,
|
testing/readme_table.py
CHANGED
@@ -13,7 +13,15 @@ from g4f.Provider import (
|
|
13 |
H2o,
|
14 |
ChatgptLogin,
|
15 |
DeepAi,
|
16 |
-
GetGpt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
)
|
18 |
|
19 |
from urllib.parse import urlparse
|
@@ -33,7 +41,15 @@ providers = [
|
|
33 |
H2o,
|
34 |
ChatgptLogin,
|
35 |
DeepAi,
|
36 |
-
GetGpt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
]
|
38 |
|
39 |
# | Website| Provider| gpt-3.5-turbo | gpt-4 | Supports Stream | Status | Needs Auth |
|
@@ -41,12 +57,15 @@ print('| Website| Provider| gpt-3.5 | gpt-4 | Streaming | Status | Auth |')
|
|
41 |
print('| --- | --- | --- | --- | --- | --- | --- |')
|
42 |
|
43 |
for provider in providers:
|
|
|
44 |
parsed_url = urlparse(provider.url)
|
45 |
-
name = f"`g4f.Provider{provider.__name__.split('.')[-1]}`"
|
46 |
url = f'[{parsed_url.netloc}]({provider.url})'
|
47 |
has_gpt4 = 'βοΈ' if 'gpt-4' in provider.model else 'β'
|
48 |
has_gpt3_5 = 'βοΈ' if 'gpt-3.5-turbo' in provider.model else 'β'
|
49 |
streaming = 'βοΈ' if provider.supports_stream else 'β'
|
50 |
needs_auth = 'βοΈ' if provider.needs_auth else 'β'
|
51 |
|
52 |
-
|
|
|
|
|
|
13 |
H2o,
|
14 |
ChatgptLogin,
|
15 |
DeepAi,
|
16 |
+
GetGpt,
|
17 |
+
AItianhu,
|
18 |
+
EasyChat,
|
19 |
+
Acytoo,
|
20 |
+
DfeHub,
|
21 |
+
AiService,
|
22 |
+
BingHuan,
|
23 |
+
Wewordle,
|
24 |
+
ChatgptAi,
|
25 |
)
|
26 |
|
27 |
from urllib.parse import urlparse
|
|
|
41 |
H2o,
|
42 |
ChatgptLogin,
|
43 |
DeepAi,
|
44 |
+
GetGpt,
|
45 |
+
AItianhu,
|
46 |
+
EasyChat,
|
47 |
+
Acytoo,
|
48 |
+
DfeHub,
|
49 |
+
AiService,
|
50 |
+
BingHuan,
|
51 |
+
Wewordle,
|
52 |
+
ChatgptAi,
|
53 |
]
|
54 |
|
55 |
# | Website| Provider| gpt-3.5-turbo | gpt-4 | Supports Stream | Status | Needs Auth |
|
|
|
57 |
print('| --- | --- | --- | --- | --- | --- | --- |')
|
58 |
|
59 |
for provider in providers:

    parsed_url = urlparse(provider.url)
    name = f"`g4f.Provider.{provider.__name__.split('.')[-1]}`"
    url = f'[{parsed_url.netloc}]({provider.url})'
    # Markdown status symbols for the README table columns.
    has_gpt4 = '✔️' if 'gpt-4' in provider.model else '❌'
    has_gpt3_5 = '✔️' if 'gpt-3.5-turbo' in provider.model else '❌'
    streaming = '✔️' if provider.supports_stream else '❌'
    needs_auth = '✔️' if provider.needs_auth else '❌'

    # Bug fix: both branches were empty strings (mojibake-stripped emoji),
    # so the Status column was always blank regardless of provider.working.
    working = '✔️' if provider.working else '❌'

    print(f'| {url} | {name} | {has_gpt3_5} | {has_gpt4} | {streaming} | {working} | {needs_auth} |')
|
testing/test.py
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import g4f

# Exercise an explicitly chosen provider; flip `stream` to test streaming output.
stream = False
response = g4f.ChatCompletion.create(
    model='gpt-3.5-turbo',
    provider=g4f.Provider.Yqcloud,
    messages=[{"role": "user", "content": "hello"}],
    stream=stream,
)

if stream:
    for message in response:
        print(message)
else:
    print(response)
|