kuro223 committed on
Commit 8eaf73d · 1 Parent(s): b9c3605
g4f/Provider/Providers/AItianhu.py ADDED
@@ -0,0 +1,36 @@
+ import os, requests
+ from ...typing import sha256, Dict, get_type_hints
+ import json
+
+ url = "https://www.aitianhu.com/api/chat-process"
+ model = ['gpt-3.5-turbo']
+ supports_stream = False
+ needs_auth = False
+
+ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+     base = ''
+     for message in messages:
+         base += '%s: %s\n' % (message['role'], message['content'])
+     base += 'assistant:'
+
+     headers = {
+         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+     }
+     data = {
+         "prompt": base,
+         "options": {},
+         "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+         "temperature": 0.8,
+         "top_p": 1
+     }
+     response = requests.post(url, headers=headers, json=data)
+     if response.status_code == 200:
+         lines = response.text.strip().split('\n')
+         res = json.loads(lines[-1])
+         yield res['text']
+     else:
+         print(f"Error Occurred::{response.status_code}")
+         return None
+
+ params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
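Like the other providers added in this commit, _create_completion is written as a generator even though supports_stream is False, so a caller joins whatever it yields into a single reply. A minimal driver sketch (the import path and the example messages are illustrative, not part of the commit):

    # Hypothetical usage sketch; assumes the package is installed so that
    # g4f.Provider.Providers.AItianhu is importable.
    from g4f.Provider.Providers import AItianhu

    messages = [{"role": "user", "content": "Say hello in one sentence."}]
    # The generator yields the reply text; join the chunks into one string.
    reply = "".join(AItianhu._create_completion("gpt-3.5-turbo", messages, stream=False))
    print(reply)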
g4f/Provider/Providers/Acytoo.py ADDED
@@ -0,0 +1,41 @@
+ import os, requests
+ from ...typing import sha256, Dict, get_type_hints
+ import json
+
+ url = "https://chat.acytoo.com/api/completions"
+ model = ['gpt-3.5-turbo']
+ supports_stream = False
+ needs_auth = False
+
+ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+     base = ''
+     for message in messages:
+         base += '%s: %s\n' % (message['role'], message['content'])
+     base += 'assistant:'
+
+     headers = {
+         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+     }
+     data = {
+         "key": "",
+         "model": "gpt-3.5-turbo",
+         "messages": [
+             {
+                 "role": "user",
+                 "content": base,
+                 "createdAt": 1688518523500
+             }
+         ],
+         "temperature": 1,
+         "password": ""
+     }
+
+     response = requests.post(url, headers=headers, data=json.dumps(data))
+     if response.status_code == 200:
+         yield response.text
+     else:
+         print(f"Error Occurred::{response.status_code}")
+         return None
+
+ params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
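The params line at the bottom of each of these modules is introspection used for logging only; given the signature above it should evaluate to roughly the string below (an expected value, not output captured from the commit):

    g4f.Providers.Acytoo supports: (model: str, messages: list, stream: bool)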
g4f/Provider/Providers/DFEHub.py ADDED
@@ -0,0 +1,44 @@
+ import os, requests
+ from ...typing import sha256, Dict, get_type_hints
+ import json
+
+ url = "https://chat.dfehub.com/api/chat"
+ model = ['gpt-3.5-turbo']
+ supports_stream = False
+ needs_auth = False
+
+ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+     base = ''
+     for message in messages:
+         base += '%s: %s\n' % (message['role'], message['content'])
+     base += 'assistant:'
+
+     headers = {
+         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+     }
+     data = {
+         "model": {
+             "id": "gpt-3.5-turbo",
+             "name": "GPT-3.5",
+             "maxLength": 12000,
+             "tokenLimit": 4000
+         },
+         "messages": [
+             {
+                 "role": "user",
+                 "content": base
+             }
+         ],
+         "key": "",
+         "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+         "temperature": 1
+     }
+     response = requests.post(url, headers=headers, data=json.dumps(data))
+     if response.status_code == 200:
+         yield response.text
+     else:
+         print(f"Error Occurred::{response.status_code}")
+         return None
+
+ params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/EasyChat.py ADDED
@@ -0,0 +1,43 @@
+ import os, requests
+ from ...typing import sha256, Dict, get_type_hints
+ import json
+
+ url = "https://free.easychat.work/api/openai/v1/chat/completions"
+ model = ['gpt-3.5-turbo']
+ supports_stream = False
+ needs_auth = False
+
+ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+     ''' limited to 240 messages/hour'''
+     base = ''
+     for message in messages:
+         base += '%s: %s\n' % (message['role'], message['content'])
+     base += 'assistant:'
+
+     headers = {
+         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+     }
+
+     data = {
+         "messages": [
+             {"role": "system", "content": "You are ChatGPT, a large language model trained by OpenAI."},
+             {"role": "user", "content": base}
+         ],
+         "stream": False,
+         "model": "gpt-3.5-turbo",
+         "temperature": 0.5,
+         "presence_penalty": 0,
+         "frequency_penalty": 0,
+         "top_p": 1
+     }
+
+     response = requests.post(url, headers=headers, json=data)
+     if response.status_code == 200:
+         response = response.json()
+         yield response['choices'][0]['message']['content']
+     else:
+         print(f"Error Occurred::{response.status_code}")
+         return None
+
+ params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
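Unlike AItianhu, Acytoo and DFEHub, which yield the raw response body, this provider expects an OpenAI-style chat completion object and yields only the message content. The shape it relies on is roughly the following (field values are illustrative, not captured from the service):

    # Illustrative response shape assumed by the parsing above; values are made up.
    {
        "choices": [
            {"message": {"role": "assistant", "content": "Hello! How can I help?"}}
        ]
    }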
g4f/Provider/Providers/Lockchat.py CHANGED
@@ -2,7 +2,7 @@ import requests
  import os
  import json
  from ...typing import sha256, Dict, get_type_hints
- url = 'http://super.lockchat.app'
+ url = 'http://supertest.lockchat.app'
  model = ['gpt-4', 'gpt-3.5-turbo']
  supports_stream = True
  needs_auth = False
@@ -18,7 +18,7 @@ def _create_completion(model: str, messages: list, stream: bool, temperature: fl
      headers = {
          "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
      }
-     response = requests.post("http://super.lockchat.app/v1/chat/completions?auth=FnMNPlwZEnGFqvEc9470Vw==",
+     response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
          json=payload, headers=headers, stream=True)
      for token in response.iter_lines():
          if b'The model: `gpt-4` does not exist' in token:
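Both hunks end just before the token handling. For orientation: Lockchat streams OpenAI-style SSE chunks, so the remainder of the loop (outside this diff) typically unpacks each line along these lines; this is a hedged sketch of the common pattern, not the literal file contents:

    # Hedged sketch of unpacking an OpenAI-style streamed chunk; the exact
    # code in Lockchat.py lies outside the hunks shown above.
    if b"content" in token:
        chunk = json.loads(token.decode("utf-8").split("data: ")[1])
        delta = chunk["choices"][0]["delta"].get("content")
        if delta:
            yield delta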
g4f/Provider/__init__.py CHANGED
@@ -14,7 +14,11 @@ from .Providers import (
      H2o,
      ChatgptLogin,
      DeepAi,
-     GetGpt
+     GetGpt,
+     AItianhu,
+     EasyChat,
+     Acytoo,
+     DFEHub,
  )

  Palm = Bard
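With the four new modules exported here, they can be selected explicitly from the top-level API. A minimal sketch, assuming the usual g4f interface of this period (ChatCompletion.create with a provider argument); the prompt and chosen provider are illustrative:

    # Hypothetical usage of one of the newly registered providers.
    import g4f
    from g4f.Provider import Acytoo  # or AItianhu, EasyChat, DFEHub

    response = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        provider=Acytoo,
        messages=[{"role": "user", "content": "Hi"}],
        stream=False,
    )
    print(response)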