#Meow
import openai as closeai
from flask import Flask, request, Response, jsonify
import os
from rich import print
import json
import requests
from time import sleep

# Runtime configuration, read entirely from environment variables.
settings = {
    'node': {
        'id': os.environ.get('nodeId'),
        'models': os.environ.get('nodeModel')
    },
    'api': {
        'host': os.environ.get('apibase'),
        'key': os.environ.get('apikey')
    },
    'security': {
        'passw': os.environ.get('apipassw')
    },
    'tg': {
        'token': os.environ.get('tgtoken'),
        'chat': os.environ.get('tgchat'),
        'topic': os.environ.get('tgtopic')
    },
    'web': {
        # Flask expects an int port and a bool debug flag; env vars arrive as strings.
        'port': int(os.environ.get('webport', 7860)),
        'host': os.environ.get('webhost', '0.0.0.0'),
        'debug': os.environ.get('webdebug', 'false').lower() in ('1', 'true', 'yes')
    }
}
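# Illustrative only: an environment this node might be started with. The variable
# names come from the settings dict above; every value below is a placeholder,
# not a real credential or endpoint.
#
#   export nodeId=node-1 nodeModel=gpt-4
#   export apibase=https://example-proxy.invalid/v1 apikey=sk-placeholder
#   export apipassw=change-me
#   export tgtoken=123456:ABC tgchat=-100123456 tgtopic=1
#   export webport=7860 webhost=0.0.0.0 webdebug=false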

def send_telegram_request(method, params=None):
    """Call a Telegram Bot API method and return the decoded JSON response."""
    url = f'https://api.telegram.org/bot{settings["tg"]["token"]}/{method}'
    response = requests.post(url, json=params)
    data = response.json()
    print(data)
    return data

def send_message(text):
    """Report a message (here: upstream errors) to the configured Telegram chat/topic."""
    return send_telegram_request('sendMessage', {
        'chat_id': settings["tg"]["chat"],
        'text': text,
        'message_thread_id': settings["tg"]["topic"]
    })

app = Flask(__name__)

# Point the (pre-1.0) openai SDK at the upstream base URL and key.
closeai.api_base = settings["api"]["host"]
closeai.api_key = settings["api"]["key"]

@app.route("/")
def index():
    return f'Hi, its a node {settings["node"]["id"]} with {settings["node"]["models"]}.\n\n Its just api proxy for openai if your ip banned by openai or other reverse proxy. This space is not maintained, pls dont use it'

@app.route("/chat/completions", methods=['POST'])
def chat_completions():
    
    streaming = request.json.get('stream', False)

    auth = request.headers.get('Authorization', 'Bearer anonim')
    if auth != f'Bearer {settings["security"]["passw"]}':
        if streaming:
            er = 'Not authorized'
            def errorStream(er):
                yield 'data: %s\n\n' %  json.dumps({"status":"!=200","error":str(er)}, separators=(',' ':'))
            return app.response_class(errorStream(er), mimetype='text/event-stream')
        else:
            return 'Not authorized'
    
    model = request.json.get('model', 'gpt-4')
    messages = request.json.get('messages')
    response = ''

    try:
        response = closeai.ChatCompletion.create(model=model, stream=streaming, messages=messages, allow_fallback=False)
    except Exception as er:
        print(er)
        send_message(str(er))
        # Back off briefly when the upstream reports a rate limit.
        if '429' in str(er):
            sleep(45)
        def errorStream(er):
            yield 'data: %s\n\n' % json.dumps({"status": "!=200", "error": str(er)}, separators=(',', ':'))
        return app.response_class(errorStream(er), mimetype='text/event-stream')
    if not streaming:
        return {
            'model': model,
            'result': response["choices"][0]["message"]["content"],
            **response
        }

    def stream():
        # Relay upstream streaming chunks as server-sent events.
        for token in response:
            completion_data = {
                'model': model,
                'token': token,
                'status': 200,
                **token
            }

            yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))

    return app.response_class(stream(), mimetype='text/event-stream')


@app.route("/v1/chat/completions", methods=['POST'])
def v_chat_completions():
    streaming = request.json.get('stream', False)

    # Same bearer-token check as above, but failures are returned as plain JSON.
    auth = request.headers.get('Authorization', 'Bearer anonim')
    if auth != f'Bearer {settings["security"]["passw"]}':
        return json.dumps({'error': 'Not authorized'})

    model = request.json.get('model', 'gpt-3.5-turbo')
    messages = request.json.get('messages')
    response = ''

    try:
        response = closeai.ChatCompletion.create(model=model, stream=streaming, messages=messages)
    except Exception as er:
        send_message(str(er))
        return json.dumps({"error":str(er)})
    if not streaming:
        return json.dumps(response)

    def stream():
        # Pass the upstream chunks through unchanged as server-sent events.
        for token in response:
            completion_data = token
            yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))

    return app.response_class(stream(), mimetype='text/event-stream')


if __name__ == '__main__':
    app.run(**settings['web'])
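
# A minimal client sketch (assumptions: the node listens on http://localhost:7860
# and apipassw is set to "change-me"; both are placeholders to adjust):
#
#   import requests, json
#
#   resp = requests.post(
#       'http://localhost:7860/v1/chat/completions',
#       headers={'Authorization': 'Bearer change-me'},
#       json={
#           'model': 'gpt-3.5-turbo',
#           'stream': False,
#           'messages': [{'role': 'user', 'content': 'Hello'}],
#       },
#   )
#   # The non-streaming /v1 route returns the upstream completion as a JSON string.
#   print(json.loads(resp.text))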