import time

import requests
import uvicorn
from fastapi import BackgroundTasks, FastAPI

import model
from ocr import ocr, get_ocr_model, init_chatgpt

app = FastAPI()


def write_scan_model_result(req: model.TxtImageScanRequest):
    # Run OCR on the requested image and POST the result to the caller's callback URL.
    ret = model.ConversationCallback(id=req.id, conversation=[], extractTags=[])
    start_time = time.time()
    if 'Scan' in req.tasks:
        # Load an OCR reader for the requested languages (e.g. 'fr', 'es', 'de', 'ru', 'ja', 'ko').
        reader = get_ocr_model(req.languages)
        result, extract_tags = ocr(reader, chatgpt, req.fileUrl)
        for conv in result:
            ret.conversation.append(model.Conversation(question=conv['question'], answer=conv['answer']))
        for item in extract_tags:
            ret.extractTags.append(model.Tag(tag=item['tag'], confidence=item['confidence']))
        end_time = time.time()
        print("OCR processing time (s):", end_time - start_time)
    print(ret)
    try:
        # Send the callback body as JSON. ret.json() returns an already-encoded string, so
        # passing it to json= would double-encode it; use the model's dict instead. A timeout
        # keeps an unreachable callback endpoint from tying up the worker indefinitely.
        requests.post(req.callbackUrl, json=ret.dict(), timeout=30)
    except Exception as ex:
        print(ex)
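
# Illustrative shape of the callback body (inferred from the fields populated above;
# actual field types depend on model.ConversationCallback):
#
#   {
#       "id": ...,
#       "conversation": [{"question": "...", "answer": "..."}],
#       "extractTags": [{"tag": "...", "confidence": ...}]
#   }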


@app.post("/img-txt")
async def model_scan_handler(req: model.TxtImageScanRequest, background_tasks: BackgroundTasks):
    # Queue the (potentially slow) OCR work and return immediately; the result is delivered
    # asynchronously to req.callbackUrl by write_scan_model_result. See the example call below.
    background_tasks.add_task(write_scan_model_result, req)
    return model.TxtImageScanResponse(ok=True, error="")
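
# Example invocation of this endpoint (hypothetical values; the field names mirror the
# model.TxtImageScanRequest attributes read above, and the types are guesses from how
# they are used):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:7860/img-txt",
#       json={
#           "id": "42",
#           "tasks": ["Scan"],
#           "languages": ["en"],
#           "fileUrl": "https://example.com/chat.png",
#           "callbackUrl": "https://example.com/ocr-callback",
#       },
#   )
#   # -> {"ok": true, "error": ""}; the OCR result is later POSTed to callbackUrl
#   #    as a model.ConversationCallback payload.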


# Initialise the ChatGPT client once at import time so the background OCR task can use it
# whether the server is started with `python` or an external ASGI runner.
chatgpt = init_chatgpt()

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)