create
- .gitattributes +3 -0
- README.md +14 -14
- app.py +130 -0
- cn_example.jpg +3 -0
- en_example.jpg +3 -0
- jp_example.jpg +0 -0
- requirements.txt +5 -0
- simfang.ttf +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+cn_example.jpg filter=lfs diff=lfs merge=lfs -text
+en_example.jpg filter=lfs diff=lfs merge=lfs -text
+simfang.ttf filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,14 +1,14 @@
----
-title: PP-OCRv5 Online Demo
-emoji: π
-colorFrom: purple
-colorTo: green
-sdk: gradio
-sdk_version: 5.30.0
-app_file: app.py
-pinned: false
-license: apache-2.0
-short_description: Universal-Scene Text Recognition Model with High-Accuracy
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+---
+title: PP-OCRv5 Online Demo
+emoji: π
+colorFrom: purple
+colorTo: green
+sdk: gradio
+sdk_version: 5.30.0
+app_file: app.py
+pinned: false
+license: apache-2.0
+short_description: Universal-Scene Text Recognition Model with High-Accuracy
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,130 @@
+import atexit
+import functools
+from queue import Queue
+from threading import Event, Thread
+
+from paddleocr import PaddleOCR, draw_ocr
+from PIL import Image
+import gradio as gr
+
+
+LANG_CONFIG = {
+    "ch": {"num_workers": 2},
+    "en": {"num_workers": 2},
+    "fr": {"num_workers": 1},
+    "german": {"num_workers": 1},
+    "korean": {"num_workers": 1},
+    "japan": {"num_workers": 1},
+}
+CONCURRENCY_LIMIT = 8
+
+
+class PaddleOCRModelManager(object):
+    def __init__(self,
+                 num_workers,
+                 model_factory):
+        super().__init__()
+        self._model_factory = model_factory
+        self._queue = Queue()
+        self._workers = []
+        self._model_initialized_event = Event()
+        for _ in range(num_workers):
+            worker = Thread(target=self._worker, daemon=False)
+            worker.start()
+            self._model_initialized_event.wait()
+            self._model_initialized_event.clear()
+            self._workers.append(worker)
+
+    def infer(self, *args, **kwargs):
+        # XXX: Should I use a more lightweight data structure, say, a future?
+        result_queue = Queue(maxsize=1)
+        self._queue.put((args, kwargs, result_queue))
+        success, payload = result_queue.get()
+        if success:
+            return payload
+        else:
+            raise payload
+
+    def close(self):
+        for _ in self._workers:
+            self._queue.put(None)
+        for worker in self._workers:
+            worker.join()
+
+    def _worker(self):
+        model = self._model_factory()
+        self._model_initialized_event.set()
+        while True:
+            item = self._queue.get()
+            if item is None:
+                break
+            args, kwargs, result_queue = item
+            try:
+                result = model.ocr(*args, **kwargs)
+                result_queue.put((True, result))
+            except Exception as e:
+                result_queue.put((False, e))
+            finally:
+                self._queue.task_done()
+
+
+def create_model(lang):
+    return PaddleOCR(lang=lang, use_angle_cls=True, use_gpu=False)
+
+
+model_managers = {}
+for lang, config in LANG_CONFIG.items():
+    model_manager = PaddleOCRModelManager(config["num_workers"], functools.partial(create_model, lang=lang))
+    model_managers[lang] = model_manager
+
+
+def close_model_managers():
+    for manager in model_managers.values():
+        manager.close()
+
+
+# XXX: Not sure if gradio allows adding custom teardown logic
+atexit.register(close_model_managers)
+
+
+def inference(img, lang):
+    ocr = model_managers[lang]
+    result = ocr.infer(img, cls=True)[0]
+    img_path = img
+    image = Image.open(img_path).convert("RGB")
+    boxes = [line[0] for line in result]
+    txts = [line[1][0] for line in result]
+    scores = [line[1][1] for line in result]
+    im_show = draw_ocr(image, boxes, txts, scores,
+                       font_path="./simfang.ttf")
+    return im_show
+
+
+title = 'PaddleOCR'
+description = '''
+- Gradio demo for PaddleOCR. PaddleOCR demo supports Chinese, English, French, German, Korean and Japanese.
+- To use it, simply upload your image and choose a language from the dropdown menu, or click one of the examples to load them. Read more at the links below.
+- [Docs](https://paddlepaddle.github.io/PaddleOCR/), [Github Repository](https://github.com/PaddlePaddle/PaddleOCR).
+'''
+
+examples = [
+    ['en_example.jpg','en'],
+    ['cn_example.jpg','ch'],
+    ['jp_example.jpg','japan'],
+]
+
+css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
+gr.Interface(
+    inference,
+    [
+        gr.Image(type='filepath', label='Input'),
+        gr.Dropdown(choices=list(LANG_CONFIG.keys()), value='en', label='language')
+    ],
+    gr.Image(type='pil', label='Output'),
+    title=title,
+    description=description,
+    examples=examples,
+    cache_examples=False,
+    css=css,
+    concurrency_limit=CONCURRENCY_LIMIT,
+).launch(debug=False)
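The heart of app.py is PaddleOCRModelManager: each worker thread builds and owns its own PaddleOCR instance, and every call to infer() is serialized through a queue, so a single model object is never shared between concurrent Gradio requests. Below is a minimal, self-contained sketch of that same worker-pool pattern with a dummy model factory standing in for PaddleOCR; the names ModelManager and fake_model_factory are hypothetical and used only for illustration.

```python
# Minimal sketch of the worker-pool pattern used in app.py, with a dummy
# "model" standing in for PaddleOCR. Hypothetical names, illustration only.
import functools
from queue import Queue
from threading import Event, Thread


class ModelManager:
    """One model instance per worker thread; requests are serialized via a queue."""

    def __init__(self, num_workers, model_factory):
        self._model_factory = model_factory
        self._queue = Queue()
        self._workers = []
        self._ready = Event()
        for _ in range(num_workers):
            worker = Thread(target=self._worker)
            worker.start()
            self._ready.wait()   # block until this worker's model is built
            self._ready.clear()
            self._workers.append(worker)

    def infer(self, *args, **kwargs):
        result_queue = Queue(maxsize=1)
        self._queue.put((args, kwargs, result_queue))
        ok, payload = result_queue.get()
        if ok:
            return payload
        raise payload

    def close(self):
        for _ in self._workers:
            self._queue.put(None)       # one sentinel per worker
        for worker in self._workers:
            worker.join()

    def _worker(self):
        model = self._model_factory()   # each worker owns its own model
        self._ready.set()
        while True:
            item = self._queue.get()
            if item is None:
                break
            args, kwargs, result_queue = item
            try:
                result_queue.put((True, model(*args, **kwargs)))
            except Exception as e:
                result_queue.put((False, e))


def fake_model_factory(lang):
    # Stand-in for functools.partial(create_model, lang=lang) in app.py.
    return lambda text: f"[{lang}] recognized: {text}"


if __name__ == "__main__":
    manager = ModelManager(2, functools.partial(fake_model_factory, "en"))
    print(manager.infer("hello"))   # -> "[en] recognized: hello"
    manager.close()
```

As in app.py, close() queues one None sentinel per worker so every thread eventually exits its loop and can be joined at shutdown (app.py hooks this up via atexit).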
cn_example.jpg
ADDED
Binary image file, tracked with Git LFS (preview omitted)
en_example.jpg
ADDED
Binary image file, tracked with Git LFS (preview omitted)
jp_example.jpg
ADDED
Binary image file (preview omitted)
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+Pillow
+Gradio
+requests
+paddlepaddle
+paddleocr
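requirements.txt leaves all five dependencies unpinned, so the Space relies on pip resolving a paddleocr release whose ocr() output matches what inference() in app.py unpacks: one list per input image, with each detected line shaped like [box_points, (text, confidence)]. The sketch below walks through that unpacking against a hypothetical, hard-coded result (no model is run); sample_page and its values are made up for illustration.

```python
# Hypothetical OCR result for one image, in the shape app.py's inference() expects:
# each detected line is [box_points, (text, confidence)].
sample_page = [
    [[[10, 10], [200, 10], [200, 40], [10, 40]], ("Hello", 0.98)],
    [[[10, 60], [180, 60], [180, 90], [10, 90]], ("World", 0.95)],
]

boxes = [line[0] for line in sample_page]      # quadrilateral corner points per line
txts = [line[1][0] for line in sample_page]    # recognized text strings
scores = [line[1][1] for line in sample_page]  # recognition confidences

print(txts)    # ['Hello', 'World']
print(scores)  # [0.98, 0.95]
```

If a future paddleocr release changes this result structure, the list comprehensions in inference() and the draw_ocr call would be the first places to break; pinning versions in requirements.txt is one way to guard against that.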
simfang.ttf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:521c6f7546b4eb64fa4b0cd604bbd36333a20a57e388c8e2ad2ad07b9e593864
+size 10576012