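# app.py for a Hugging Face Space: a Gradio demo that extracts Chinese question
# words (e.g. 哪个, 什么) from a question with a fine-tuned Flair SequenceTagger,
# and embeds the related svjack/chatglm3-few-shot Space in an iframe.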
import os
'''
os.system("pip uninstall httpx -y")
os.system("pip uninstall pydantic -y")
os.system("pip uninstall gradio -y")
os.system("pip install -U gradio")
'''
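# transformers is pinned at install time; version 4.30.2 is assumed to be the
# release the bundled Flair model was trained and serialized against.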
os.system("pip install transformers==4.30.2")
'''
import subprocess
out = subprocess.check_output("pip --help")
print(out.decode())
out = subprocess.check_output("pip --version")
print(out.decode())
'''
os.system("pip install huggingface_hub")
from huggingface_hub import space_info
import sys
import re
from flair.models import SequenceTagger
from flair.data import Sentence
# Path to the fine-tuned Flair NER model shipped with the Space.
flair_ner_model_path = "flair_model"
assert os.path.exists(flair_ner_model_path)
loaded_model: SequenceTagger = SequenceTagger.load(os.path.join(flair_ner_model_path, "best-model.pt"))
def one_item_process(r, loaded_model):
    """Tag a single record with the Flair model and extract the ASK (question-word) span."""
    # assert type(r) == type(pd.Series())
    zh = r["question"]
    zh = zh.replace(" ", "").strip()
    # Flair expects whitespace-separated tokens, so split the question into characters.
    sentence = Sentence(" ".join(list(zh)))
    loaded_model.predict(sentence)
    sentence_str = str(sentence)
    # Pull the first span tagged ASK and the sentence text out of the string representation.
    ask_spans = re.findall(r'\["(.+?)"/ASK\]', sentence_str)
    sentence = re.findall(r'Sentence: "(.+?)"', sentence_str)
    ask_spans = ask_spans[0] if ask_spans else ""
    sentence = sentence[0] if sentence else ""
    # Remove the character-level spaces introduced above.
    ask_spans, sentence = map(lambda x: x.replace(" ", "").strip(), [ask_spans, sentence])
    return ask_spans, sentence
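# Hypothetical usage (actual output depends on the trained tagger):
#   one_item_process({"question": "宁波在哪个省份?"}, loaded_model)
# would return the span tagged ASK (e.g. "哪个") and the de-spaced sentence text.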
import gradio as gr
example_sample = [
    "宁波在哪个省份?",    # "Which province is Ningbo in?"
    "美国的通货是什么?",  # "What is the currency of the United States?"
]
def demo_func(question):
    assert isinstance(question, str)
    ask_spans, sentence = one_item_process(
        {"question": question},
        loaded_model
    )
    return {
        "Question words": ask_spans
    }
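# demo_func returns a dict because the Gradio Interface below renders its output
# with the "json" component.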
# Build a markdown description linking to the related ChatGLM3 few-shot Space.
markdown_exp_size = "##"
lora_repo = "svjack/chatglm3-few-shot"
lora_repo_link = "svjack/chatglm3-few-shot/?input_list_index=2"
emoji_info = space_info(lora_repo).__dict__["cardData"]["emoji"]
space_cnt = 1
task_name = "[---Chinese Question Words extractor---]"
description = f"{markdown_exp_size} {task_name} few-shot prompt in the ChatGLM3 Few Shot Space (click Submit to activate): [{lora_repo_link}](https://huggingface.co/spaces/{lora_repo_link}) {emoji_info}"
demo = gr.Interface(
    fn=demo_func,
    inputs="text",
    outputs="json",
    title="Chinese Question Words extractor 🐱 demonstration",
    # description=description,
    examples=example_sample if example_sample else None,
    cache_examples=False
)
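# Embed the ChatGLM3 few-shot Space referenced in the description above directly
# in the page so users can try the corresponding few-shot prompt.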
with demo:
    gr.HTML(
        '''
        <div style="justify-content: center; display: flex;">
            <iframe
                src="https://svjack-chatglm3-few-shot-demo.hf.space/?input_list_index=2"
                frameborder="0"
                width="1400"
                height="768"
            ></iframe>
        </div>
        '''
    )

demo.launch(server_name=None, server_port=None)