Update app.py
app.py CHANGED

@@ -15,7 +15,11 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 @spaces.GPU
 def generate(prompt, history):
     messages = [
-        {"role": "system", "content": "
+        {"role": "system", "content": """You are a professional translator. Your mission is to translate the given English into Chinese.
+        The output format should be a JSON, it only contains one field: zh representing Chinese translation results. Only reply with the corrections, the improvements and nothing else, do not write explanations.
+        This is an example: \n
+        <input>你好</input>\n
+        {"en": "Hello"}"""},
         {"role": "user", "content": prompt}
     ]
     text = tokenizer.apply_chat_template(
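For context, here is a minimal sketch of how this hunk could fit into the rest of app.py. Only the @spaces.GPU decorator, the messages list, and the apply_chat_template call appear in the diff above; the model_name value, the system-prompt constant, the generation parameters, and the Gradio ChatInterface wiring below are assumptions, not part of this commit.

    # Minimal sketch of the surrounding app.py, under the assumptions stated above.
    import gradio as gr
    import spaces
    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Assumption: any chat model with a chat template; the actual model_name is not shown in the diff.
    model_name = "Qwen/Qwen2-7B-Instruct"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name, torch_dtype="auto", device_map="auto"
    )

    # Stand-in for the translator system prompt added in the diff above (abridged here).
    SYSTEM_PROMPT = (
        "You are a professional translator. Your mission is to translate the given "
        "English into Chinese. ..."
    )

    @spaces.GPU
    def generate(prompt, history):
        messages = [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": prompt},
        ]
        # Render the chat messages into the model's expected prompt format.
        text = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        inputs = tokenizer([text], return_tensors="pt").to(model.device)
        output_ids = model.generate(**inputs, max_new_tokens=512)
        # Drop the prompt tokens so only the model's reply is decoded.
        reply_ids = output_ids[0][inputs.input_ids.shape[-1]:]
        return tokenizer.decode(reply_ids, skip_special_tokens=True)

    gr.ChatInterface(generate).launch()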