hsuwill000 committed on
Commit
49bafa9
·
verified ·
1 Parent(s): 32497e2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -2
app.py CHANGED
@@ -51,7 +51,9 @@ config.top_p = 0.9
51
  config.top_k = 30
52
 
53
  pipe = ov_genai.LLMPipeline(model_path, "CPU")
54
- pipe.get_tokenizer().set_chat_template(pipe.get_tokenizer().chat_template)
 
 
55
 
56
  # 讀取剛剛存的 txt 檔案
57
  documents = SimpleDirectoryReader("./data").load_data()
@@ -98,7 +100,12 @@ def generate_stream(prompt):
98
 
99
  def worker():
100
  nonlocal tps_result
101
- gen_result = pipe.generate([final_prompt], streamer=streamer, config=config)
 
 
 
 
 
102
  tps = gen_result.perf_metrics.get_throughput().mean
103
  tps_result = f"{tps:.2f} tokens/s"
104
  q.put(None) # 結束符號
 
51
  config.top_k = 30
52
 
53
  pipe = ov_genai.LLMPipeline(model_path, "CPU")
54
+ tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
55
+
56
+ #pipe.get_tokenizer().set_chat_template(pipe.get_tokenizer().chat_template)
57
 
58
  # 讀取剛剛存的 txt 檔案
59
  documents = SimpleDirectoryReader("./data").load_data()
 
100
 
101
  def worker():
102
  nonlocal tps_result
103
+ text = tokenizer.apply_chat_template(
104
+ final_prompt,
105
+ tokenize=False,
106
+ add_generation_prompt=True
107
+ )
108
+ gen_result = pipe.generate([text], streamer=streamer, config=config)
109
  tps = gen_result.perf_metrics.get_throughput().mean
110
  tps_result = f"{tps:.2f} tokens/s"
111
  q.put(None) # 結束符號