AC-Angelo93 committed on
Commit 301eb87 · verified · 1 parent: 0356a3f

Update app.py

Files changed (1): app.py (+18 -15)
app.py CHANGED
@@ -1,10 +1,9 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+tokenizer = AutoTokenizer.from_pretrained("Fastweb/FastwebMIIA-7B")
+model = AutoModelForCausalLM.from_pretrained("Fastweb/FastwebMIIA-7B")
 
 
 def respond(
@@ -25,19 +24,23 @@ def respond(
 
     messages.append({"role": "user", "content": message})
 
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
+    # Format messages for the model
+    input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
+
+    # Generate response
+    outputs = model.generate(
+        input_ids,
+        max_new_tokens=max_tokens,
         temperature=temperature,
         top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
+        do_sample=True,
+        pad_token_id=tokenizer.eos_token_id
+    )
+
+    # Decode the generated tokens, skipping the input tokens
+    response = tokenizer.decode(outputs[0][input_ids.shape[1]:], skip_special_tokens=True)
+    yield response
 
 
 """
 