Dhahlan2000 committed
Commit e88305b (verified) · Parent: 306ea0c

Update app.py

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -48,7 +48,7 @@ def transliterate_to_sinhala(text):
  # conv_model_name = "microsoft/Phi-3-mini-4k-instruct" # Use GPT-2 instead of the gated model
  # tokenizer = AutoTokenizer.from_pretrained(conv_model_name, trust_remote_code=True)
  # model = AutoModelForCausalLM.from_pretrained(conv_model_name, trust_remote_code=True).to(device)
- pipe1 = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")
+ pipe1 = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0").to(device)

  # client = InferenceClient("google/gemma-2b-it")

@@ -72,7 +72,7 @@ def conversation_predict(text):
  # outputs = model.generate(**input_ids)
  # return tokenizer.decode(outputs[0])

- outputs = pipe1(text, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+ outputs = pipe1(text, max_new_tokens=256, temperature=0.7, top_k=50, top_p=0.95)
  return outputs[0]["generated_text"]

  def ai_predicted(user_input):
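
For anyone reproducing this change locally, two transformers behaviors are worth keeping in mind: pipelines normally receive their device at construction time via the device= or device_map= arguments, and temperature, top_k, and top_p only influence generation when do_sample=True is passed. The sketch below is a minimal, self-contained illustration of that construction and call pattern, not the app's exact code; the prompt string and the device-selection line are assumptions for the example.

    # Minimal sketch, assuming torch and a recent transformers release.
    import torch
    from transformers import pipeline

    # Assumption for the example: pick CUDA when available, else CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    pipe1 = pipeline(
        "text-generation",
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        torch_dtype=torch.bfloat16,  # optional: lower memory on bf16-capable hardware
        device=device,               # device is fixed here, at construction time
    )

    # temperature / top_k / top_p take effect only with do_sample=True;
    # without it, decoding is greedy and these knobs are ignored.
    outputs = pipe1(
        "Hello, how are you?",  # illustrative prompt, not from app.py
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
    )
    print(outputs[0]["generated_text"])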