Tech-Meld committed on
Commit d69301c · verified · 1 Parent(s): 9b39101

Update app.py

Files changed (1)
  1. app.py +7 -7
app.py CHANGED
@@ -3,24 +3,24 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import time
 import random
 
-# Load the model and tokenizer
-model_id = "Tech-Meld/Hajax_Chat_1.0"
+# Load the model and tokenizer for GGUF
+model_id = "Tech-Meld/Hajax_Chat_1.0-Q3_K_S-GGUF"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(model_id)
 
 # --- Functions ---
 
 def get_response(input_text, temperature, top_p, top_k, max_length):
-    inputs = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors='pt')
+    inputs = tokenizer(input_text, return_tensors="pt")
     outputs = model.generate(
-        inputs,
+        input_ids=inputs["input_ids"],
+        attention_mask=inputs["attention_mask"],
         max_length=max_length,
-        pad_token_id=tokenizer.eos_token_id,
         temperature=temperature,
         top_p=top_p,
         top_k=top_k,
     )
-    response = tokenizer.decode(outputs[:, inputs.shape[-1]:][0], skip_special_tokens=True)
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
     return response
 
 def analyze_text(text):
@@ -82,7 +82,7 @@ iface = gr.Interface(
         gr.Label(label="Text Analysis", elem_id="analysis"),
     ],
     title="Chat with AI",
-    description="Engage in a conversation with our advanced AI model. Customize the response using various parameters.",
+    description="Engage in a conversation with our advanced model. Customize the response using various parameters.",
     theme="default", # Use a custom theme to override the default Gradio styling
     css=css, # Apply the CSS styles defined earlier
     layout="vertical",
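
For context, a minimal sketch of how the updated get_response could be exercised outside the Gradio interface, assuming app.py's module-level tokenizer and model load successfully; the sampling values below are illustrative and are not taken from the app:

# Minimal usage sketch (illustrative values, not part of the committed app.py).
# Assumes the module-level tokenizer and model loaded without error.
# Note: temperature/top_p/top_k only influence generate() when sampling is
# enabled in the model's generation config; otherwise decoding stays greedy.
reply = get_response(
    "Hello! What can you do?",  # input_text
    temperature=0.7,            # illustrative sampling temperature
    top_p=0.9,                  # illustrative nucleus-sampling cutoff
    top_k=50,                   # illustrative top-k cutoff
    max_length=128,             # cap on total tokens, prompt included
)
print(reply)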