ciyidogan committed · Commit d51553a · verified · Parent: 75915c2

Update llm_model.py

Files changed (1): llm_model.py (+9 -9)
llm_model.py CHANGED
@@ -12,30 +12,30 @@ eos_token_id = None
 class Message(BaseModel):
     user_input: str

-def setup_model(service_config):
+def setup_model(s_config):
     try:
         log("🧠 setup_model() started")
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         log(f"📡 Device in use: {device}")
-        tokenizer = AutoTokenizer.from_pretrained(service_config.MODEL_BASE, use_fast=False)
+        tokenizer = AutoTokenizer.from_pretrained(s_config.MODEL_BASE, use_fast=False)
         log("📦 Tokenizer loaded. Downloading main model...")
-        model = AutoModelForCausalLM.from_pretrained(service_config.MODEL_BASE, torch_dtype=torch.float32).to(device)
+        model = AutoModelForCausalLM.from_pretrained(s_config.MODEL_BASE, torch_dtype=torch.float32).to(device)
         log("📦 Main model downloaded and loaded. Calling eval()...")
         tokenizer.pad_token = tokenizer.pad_token or tokenizer.eos_token
         model.config.pad_token_id = tokenizer.pad_token_id
         eos_token_id = tokenizer("<|im_end|>", add_special_tokens=False)["input_ids"][0]
         model.eval()
         log("✅ eval() called on main model")
-        log(f"📦 Downloading intent model: {service_config.INTENT_MODEL_ID}")
-        _ = AutoTokenizer.from_pretrained(service_config.INTENT_MODEL_ID)
-        _ = AutoModelForSequenceClassification.from_pretrained(service_config.INTENT_MODEL_ID)
+        log(f"📦 Downloading intent model: {s_config.INTENT_MODEL_ID}")
+        _ = AutoTokenizer.from_pretrained(s_config.INTENT_MODEL_ID)
+        _ = AutoModelForSequenceClassification.from_pretrained(s_config.INTENT_MODEL_ID)
         log("✅ Intent model cached.")
         log("✔️ Model loaded successfully and ready for chat.")
     except Exception as e:
         log(f"❌ setup_model() error: {e}")
         traceback.print_exc()

-async def generate_response(text, service_config):
+async def generate_response(text, app_config):
     messages = [{"role": "user", "content": text}]
     encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
     eos_token = tokenizer("<|im_end|>", add_special_tokens=False)["input_ids"][0]
@@ -47,14 +47,14 @@ async def generate_response(text, service_config):
         input_ids=input_ids,
         attention_mask=attention_mask,
         max_new_tokens=128,
-        do_sample=service_config.USE_SAMPLING,
+        do_sample=app_config.USE_SAMPLING,
         eos_token_id=eos_token,
         pad_token_id=tokenizer.pad_token_id,
         return_dict_in_generate=True,
         output_scores=True
     )

-    if not service_config.USE_SAMPLING:
+    if not app_config.USE_SAMPLING:
         scores = torch.stack(output.scores, dim=1)
         probs = torch.nn.functional.softmax(scores[0], dim=-1)
         top_conf = probs.max().item()
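The commit is a pure parameter rename (service_config becomes s_config in setup_model and app_config in generate_response); both functions still read the same three attributes off the object they receive. As a minimal sketch, any config exposing those attributes satisfies both call sites — the dataclass below and its field values are illustrative assumptions, not the repository's actual settings:

# Hypothetical config for illustration; the attribute names come from the
# diff, but the dataclass and its values are assumptions.
from dataclasses import dataclass

@dataclass
class ServiceConfig:
    MODEL_BASE: str = "org/chat-model-base"          # placeholder model id
    INTENT_MODEL_ID: str = "org/intent-classifier"   # placeholder model id
    USE_SAMPLING: bool = False                       # greedy decoding enables the confidence branch

config = ServiceConfig()
setup_model(config)  # passed positionally, so the rename requires no call-site change

Because both functions take the config positionally, only keyword call sites such as setup_model(service_config=config) would break under this rename.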
 
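For context on the confidence branch at the end of the second hunk: with return_dict_in_generate=True and output_scores=True, output.scores is a tuple of per-step logit tensors, each of shape [batch, vocab]. Stacking them on dim=1 and applying softmax turns every generated step into a probability distribution, and probs.max() then picks the single highest token probability across all steps. A self-contained sketch with dummy logits in place of output.scores (no model download needed):

import torch

# Stand-in for output.scores: one [batch, vocab] logits tensor per generated step.
num_steps, vocab_size = 4, 10
scores_tuple = tuple(torch.randn(1, vocab_size) for _ in range(num_steps))

scores = torch.stack(scores_tuple, dim=1)               # [batch, steps, vocab]
probs = torch.nn.functional.softmax(scores[0], dim=-1)  # per-step distributions for batch item 0
top_conf = probs.max().item()                           # highest single-token probability over all steps
print(f"top_conf={top_conf:.3f}")

Note that top_conf reflects the model's single most confident step, not an average over the sequence, so one near-certain token is enough to push it high.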