azmisahin committed on
Commit
2d16a47
·
verified ·
1 Parent(s): 07fa61c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -4
app.py CHANGED
@@ -15,7 +15,7 @@ os.environ['TRANSFORMERS_CACHE'] = '/app/cache'
15
  os.makedirs('/app/cache', exist_ok=True)
16
 
17
  # Model konfigürasyonu
18
- MODEL_NAME = "ITU-TATL/gpt2-small-tr"
19
 
20
  @lru_cache(maxsize=1)
21
  def load_model():
@@ -30,7 +30,7 @@ def load_model():
30
  # CPU için float32 kullan
31
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
32
  model = model.to('cpu').float() # Float32 formatına dönüştür
33
- torch.set_num_threads(2)
34
  logger.info("Model başarıyla yüklendi")
35
  return model, tokenizer
36
  except Exception as e:
@@ -66,11 +66,14 @@ def generate():
66
  outputs = model.generate(
67
  inputs.input_ids,
68
  attention_mask=inputs.attention_mask,
69
- max_length=100,
70
  do_sample=True,
71
  top_k=40,
72
  temperature=0.7,
73
- pad_token_id=tokenizer.pad_token_id
 
 
 
74
  )
75
 
76
  result = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
15
  os.makedirs('/app/cache', exist_ok=True)
16
 
17
  # Model konfigürasyonu
18
+ MODEL_NAME = "redrussianarmy/gpt2-turkish-cased"
19
 
20
  @lru_cache(maxsize=1)
21
  def load_model():
 
30
  # CPU için float32 kullan
31
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
32
  model = model.to('cpu').float() # Float32 formatına dönüştür
33
+ torch.set_num_threads(1)
34
  logger.info("Model başarıyla yüklendi")
35
  return model, tokenizer
36
  except Exception as e:
 
66
  outputs = model.generate(
67
  inputs.input_ids,
68
  attention_mask=inputs.attention_mask,
69
+ max_length=80,
70
  do_sample=True,
71
  top_k=40,
72
  temperature=0.7,
73
+ pad_token_id=tokenizer.pad_token_id,
74
+ num_return_sequences=1,
75
+ early_stopping=True,
76
+ use_cache=True
77
  )
78
 
79
  result = tokenizer.decode(outputs[0], skip_special_tokens=True)