cpv2280 committed
Commit 3f01ee9 · verified · 1 Parent(s): bc83871

Update app.py

Files changed (1)
  1. app.py +5 -1
app.py CHANGED
@@ -5,9 +5,13 @@ import gradio as gr
 import ftfy
 import language_tool_python
 import re
+import torch
 from sentence_transformers import SentenceTransformer, util
 from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 
+
+device = "cuda" if torch.cuda.is_available() else "cpu"  # ✅ Use GPU if available
+
 # Load fine-tuned GPT-2 model
 model_path = "cpv2280/gpt2_tinystories_finetuned"  # Update if needed
 model = AutoModelForCausalLM.from_pretrained(model_path)
@@ -49,7 +53,7 @@ def detect_inconsistencies(text):
 def story_pipeline(prompt):
     """Generates a story, refines it, and checks inconsistencies."""
     # Generate the story
-    generated = story_generator(prompt, max_length=200, do_sample=True, temperature=1.0, top_p=0.9, top_k=50)
+    generated = story_generator(prompt, max_length=200, do_sample=True, temperature=1.0, top_p=0.9, top_k=50, truncation=True)
     raw_story = generated[0]['generated_text']
 
     # Refine the generated story
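
Neither hunk shows the new device constant actually being consumed, so presumably it is passed to the generation pipeline further down in app.py. A minimal sketch of that wiring, assuming story_generator is built with transformers.pipeline (the tokenizer variable and the device= argument below are assumptions, not visible in this diff):

import torch
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"

model_path = "cpv2280/gpt2_tinystories_finetuned"
model = AutoModelForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)

# pipeline() accepts a device string such as "cuda" or "cpu"; passing the
# constant here is the usual way the new import would take effect.
story_generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=device,  # assumption: the actual app may wire this differently
)

In the second hunk, truncation=True tells the pipeline's tokenizer to clip prompts that exceed GPT-2's 1024-token context window, which also silences the truncation warning that transformers emits when max_length is set without it.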