random2222 committed
Commit f8c1ecf · verified · 1 Parent(s): 5f31e7c

Update app.py

Files changed (1):
  1. app.py +93 -79
app.py CHANGED
@@ -1,5 +1,7 @@
+# Updated app.py with torch import and error handling
 import gradio as gr
 import os
+import torch  # Missing import added here
 from langchain_community.document_loaders import PyPDFLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.embeddings import HuggingFaceEmbeddings
@@ -12,97 +14,109 @@ EMBEDDING_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
 MODEL_NAME = "microsoft/phi-2"
 
 def initialize_system():
-    # Verify documents
-    if not os.path.exists(DOCS_DIR):
-        raise FileNotFoundError(f"Missing {DOCS_DIR} folder")
+    try:
+        # Verify documents
+        if not os.path.exists(DOCS_DIR):
+            raise FileNotFoundError(f"Missing {DOCS_DIR} folder")
+
+        pdf_files = [os.path.join(DOCS_DIR, f)
+                     for f in os.listdir(DOCS_DIR)
+                     if f.endswith(".pdf")]
+        if not pdf_files:
+            raise ValueError(f"No PDFs found in {DOCS_DIR}")
 
-    pdf_files = [os.path.join(DOCS_DIR, f) for f in os.listdir(DOCS_DIR)
-                 if f.endswith(".pdf")]
-    if not pdf_files:
-        raise ValueError(f"No PDFs found in {DOCS_DIR}")
-
-    # Process documents
-    text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size=800,  # Reduced for Phi-2's context window
-        chunk_overlap=100
-    )
-
-    texts = []
-    for pdf in pdf_files:
-        loader = PyPDFLoader(pdf)
-        pages = loader.load_and_split(text_splitter)
-        texts.extend(pages)
-
-    # Create embeddings
-    embeddings = HuggingFaceEmbeddings(
-        model_name=EMBEDDING_MODEL,
-        model_kwargs={'device': 'cpu'},  # Force CPU for compatibility
-        encode_kwargs={'normalize_embeddings': False}
-    )
-
-    # Create vector store
-    vector_store = FAISS.from_documents(texts, embeddings)
-
-    # Load Phi-2 with 4-bit quantization
-    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
-    model = AutoModelForCausalLM.from_pretrained(
-        MODEL_NAME,
-        trust_remote_code=True,
-        device_map="auto",
-        load_in_4bit=True,
-        torch_dtype=torch.float16
-    )
-
-    return vector_store, model, tokenizer
+        # Process documents
+        text_splitter = RecursiveCharacterTextSplitter(
+            chunk_size=800,
+            chunk_overlap=100
+        )
+
+        texts = []
+        for pdf in pdf_files:
+            loader = PyPDFLoader(pdf)
+            pages = loader.load_and_split(text_splitter)
+            texts.extend(pages)
+
+        # Create embeddings
+        embeddings = HuggingFaceEmbeddings(
+            model_name=EMBEDDING_MODEL,
+            model_kwargs={'device': 'cpu'},
+            encode_kwargs={'normalize_embeddings': False}
+        )
+
+        # Create vector store
+        vector_store = FAISS.from_documents(texts, embeddings)
+
+        # Load Phi-2 model
+        tokenizer = AutoTokenizer.from_pretrained(
+            MODEL_NAME,
+            trust_remote_code=True,
+            padding_side="left"
+        )
+
+        model = AutoModelForCausalLM.from_pretrained(
+            MODEL_NAME,
+            trust_remote_code=True,
+            device_map="auto",
+            load_in_4bit=True,
+            torch_dtype=torch.float16
+        )
+
+        return vector_store, model, tokenizer
+
+    except Exception as e:
+        raise RuntimeError(f"Initialization failed: {str(e)}")
 
 try:
     vector_store, model, tokenizer = initialize_system()
-    print("System initialized successfully")
+    print("System initialized successfully")
 except Exception as e:
-    raise RuntimeError(f"Initialization error: {str(e)}")
+    print(f"Initialization error: {str(e)}")
+    raise
 
 def generate_response(query):
-    # Retrieve context
-    docs = vector_store.similarity_search(query, k=2)  # Fewer docs for Phi-2
-    context = "\n".join([d.page_content for d in docs])
-
-    # Phi-2 specific prompt format
-    prompt = f"""Question: {query}
-    Context: {context}
-    Instructions:
-    - Answer only using the context
-    - Keep responses under 3 sentences
-    - If unsure, say "I'll need to check with the team"
-
-    Answer:"""
-
-    inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=False).to(model.device)
-    outputs = model.generate(
-        **inputs,
-        max_new_tokens=200,
-        temperature=0.1,
-        do_sample=True,
-        pad_token_id=tokenizer.eos_token_id
-    )
-
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return response.split("Answer:")[-1].strip()
+    try:
+        # Retrieve context
+        docs = vector_store.similarity_search(query, k=2)
+        context = "\n".join([d.page_content for d in docs])
+
+        # Phi-2 optimized prompt
+        prompt = f"""<|system|>
+You are a customer service assistant. Answer ONLY using the context below.
+Keep responses under 3 sentences. If unsure, say "I'll check with the team".
+
+Context: {context}</s>
+<|user|>
+{query}</s>
+<|assistant|>"""
+
+        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+        outputs = model.generate(
+            **inputs,
+            max_new_tokens=200,
+            temperature=0.1,
+            do_sample=True,
+            pad_token_id=tokenizer.eos_token_id
+        )
+
+        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        return response.split("<|assistant|>")[-1].strip()
+
+    except Exception as e:
+        return "I'm having trouble answering that. Please try again later."
 
-# Simplified Gradio interface
+# Gradio interface
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
-    gr.Markdown("# Customer Service Chatbot")
-    chatbot = gr.Chatbot()
-    msg = gr.Textbox(label="Your question")
-    clear = gr.ClearButton([msg, chatbot])
+    gr.Markdown("# Customer Support Chatbot")
+    chatbot = gr.Chatbot(height=400)
+    msg = gr.Textbox(label="Your question", placeholder="Type here...")
+    clear = gr.Button("Clear History")
 
     def respond(message, history):
-        try:
-            response = generate_response(message)
-            return response
-        except Exception as e:
-            return "I'm having trouble answering that right now. Please try again later."
+        response = generate_response(message)
+        return response
 
     msg.submit(respond, [msg, chatbot], chatbot)
-    msg.submit(lambda: "", None, msg)
+    clear.click(lambda: None, None, chatbot, queue=False)
 
 demo.launch(server_port=7860)
 
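A few review notes on the new version. First, the model-loading block: recent transformers releases deprecate passing load_in_4bit=True directly to from_pretrained in favor of a BitsAndBytesConfig. A minimal sketch of that path, assuming bitsandbytes is installed in the Space:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# 4-bit quantization settings; bnb_4bit_compute_dtype takes over the role
# of the torch_dtype=torch.float16 kwarg used in the commit.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-2",
    trust_remote_code=True,
    device_map="auto",
    quantization_config=bnb_config,
)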
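The new except block in initialize_system re-raises as a RuntimeError, which replaces the original traceback. Chaining with "from e" keeps the root cause visible in the app logs; a drop-in variant of that block:

    except Exception as e:
        # "from e" chains the original exception so its traceback
        # still shows up alongside the RuntimeError.
        raise RuntimeError(f"Initialization failed: {e}") from e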
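The <|system|>/<|user|>/<|assistant|> markers in the new prompt are a ChatML-style convention that Phi-2 was not trained on; its model card documents a plain "Instruct: ... Output:" QA format, and since the markers are not special tokens for Phi-2, skip_special_tokens=True will not strip them from the decoded text. A hedged alternative worth A/B testing (build_phi2_prompt is a hypothetical helper; context and query match the names in generate_response):

def build_phi2_prompt(context: str, query: str) -> str:
    # Phi-2's documented QA format; the trailing "Output:" cues the answer.
    return (
        "Instruct: Using only the context below, answer in under 3 sentences. "
        "If unsure, say \"I'll check with the team\".\n"
        f"Context: {context}\n"
        f"Question: {query}\n"
        "Output:"
    )

# The matching post-processing would then split on "Output:" instead of "<|assistant|>":
# answer = tokenizer.decode(outputs[0], skip_special_tokens=True).split("Output:")[-1].strip()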
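Finally, a caveat on the Blocks wiring: gr.Chatbot renders a list of (user, assistant) pairs, so a respond that returns a bare string will not display as a conversation, and dropping the old msg.submit(lambda: "", None, msg) means the textbox is no longer cleared after sending. A minimal sketch of both fixes, assuming the same component names as above (tuple-style history; .then() chains the textbox reset onto the same submit event):

    def respond(message, history):
        # Append the new (user, assistant) pair to the running history
        # instead of returning a bare string.
        history = history or []
        answer = generate_response(message)
        return history + [(message, answer)]

    msg.submit(respond, [msg, chatbot], chatbot).then(lambda: "", None, msg)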