masadonline committed (verified)
Commit d5e32d6 · Parent: 9b67e66

Update app.py

Files changed (1): app.py (+22, -22)
app.py CHANGED
@@ -188,28 +188,27 @@ def setup_knowledge_base():
        all_text += _format_tables_internal(tables) + "\n"

    # Process CSVs
-# Process CSVs
-for filename in ["CustomerOrders.csv"]:
-    csv_path = os.path.join(folder_path, filename)
-    try:
-        with open(csv_path, newline='', encoding='utf-8') as csvfile:
-            reader = csv.DictReader(csvfile)
-            for row in reader:
-                line = f"Order ID: {row.get('OrderID')} | Customer Name: {row.get('CustomerName')} | Order Date: {row.get('OrderDate')} | ProductID: {row.get('ProductID')} | Date: {row.get('OrderDate')} | Quantity: {row.get('Quantity')} | UnitPrice(USD): {row.get('UnitPrice(USD)')} | TotalPrice(USD): {row.get('TotalPrice(USD)')} | ShippingAddress: {row.get('ShippingAddress')} | OrderStatus: {row.get('OrderStatus')}"
-                all_text += line + "\n"
-    except Exception as e:
-        print(f"❌ Error reading {filename}: {e}")
-
-for filename in ["Products.csv"]:
-    csv_path = os.path.join(folder_path, filename)
-    try:
-        with open(csv_path, newline='', encoding='utf-8') as csvfile:
-            reader = csv.DictReader(csvfile)
-            for row in reader:
-                line = f"Product ID: {row.get('ProductID')} | Toy Name: {row.get('ToyName')} | Category: {row.get('Category')} | Price(USD): {row.get('Price(USD)')} | Stock Quantity: {row.get('StockQuantity')} | Description: {row.get('Description')}"
-                all_text += line + "\n"
-    except Exception as e:
-        print(f"❌ Error reading {filename}: {e}")
+    for filename in ["CustomerOrders.csv"]:
+        csv_path = os.path.join(folder_path, filename)
+        try:
+            with open(csv_path, newline='', encoding='utf-8') as csvfile:
+                reader = csv.DictReader(csvfile)
+                for row in reader:
+                    line = f"Order ID: {row.get('OrderID')} | Customer Name: {row.get('CustomerName')} | Order Date: {row.get('OrderDate')} | ProductID: {row.get('ProductID')} | Date: {row.get('OrderDate')} | Quantity: {row.get('Quantity')} | UnitPrice(USD): {row.get('UnitPrice(USD)')} | TotalPrice(USD): {row.get('TotalPrice(USD)')} | ShippingAddress: {row.get('ShippingAddress')} | OrderStatus: {row.get('OrderStatus')}"
+                    all_text += line + "\n"
+        except Exception as e:
+            print(f"❌ Error reading {filename}: {e}")
+
+    for filename in ["Products.csv"]:
+        csv_path = os.path.join(folder_path, filename)
+        try:
+            with open(csv_path, newline='', encoding='utf-8') as csvfile:
+                reader = csv.DictReader(csvfile)
+                for row in reader:
+                    line = f"Product ID: {row.get('ProductID')} | Toy Name: {row.get('ToyName')} | Category: {row.get('Category')} | Price(USD): {row.get('Price(USD)')} | Stock Quantity: {row.get('StockQuantity')} | Description: {row.get('Description')}"
+                    all_text += line + "\n"
+        except Exception as e:
+            print(f"❌ Error reading {filename}: {e}")

    # Tokenization & chunking
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
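In the block added above, each row of CustomerOrders.csv and Products.csv is flattened into a single pipe-delimited text line and appended to all_text, so orders and products become searchable alongside the extracted document text. Below is a minimal standalone sketch of that flattening pattern for the orders file; the "data" folder name and the load_orders_as_text helper are illustrative stand-ins, not names from app.py.

import csv
import os

def load_orders_as_text(folder_path="data"):
    """Flatten CustomerOrders.csv rows into pipe-delimited lines.
    The "data" folder name is an assumption; app.py passes its own folder_path."""
    lines = []
    csv_path = os.path.join(folder_path, "CustomerOrders.csv")
    with open(csv_path, newline='', encoding='utf-8') as csvfile:
        for row in csv.DictReader(csvfile):
            # Same pattern as the diff: one searchable text line per order row.
            lines.append(
                f"Order ID: {row.get('OrderID')} | Customer Name: {row.get('CustomerName')} | "
                f"Order Date: {row.get('OrderDate')} | Quantity: {row.get('Quantity')} | "
                f"TotalPrice(USD): {row.get('TotalPrice(USD)')} | OrderStatus: {row.get('OrderStatus')}"
            )
    return "\n".join(lines)

Reading rows with csv.DictReader keeps each lookup keyed by header name, so a missing column simply renders as None instead of raising.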
@@ -222,6 +221,7 @@ for filename in ["Products.csv"]:
    return index, model, chunks


+
# --- Monitor Conversations ---
def start_conversation_monitor(client, index, embed_model, text_chunks):
    processed_convos = set()
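The unchanged context lines around the two hunks show what happens next: the flattened all_text is tokenized with bert-base-uncased, chunked, and setup_knowledge_base() returns index, model, chunks. The chunking details are not visible in this diff, so the sketch below is only one plausible token-count-based approach; chunk_by_tokens and the 256-token window are assumptions.

from transformers import AutoTokenizer

def chunk_by_tokens(all_text: str, max_tokens: int = 256) -> list[str]:
    """Split the knowledge-base text into chunks of at most max_tokens tokens.
    max_tokens=256 is illustrative; app.py's actual chunk size is not shown in this diff."""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    token_ids = tokenizer.encode(all_text, add_special_tokens=False)
    return [
        tokenizer.decode(token_ids[start:start + max_tokens])
        for start in range(0, len(token_ids), max_tokens)
    ]

Bounding chunks by token count rather than character count keeps each piece within whatever input limit the downstream embedding step imposes.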
 
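The second hunk only inserts a blank line ahead of start_conversation_monitor(client, index, embed_model, text_chunks), whose first statement keeps a processed_convos set so each conversation is handled only once. The sketch below shows that dedup-by-set polling pattern in isolation; the ChatClient protocol, its method names, the answer_from_kb callback, and the poll interval are all hypothetical, since the real client API does not appear in this diff.

import time
from typing import Iterable, Protocol

class ChatClient(Protocol):
    # Hypothetical interface standing in for whatever client object app.py uses.
    def list_open_conversation_ids(self) -> Iterable[str]: ...
    def get_latest_message(self, convo_id: str) -> str: ...
    def reply(self, convo_id: str, text: str) -> None: ...

def monitor_conversations(client: ChatClient, answer_from_kb, poll_seconds: float = 10.0) -> None:
    """Poll for conversations and answer each one once, mirroring the processed_convos set idea."""
    processed_convos = set()
    while True:
        for convo_id in client.list_open_conversation_ids():
            if convo_id in processed_convos:
                continue  # this conversation was already answered
            question = client.get_latest_message(convo_id)
            client.reply(convo_id, answer_from_kb(question))
            processed_convos.add(convo_id)
        time.sleep(poll_seconds)

Tracking IDs in a set is enough for a single-process monitor; anything restarted or multi-worker would need to persist the processed IDs elsewhere.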