quoc-khanh committed (verified)
Commit d6edb5a · 1 parent: fdbc0db

Update file_loader.py

Files changed (1):
file_loader.py: +7 -2
file_loader.py CHANGED
@@ -19,16 +19,21 @@ def get_vectorstore():
     docx_files = list_docx_files(folder_path)
 
     all_splits = [] # Khởi tạo danh sách lưu kết quả
+    print('Feeding .docx files')
     for i, file_path in enumerate(tqdm(docx_files, desc="Đang xử lý", unit="file")):
         output_json_path = f"output_{i}.json"
         splits = get_splits(file_path, output_json_path)
         all_splits += splits
-
+    print('Feeding .json files')
     # Xử lý FAQ
     FAQ_path = "syllabus_nct_word_format/FAQ.json"
     FAQ_splits = get_json_splits_only(FAQ_path)
     all_splits += FAQ_splits
-
+
+    FAQ_path = "syllabus_nct_word_format/FAQ2.json"
+    FAQ_splits = get_json_splits_only(FAQ_path)
+    all_splits += FAQ_splits
+    pprint("Feeding relevent websites' contents")
     base_urls = ['https://fda.neu.edu.vn/hoi-nghi-khoa-hoc-cong-nghe-dai-hoc-kinh-te-quoc-dan-nam-2025/']
     #['https://nct.neu.edu.vn/', 'https://fsf.neu.edu.vn/', 'https://mfe.neu.edu.vn/', 'https://mis.neu.edu.vn/', 'https://fda.neu.edu.vn/', 'https://khoathongke.neu.edu.vn/', 'https://fit.neu.edu.vn/']
     website_contents = scrape_website(base_urls='base_urls')
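In short, the commit adds a progress message before each ingestion stage and feeds a second FAQ file, syllabus_nct_word_format/FAQ2.json, through get_json_splits_only; the web-scraping stage itself is unchanged. The Vietnamese comments and strings read roughly: "Khởi tạo danh sách lưu kết quả" = "initialise the list that collects the results", "Đang xử lý" = "processing", "Xử lý FAQ" = "handle the FAQ". Below is a minimal editorial sketch, not the committed code, of how the touched section of get_vectorstore() could read after this commit. It assumes list_docx_files, get_splits, get_json_splits_only and scrape_website are the helpers already defined elsewhere in file_loader.py, wraps the section in a hypothetical build_all_splits function, folds the two copy-pasted FAQ blocks into one loop, uses print throughout (the committed pprint call would otherwise need "from pprint import pprint"), and passes the base_urls list itself to scrape_website, since base_urls='base_urls' in the hunk hands the function a literal string, which looks unintended.

from tqdm import tqdm

# Editorial sketch of the ingestion section touched by commit d6edb5a.
# list_docx_files, get_splits, get_json_splits_only and scrape_website are
# assumed to be the helpers defined elsewhere in file_loader.py.
def build_all_splits(folder_path):
    docx_files = list_docx_files(folder_path)

    all_splits = []  # collects every document chunk before indexing
    print('Feeding .docx files')
    for i, file_path in enumerate(tqdm(docx_files, desc="Processing", unit="file")):
        output_json_path = f"output_{i}.json"
        all_splits += get_splits(file_path, output_json_path)

    print('Feeding .json files')
    # FAQ.json and FAQ2.json go through the same JSON splitter,
    # so they can share one loop instead of two duplicated blocks.
    for faq_path in ("syllabus_nct_word_format/FAQ.json",
                     "syllabus_nct_word_format/FAQ2.json"):
        all_splits += get_json_splits_only(faq_path)

    print("Feeding relevant websites' contents")
    base_urls = ['https://fda.neu.edu.vn/hoi-nghi-khoa-hoc-cong-nghe-dai-hoc-kinh-te-quoc-dan-nam-2025/']
    # Assumption: the list itself is meant to be passed, not the string 'base_urls'.
    website_contents = scrape_website(base_urls=base_urls)
    # How website_contents is merged into the index lies below the shown hunk.

    return all_splits, website_contents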