Docfile committed on
Commit
cc2e992
·
1 Parent(s): aa526c9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -12
app.py CHANGED
@@ -21,10 +21,10 @@ from huggingface_hub import hf_hub_download
21
  model_name_or_path = "hlhr202/llama-7B-ggml-int4"
22
  model_basename = "ggml-model-q4_0.bin" # the model is in bin format
23
 
24
- model_path = hf_hub_download(repo_id=model_name_or_path, filename=model_basename)
25
 
26
  n_gpu_layers = 40 # Change this value based on your model and your GPU VRAM pool.
27
- n_batch = 256
28
 
29
 
30
  embed_model = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl",
@@ -39,25 +39,24 @@ prompt_helper = PromptHelper(
39
  )
40
  """
41
  from langchain_g4f import G4FLLM
42
-
43
- async def main(question):
44
- llm : LLM = G4FLLM(
45
  model=models.gpt_35_turbo,
46
- provider=Provider.Acytoo,
47
- )
48
 
49
 
50
- llm = LangChainLLM(llm=llm)
51
 
52
- service_context = ServiceContext.from_defaults(llm=llm,
53
  embed_model=embed_model)
54
 
55
- documents = SimpleDirectoryReader("data/").load_data()
56
- index = VectorStoreIndex.from_documents(documents, service_context=service_context)
 
 
57
  query_engine = index.as_query_engine(service_context=service_context)
58
  response = query_engine.query(question)
59
  print(response)
60
  return response
61
 
62
  iface = Interface(fn=main, inputs="text", outputs="text")
63
- iface.launch()
 
21
  model_name_or_path = "hlhr202/llama-7B-ggml-int4"
22
  model_basename = "ggml-model-q4_0.bin" # the model is in bin format
23
 
24
+ #model_path = hf_hub_download(repo_id=model_name_or_path, filename=model_basename)
25
 
26
  n_gpu_layers = 40 # Change this value based on your model and your GPU VRAM pool.
27
+ n_batch = 256
28
 
29
 
30
  embed_model = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl",
 
39
  )
40
  """
41
  from langchain_g4f import G4FLLM
42
+ llm = LLM = G4FLLM(
 
 
43
  model=models.gpt_35_turbo,
44
+ provider=Provider.Acytoo,)
 
45
 
46
 
47
+ llm = LangChainLLM(llm=llm)
48
 
49
+ service_context = ServiceContext.from_defaults(llm=llm,
50
  embed_model=embed_model)
51
 
52
+ documents = SimpleDirectoryReader("data").load_data()
53
+ index = VectorStoreIndex.from_documents(documents, service_context=service_context)
54
+
55
+ async def main(question):
56
  query_engine = index.as_query_engine(service_context=service_context)
57
  response = query_engine.query(question)
58
  print(response)
59
  return response
60
 
61
  iface = Interface(fn=main, inputs="text", outputs="text")
62
+ iface.launch()