Bhaskar2611 committed
Commit 01f5c07 · verified · 1 Parent(s): fe680f0

Update llm_tool.py

Files changed (1):
  1. llm_tool.py +23 -9
llm_tool.py CHANGED
@@ -1,19 +1,33 @@
 import os
 from huggingface_hub import InferenceClient
 
+# Initialize client without provider (Hugging Face handles routing)
+client = InferenceClient(
+    model="Qwen/Qwen2.5-7B-Instruct",
+    token=os.environ.get("HF_TOKEN")  # Make sure HF_TOKEN is set in Secrets
+)
+
 def analyze_data(prompt):
     """
-    Use Hugging Face LLM to generate insights from raw search data
+    Use chat completions API to generate insights from raw search data
     """
-    HF_TOKEN = os.getenv("HF_TOKEN")
-    if not HF_TOKEN:
-        return "Error: Hugging Face token not found in environment variables"
-
     try:
-        client = InferenceClient(
-            model="Qwen/Qwen2.5-Coder-7B-Instruct",
-            token=HF_TOKEN
+        # Format prompt as a chat message
+        messages = [
+            {
+                "role": "user",
+                "content": prompt
+            }
+        ]
+
+        # Get response from LLM
+        completion = client.chat.completions.create(
+            messages=messages,
+            max_tokens=500  # Control response length
         )
-        return client.text_generation(prompt, max_new_tokens=500)
+
+        # Return only the content part of the response
+        return completion.choices[0].message.content
+
     except Exception as e:
         return f"LLM generation failed: {str(e)}"