ChinarQ-AI committed on
Commit c24bfc8 · verified · 1 Parent(s): b661fac

Upload 7 files

llm/__init__.py ADDED
File without changes
llm/__pycache__/financial_chat.cpython-312.pyc ADDED
Binary file (1.41 kB)
 
llm/__pycache__/groq_client.cpython-312.pyc ADDED
Binary file (582 Bytes)
 
llm/explaination.py ADDED
@@ -0,0 +1,20 @@
+ import torch
+
+ def generate_explanation(transaction, tokenizer, model, device=torch.device("cpu")):
+     if tokenizer is None or model is None:
+         return "Explanation service unavailable"
+
+     try:
+         text = (
+             f"Amount: ${transaction['amount']:.2f}, "
+             f"Credit Limit: ${transaction['credit_limit']:.2f}, "
+             f"Ratio: {transaction['amount_ratio']:.2f}, "
+             f"Chip Usage: {transaction.get('use_chip', 'N/A')}"
+         )
+         inputs = tokenizer([text], return_tensors="pt", truncation=True, padding=True).to(device)
+         with torch.no_grad():
+             outputs = model(**inputs)
+         probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
+         return f"Risk assessment: {probs[0][1]*100:.1f}% suspicious activity likelihood"
+     except Exception as e:
+         return f"Error generating explanation: {e}"
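For context, a minimal sketch (not part of this commit) of how generate_explanation might be called, paired with load_llm_components from llm/model_loader.py below; the transaction values are hypothetical placeholders chosen to match the keys the function reads:

import torch
from llm.model_loader import load_llm_components
from llm.explaination import generate_explanation

device = torch.device("cpu")
tokenizer, model = load_llm_components(device)

# Hypothetical transaction record; keys match what generate_explanation reads.
transaction = {
    "amount": 4200.00,
    "credit_limit": 5000.00,
    "amount_ratio": 0.84,  # amount / credit_limit
    "use_chip": "Online Transaction",
}
print(generate_explanation(transaction, tokenizer, model, device))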
llm/financial_chat.py ADDED
@@ -0,0 +1,29 @@
+ from .groq_client import get_groq_client
+
+ def financial_chat(user_input, groq_client=None):
+     if groq_client is None:
+         try:
+             groq_client = get_groq_client()
+         except Exception as e:
+             return f"Error initializing chat service: {e}"
+
+     try:
+         response = groq_client.chat.completions.create(
+             messages=[
+                 {
+                     "role": "system",
+                     "content": (
+                         "You are FinanceWise AI, a strict financial advisor. Only respond to finance-related questions. "
+                         "If the user asks about anything unrelated to finance (e.g. languages, science, entertainment), reply: "
+                         "'Sorry, I can only help with finance-related questions.'"
+                     ),
+                 },
+                 {"role": "user", "content": user_input}
+             ],
+             model="llama-3.3-70b-versatile",
+             temperature=0.3,
+             top_p=0.7
+         )
+         return response.choices[0].message.content
+     except Exception as e:
+         return f"Error during chat: {e}"
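A quick usage sketch (not part of this commit; assumes GROQ_API_KEY is set, and the question text is illustrative):

from llm.financial_chat import financial_chat

# The system prompt restricts replies to finance topics, so off-topic
# questions should get the canned refusal.
print(financial_chat("How should I think about my credit utilization ratio?"))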
llm/groq_client.py ADDED
@@ -0,0 +1,8 @@
+ import os
+ from groq import Groq
+
+ def get_groq_client():
+     api_key = os.environ.get("GROQ_API_KEY")
+     if not api_key:
+         raise ValueError("GROQ_API_KEY not set in environment variables.")
+     return Groq(api_key=api_key)
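The client is created lazily from the environment, so callers can surface the missing-key error themselves; a small sketch (not part of this commit):

from llm.groq_client import get_groq_client

try:
    client = get_groq_client()
except ValueError as e:
    print(e)  # "GROQ_API_KEY not set in environment variables."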
llm/model_loader.py ADDED
@@ -0,0 +1,12 @@
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+ def load_llm_components(device=torch.device("cpu")):
+     try:
+         tokenizer = AutoTokenizer.from_pretrained("ProsusAI/finbert")
+         model = AutoModelForSequenceClassification.from_pretrained("ProsusAI/finbert").to(device)
+         print("LLM components loaded successfully!")
+     except Exception as e:
+         print(f"Failed to load LLM components: {e}")
+         tokenizer, model = None, None
+     return tokenizer, model
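A short sketch (not part of this commit) of loading on whatever device is available; the CUDA fallback logic is illustrative:

import torch
from llm.model_loader import load_llm_components

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer, model = load_llm_components(device)
if model is not None:
    model.eval()  # inference-only downstream, e.g. in generate_explanation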