Priyanshu Kumar
committed on
Commit
·
39290f7
1
Parent(s):
5bbd902
Add Mistral model
Browse files- README.md +44 -1
- app.py +44 -0
- index.html +0 -19
- requirements.txt +4 -0
- style.css +0 -28
README.md
CHANGED
@@ -8,4 +8,47 @@ pinned: false
|
|
8 |
short_description: This API is for AnalyDocs
|
9 |
---
|
10 |
|
11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
short_description: This API is for AnalyDocs
|
9 |
---
|
10 |
|
11 |
+
# 🧠 Mistral-7B GGUF API – Hosted LLM Inference with FastAPI

This Hugging Face Space hosts a lightweight, quantized version of the [Mistral-7B Instruct](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) large language model in **GGUF format (Q4_K_M)** using `llama-cpp-python` and `FastAPI`.

It exposes a simple `/generate` endpoint to allow easy integration of high-quality local inference into any application – no OpenAI keys, no vendor lock-in, no GPU dependency.

---

## 🔗 Live Demo

> 🚀 API is deployed and accessible here:
> **https://your-username-your-space-name.hf.space**

---

## 📄 Used In: AnalyDocs – AI-Powered Report Generator

**AnalyDocs** is a smart document and data report generation tool that uses this API as its core language generation engine.

### 🧠 What AnalyDocs Does:
AnalyDocs takes structured or unstructured business data (tables, charts, KPIs, raw CSVs) and transforms it into meaningful written insights using prompt-based LLM processing.

**Features:**
- ✨ Natural language summaries of documents, reports, and spreadsheets
- 📊 Automatic generation of key insights from graphs and charts
- 📈 Time-series growth/decline analysis with possible reasons from news sources
- 📝 Clean, editable paragraphs and bullet points for documentation

The LLM API hosted in this repo powers the natural language generation core of AnalyDocs.

> 🛠 Example use case:
> “Generate a 5-point executive summary comparing Q1 and Q2 performance, highlighting changes and probable causes.”

---

## 📡 API Documentation

### **POST** `/generate`

#### Request:
```json
{
  "prompt": "Write a summary of the key growth areas in Q2 2024 for the dairy industry."
}
```
|
app.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import FastAPI, Request
|
2 |
+
from llama_cpp import Llama
|
3 |
+
from huggingface_hub import hf_hub_download
|
4 |
+
import os
|
5 |
+
|
6 |
+
app = FastAPI()
|
7 |
+
|
8 |
+
# === Model Config ===
|
9 |
+
REPO_ID = "TheBloke/Mistral-7B-Instruct-v0.1-GGUF"
|
10 |
+
FILENAME = "mistral-7b-instruct-v0.1.Q4_K_M.gguf"
|
11 |
+
MODEL_DIR = "models"
|
12 |
+
MODEL_PATH = os.path.join(MODEL_DIR, FILENAME)
|
13 |
+
|
14 |
+
# === Download only if not already present ===
|
15 |
+
if not os.path.exists(MODEL_PATH):
|
16 |
+
print(f"Downloading model {FILENAME} from Hugging Face...")
|
17 |
+
model_path = hf_hub_download(
|
18 |
+
repo_id=REPO_ID,
|
19 |
+
filename=FILENAME,
|
20 |
+
cache_dir=MODEL_DIR,
|
21 |
+
local_dir=MODEL_DIR,
|
22 |
+
local_dir_use_symlinks=False
|
23 |
+
)
|
24 |
+
else:
|
25 |
+
print(f"Model already exists at: {MODEL_PATH}")
|
26 |
+
model_path = MODEL_PATH
|
27 |
+
|
28 |
+
# === Load LLM ===
|
29 |
+
llm = Llama(
|
30 |
+
model_path=model_path,
|
31 |
+
n_ctx=1024,
|
32 |
+
n_threads=4 # Adjust for your CPU
|
33 |
+
)
|
34 |
+
|
35 |
+
@app.get("/")
|
36 |
+
def root():
|
37 |
+
return {"message": "Mistral API is live!"}
|
38 |
+
|
39 |
+
@app.post("/generate")
|
40 |
+
async def generate(request: Request):
|
41 |
+
data = await request.json()
|
42 |
+
prompt = data.get("prompt", "")
|
43 |
+
response = llm(prompt, max_tokens=128, temperature=0.7)
|
44 |
+
return {"response": response["choices"][0]["text"]}
|
index.html
DELETED
@@ -1,19 +0,0 @@
|
|
1 |
-
<!doctype html>
|
2 |
-
<html>
|
3 |
-
<head>
|
4 |
-
<meta charset="utf-8" />
|
5 |
-
<meta name="viewport" content="width=device-width" />
|
6 |
-
<title>My static Space</title>
|
7 |
-
<link rel="stylesheet" href="style.css" />
|
8 |
-
</head>
|
9 |
-
<body>
|
10 |
-
<div class="card">
|
11 |
-
<h1>Welcome to your static Space!</h1>
|
12 |
-
<p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
|
13 |
-
<p>
|
14 |
-
Also don't forget to check the
|
15 |
-
<a href="https://huggingface.co/docs/hub/spaces" target="_blank">Spaces documentation</a>.
|
16 |
-
</p>
|
17 |
-
</div>
|
18 |
-
</body>
|
19 |
-
</html>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
requirements.txt
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
llama-cpp-python==0.2.67
fastapi
uvicorn
huggingface_hub
|
style.css
DELETED
@@ -1,28 +0,0 @@
|
|
1 |
-
body {
|
2 |
-
padding: 2rem;
|
3 |
-
font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
|
4 |
-
}
|
5 |
-
|
6 |
-
h1 {
|
7 |
-
font-size: 16px;
|
8 |
-
margin-top: 0;
|
9 |
-
}
|
10 |
-
|
11 |
-
p {
|
12 |
-
color: rgb(107, 114, 128);
|
13 |
-
font-size: 15px;
|
14 |
-
margin-bottom: 10px;
|
15 |
-
margin-top: 5px;
|
16 |
-
}
|
17 |
-
|
18 |
-
.card {
|
19 |
-
max-width: 620px;
|
20 |
-
margin: 0 auto;
|
21 |
-
padding: 16px;
|
22 |
-
border: 1px solid lightgray;
|
23 |
-
border-radius: 16px;
|
24 |
-
}
|
25 |
-
|
26 |
-
.card p:last-child {
|
27 |
-
margin-bottom: 0;
|
28 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|