import os
from fastapi import FastAPI, Request
from sentence_transformers import SentenceTransformer, util
import torch
import requests

# Make sure the Hugging Face model cache is writable (e.g. on a read-only container)
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf"

# Supabase config (in production, prefer loading the key from an environment variable)
SUPABASE_URL = "https://olbjfxlclotxtnpjvpfj.supabase.co"
SUPABASE_KEY = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..."

# Load the embedding model once at startup
model = SentenceTransformer("all-MiniLM-L6-v2")

# FastAPI app
app = FastAPI()

# Fetch the FAQ entries for a given UID from Supabase
def get_faq_from_supabase(uid):
    url = f"{SUPABASE_URL}/rest/v1/faq_texts?uid=eq.{uid}"
    headers = {
        "apikey": SUPABASE_KEY,
        "Authorization": f"Bearer {SUPABASE_KEY}",
        "Content-Type": "application/json"
    }
    try:
        r = requests.get(url, headers=headers, timeout=10)
        r.raise_for_status()
        data = r.json()
        return [{"q": d["question"], "a": d["answer"]} for d in data]
    except Exception as e:
        print("Supabase error:", e)
        return []
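
# Note: the code above assumes the `faq_texts` table has at least `uid`,
# `question`, and `answer` columns; get_faq_from_supabase() then returns rows as
#   [{"q": "<question text>", "a": "<answer text>"}, ...]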

@app.post("/predict")
async def predict(request: Request):
    # The request body is expected to be {"data": [uid, question]}
    body = await request.json()
    uid, question = body.get("data", [None, None])

    if not uid or not question:
        return {"data": ["UID atau pertanyaan tidak valid."]}

    faqs = get_faq_from_supabase(uid)
    if not faqs:
        return {"data": ["FAQ tidak ditemukan untuk UID ini."]}

    questions = [f["q"] for f in faqs]
    answers = [f["a"] for f in faqs]

    # Embed the stored questions and the incoming query, then pick the answer
    # whose question has the highest cosine similarity to the query.
    embeddings = model.encode(questions, convert_to_tensor=True)
    query_embedding = model.encode(question, convert_to_tensor=True)

    similarity = util.pytorch_cos_sim(query_embedding, embeddings)
    best_idx = torch.argmax(similarity).item()

    return {"data": [answers[best_idx]]}