Update src/streamlit_app.py

src/streamlit_app.py  +38 -25  CHANGED
@@ -1,29 +1,41 @@
 import os
 import tempfile
-
-from llama_cpp import Llama
+from pathlib import Path
 
 """
-
-
-
-
-
-
-
-
+**Streamlit Space – minimal version (no explicit caching)**
+
+Fixes the `StreamlitSetPageConfigMustBeFirstCommandError` **and** the
+previous permission issues by:
+
+1. Setting `HOME`, `HF_HOME`, and `HF_CACHE_DIR` **before** we import
+   Streamlit – that way Streamlit creates `~/.streamlit` in a writable
+   place instead of `/.streamlit`.
+2. Moving the call to `st.set_page_config()` to be the **very first
+   Streamlit command**.
+
+You can re-add `@st.cache_resource` once everything boots.
 """
 
-#
-
+# ----------------------------------------------------------------------------
+# Writable environment — must be set *before* importing streamlit
+# ----------------------------------------------------------------------------
+HF_TMP = tempfile.mkdtemp(prefix="hf_")  # guaranteed-writable temp dir
+os.environ["HOME"] = HF_TMP              # for ~/.streamlit
 os.environ["HF_HOME"] = HF_TMP
-os.environ["HF_CACHE_DIR"] = HF_TMP
+os.environ["HF_CACHE_DIR"] = HF_TMP
 
-#
-
-
+# ----------------------------------------------------------------------------
+import streamlit as st
+from llama_cpp import Llama
 
-
+# ----- Streamlit page config (must be first Streamlit command) -------------
+st.set_page_config(page_title="Sanskrit Transliteration", page_icon="📜")
+
+# ----------------------------------------------------------------------------
+# Model loader (no Streamlit cache for now)
+# ----------------------------------------------------------------------------
+llm = None  # global singleton
 
 def get_model():
     global llm
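The ordering in this hunk is the entire permissions fix: "~" expands through the `HOME` environment variable, so patching `HOME` before `import streamlit` is what makes Streamlit create `~/.streamlit` somewhere writable. A minimal stdlib-only sketch of the effect (the printed path is illustrative):

```python
import os
import tempfile

# Point HOME at a guaranteed-writable temp dir *before* anything expands "~".
os.environ["HOME"] = tempfile.mkdtemp(prefix="hf_")

# Anything resolved after this point, e.g. Streamlit's ~/.streamlit config
# directory, now lands inside the temp dir instead of the read-only "/".
print(os.path.expanduser("~/.streamlit"))  # e.g. /tmp/hf_ab12cd/.streamlit
```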
@@ -32,13 +44,13 @@ def get_model():
     llm = Llama.from_pretrained(
         repo_id="13Aluminium/Sanskrit_transliteration_Qwen3",
         filename="unsloth.Q4_K_M.gguf",
-        cache_dir=
+        cache_dir=os.environ["HF_CACHE_DIR"],
     )
     return llm
 
-#
+# ----------------------------------------------------------------------------
 # Inference helper
-#
+# ----------------------------------------------------------------------------
 
 def transliterate(text: str, *, temperature: float = 0.1, max_tokens: int = 200) -> str:
     prompt = f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
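The module docstring notes that `@st.cache_resource` can return once the Space boots. A hedged sketch of that follow-up, reusing the exact loader from this hunk (the decorator is standard Streamlit API; everything else is unchanged):

```python
import os

import streamlit as st
from llama_cpp import Llama

@st.cache_resource  # one cached Llama instance per server process
def get_model() -> Llama:
    # Identical loader to the diff above; the module-level `llm` singleton
    # and the `global llm` bookkeeping become unnecessary.
    return Llama.from_pretrained(
        repo_id="13Aluminium/Sanskrit_transliteration_Qwen3",
        filename="unsloth.Q4_K_M.gguf",
        cache_dir=os.environ["HF_CACHE_DIR"],
    )
```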
@@ -55,11 +67,10 @@ Transliterate Sanskrit Shlok to Latin
     out = model(prompt, max_tokens=max_tokens, temperature=temperature, echo=False)
     return out["choices"][0]["text"].strip()
 
-#
-#
-#
+# ----------------------------------------------------------------------------
+# UI
+# ----------------------------------------------------------------------------
 
-st.set_page_config(page_title="Sanskrit Transliteration", page_icon="📜")
 st.title("📜 Sanskrit → Latin Transliteration (Qwen3)")
 
 sample = (
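This hunk removes the late `st.set_page_config()` call that hunk 1 re-added at the top of the file. Streamlit raises `StreamlitSetPageConfigMustBeFirstCommandError` when any other `st.*` call runs first, so the required ordering, in miniature:

```python
import streamlit as st

# Must be the very first Streamlit command in the script ...
st.set_page_config(page_title="Sanskrit Transliteration", page_icon="📜")

# ... every other st.* call comes after it.
st.title("📜 Sanskrit → Latin Transliteration (Qwen3)")
```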
@@ -81,7 +92,9 @@ if st.button("🔤 Transliterate"):
     result = transliterate(sanskrit_text, temperature=temp, max_tokens=max_tokens)
     st.text(result)
 
-#
+# ----------------------------------------------------------------------------
+# requirements.txt – keep the same:
 # streamlit>=1.36
 # llama-cpp-python>=0.2.11
 # huggingface_hub>=0.24
+# ----------------------------------------------------------------------------
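The trailing comment block doubles as the Space's dependency list; written out as the `requirements.txt` it says to keep unchanged:

```text
streamlit>=1.36
llama-cpp-python>=0.2.11
huggingface_hub>=0.24
```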