Spaces:
Sleeping
Sleeping
File size: 3,463 Bytes
39dd9ee d0c5706 39dd9ee 1fefd62 6f16573 d0c5706 bf876d3 d0c5706 bd2ac75 1fefd62 bb3b64b 1fefd62 d0c5706 43c8700 d0c5706 e1a7403 d0c5706 e1a7403 d0c5706 1fefd62 d0c5706 e1a7403 1fefd62 d0c5706 6f16573 637b312 6f16573 637b312 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 |
import pickle
import secrets
import threading

import gradio as gr
import numpy as np
import tensorflow as tf
import uvicorn
from fastapi import FastAPI, Request
from huggingface_hub import hf_hub_download
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
# FastAPI app exposing a webhook endpoint so the Hub can trigger model reloads.
app = FastAPI()


@app.post("/reload")
async def reload_model(request: Request):
    """Reload the model when a trusted webhook calls POST /reload.

    The caller must present the shared secret in the ``x-hub-token`` header;
    any other request is rejected. Returns a small JSON status payload.
    """
    token = request.headers.get("x-hub-token")
    # secrets.compare_digest gives a constant-time comparison so the secret
    # cannot be recovered via response-timing differences; the explicit
    # `token and` guard handles a missing header (None).
    # NOTE(review): the secret is hard-coded — consider an env var instead.
    if not (token and secrets.compare_digest(token, "mi-secret-token")):
        print("π« Unauthorized webhook request.")
        return {"status": "unauthorized"}
    load_latest_model()
    print("π Model reloaded securely via webhook.")
    return {"status": "ok"}
# Fix the RNG seeds for reproducible behavior across restarts.
np.random.seed(42)
tf.random.set_seed(42)

# Single source of truth for the Hub repo holding the model and tokenizers
# (previously repeated inline at every download call).
REPO_ID = "Juna190825/github_jeffprosise_model"


def _load_pickle(filename):
    """Download *filename* from the Hub repo and return the unpickled object.

    NOTE(review): ``pickle.load`` executes arbitrary code from the file —
    acceptable only because the Hub repo is trusted; do not point this at
    untrusted repos.
    """
    path = hf_hub_download(repo_id=REPO_ID, filename=filename)
    with open(path, "rb") as f:
        return pickle.load(f)


# Source (English) and target (French) tokenizers fitted at training time.
en_tokenizer = _load_pickle("tokenizers/en_tokenizer.pkl")
fr_tokenizer = _load_pickle("tokenizers/fr_tokenizer.pkl")
# Reverse lookup: token id -> French word, used when decoding predictions.
fr_index_lookup = fr_tokenizer.index_word
sequence_len = 20  # max sequence length used during training; adjust as needed
# Load model function
def load_latest_model():
    """Re-download the latest model and tokenizers from the Hub.

    Refreshes the module-level ``model``, ``en_tokenizer``, ``fr_tokenizer``
    and ``fr_index_lookup`` globals so subsequent translations use the fresh
    artifacts. Returns a short status string shown in the Gradio UI.
    """
    global model, en_tokenizer, fr_tokenizer, fr_index_lookup
    model_path = hf_hub_download(
        repo_id="Juna190825/github_jeffprosise_model",
        filename="model.keras",
        cache_dir="cache",
        force_download=True,  # always fetch the newest revision
    )
    en_tok_path = hf_hub_download(repo_id="Juna190825/github_jeffprosise_model", filename="tokenizers/en_tokenizer.pkl")
    fr_tok_path = hf_hub_download(repo_id="Juna190825/github_jeffprosise_model", filename="tokenizers/fr_tokenizer.pkl")
    # Bug fix: the tokenizer files were downloaded but never loaded, leaving
    # stale tokenizers in memory after a reload. Load them into the globals.
    with open(en_tok_path, "rb") as f:
        en_tokenizer = pickle.load(f)
    with open(fr_tok_path, "rb") as f:
        fr_tokenizer = pickle.load(f)
    fr_index_lookup = fr_tokenizer.index_word
    model = load_model(model_path)
    return "✅ Model reloaded!"
# Initial load: populate the module-level model/tokenizers before serving any requests.
load_latest_model()
# Translation function
def translate_text(text):
    """Greedy-decode a French translation of *text*, one token per step."""
    # Encode and pad the English source once, up front.
    source = pad_sequences(
        en_tokenizer.texts_to_sequences([text]), maxlen=sequence_len, padding='post'
    )
    decoded = '[start]'
    for step in range(sequence_len):
        # Re-encode the partial French output; dropping the final position
        # shifts the target right, decoder-style.
        target = pad_sequences(
            fr_tokenizer.texts_to_sequences([decoded]), maxlen=sequence_len, padding='post'
        )[:, :-1]
        logits = model([source, target])
        best = np.argmax(logits[0, step, :])
        word = fr_tokenizer.index_word.get(best)
        # No usable word (padding id, blank, or unknown) — give up gracefully.
        if word is None or not word.strip() or word == '[unk]':
            return f"Translation unavailable for '{text}'"
        decoded = decoded + ' ' + word
        if word == '[end]':
            break
    # Strip the sentinel tokens before handing the text back to the UI.
    return decoded.replace('[start] ', '').replace(' [end]', '')
# Gradio Interface using Blocks: two text boxes plus translate/reload actions.
with gr.Blocks() as demo:
    gr.Markdown("# π English to French Translator")
    with gr.Row():
        source_box = gr.Textbox(label="English", placeholder="Enter text hereβ¦")
        target_box = gr.Textbox(label="French Translation")
    with gr.Row():
        translate_button = gr.Button("Translate")
        refresh_button = gr.Button("π Reload Model")
    # Wiring: translate fills the output box; reload shows its status string there.
    translate_button.click(translate_text, inputs=source_box, outputs=target_box)
    refresh_button.click(load_latest_model, outputs=target_box)
    gr.Examples(["What is your name?", "I love chocolate.", "Where is the nearest station?"], inputs=source_box)
def run_api():
    """Serve the FastAPI webhook app on port 7862 (blocking call)."""
    uvicorn.run(app, host="0.0.0.0", port=7862)


# Run the webhook server on a daemon thread so Gradio keeps the main thread;
# daemon=True lets the process exit cleanly when the Gradio app shuts down.
api_thread = threading.Thread(target=run_api, daemon=True)
api_thread.start()
demo.launch(share=True)
|