neovalle committed on
Commit 8fc19af · verified · 1 Parent(s): 62bc5de

Update app.py

Files changed (1)
  1. app.py +39 -16
app.py CHANGED
@@ -1,25 +1,48 @@
-import requests
 import gradio as gr
+from transformers import pipeline

-# The model endpoint on Hugging Face
-API_URL = "https://api-inference.huggingface.co/models/neovalle/H4rmoniousBreeze"
+# Load the Flan-T5-Large model and tokenizer via a pipeline.
+# If you're on a GPU Space, device=0 will place it on GPU.
+# If you're on CPU only, leave device=-1.
+model_name = "google/flan-t5-large"
+pipe = pipeline("text2text-generation", model=model_name, device=-1)

-def query_model(prompt):
-    # If your model is public, you may not need an API token.
-    # If it’s private, you'll need:
-    # headers = {"Authorization": "Bearer hf_..."}
-    # and pass them in your requests.post(...) call.
-    payload = {"inputs": prompt}
-    response = requests.post(API_URL, json=payload)
-    return response.json()
+def judge_ecolinguistics(pairs_text):
+    """
+    Takes a multiline string of question–answer pairs and returns a model-generated
+    score from 1 to 10 for each pair, along with a brief explanation.
+    """
+    # Construct a single prompt that instructs the model to score each Q&A pair.
+    prompt = f"""
+You are an ecolinguistics judge. You evaluate answers based on how thoroughly
+they address ecological concerns, clarity of expression, and factual correctness.

+Below is a set of question–answer pairs:
+
+{pairs_text}
+
+Please provide, for each pair, a single numerical score from 1 to 10 and a brief explanation.
+"""
+    # Use the pipeline to generate a response.
+    response = pipe(prompt, max_length=512, truncation=True)[0]["generated_text"]
+    return response
+
+# Build the Gradio interface.
 demo = gr.Interface(
-    fn=query_model,
-    inputs="text",
+    fn=judge_ecolinguistics,
+    inputs=gr.Textbox(
+        lines=10,
+        label="Enter Your Question–Answer Pairs",
+        placeholder="Example:\nQ1: What is an ecological niche?\nA1: It is the role a species plays in its environment.\n\nQ2: How does deforestation affect the climate?\nA2: It can reduce carbon sequestration and disrupt rainfall patterns.\n"
+    ),
     outputs="text",
-    title="H4rmoniousBreeze",
-    description="Score narratives"
+    title="Ecolinguistics Q&A Scorer (Flan-T5-Large)",
+    description=(
+        "Paste multiple question–answer pairs. The model will assign a score from 1–10 "
+        "to each answer, considering ecological relevance and clarity. "
+        "It will also provide a brief rationale for its scoring."
+    )
 )

 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
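
As a quick local sanity check (a minimal sketch, not part of this commit; it assumes the file above is saved as app.py and will download google/flan-t5-large on first use), the new scoring function can be exercised directly before launching the Space:

# Illustrative only: import the committed module and score one sample pair.
# The `if __name__ == "__main__":` guard keeps demo.launch() from running on import.
from app import judge_ecolinguistics

sample = (
    "Q1: What is an ecological niche?\n"
    "A1: It is the role a species plays in its environment.\n"
)
print(judge_ecolinguistics(sample))  # model-generated score and brief rationale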