neha-ai-playground committed on
Commit 118351d · 1 Parent(s): 5616dbf

Update app.py

Files changed (1)
  1. app.py +24 -2
app.py CHANGED
@@ -3,6 +3,7 @@ from pathlib import Path
 import streamlit as st
 from transformers import pipeline
 from dotenv import load_dotenv
+from langchain import PromptTemplate, HuggingFaceHub, LLMChain
 
 if Path(".env").is_file():
     load_dotenv(".env")
@@ -11,8 +12,29 @@ HF_TOKEN = os.getenv("HF_TOKEN")
 
 def img2Text(url):
     image_to_text = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
-    text = image_to_text(url)[0]["generated_text"]
+    text = image_to_text(url)
+    st.subheader("Caption :")
     st.subheader(text)
     print(text)
     return text
-img2Text("photo.jpg")
+img2Text("photo.png")
+
+# llm
+def generate_story(scenario):
+    template = """
+    You are a story teller;
+    You can generate a short story based on a simple narrative, the story should be no more than 20 words;
+    CONTEXT: {scenario}
+    STORY:
+    """
+
+    prompt = PromptTemplate(template=template, input_variables=["scenario"])
+    llm_chain = LLMChain(prompt=prompt,
+                         llm=HuggingFaceHub(repo_id="google/flan-t5-xl",
+                                            model_kwargs={"temperature": 0,
+                                                          "max_length": 64}))
+    story = llm_chain.run(scenario)
+    st.subheader("Story :")
+    st.subheader(story)
+    print(story)
+    return story
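
Note on the caption step: the transformers image-to-text pipeline returns a list of dicts rather than a plain string, so the indexing removed in this commit (`[0]["generated_text"]`) is what turned the result into text. A minimal sketch of pulling the caption string back out before displaying or reusing it, using the model and file name from the diff:

from transformers import pipeline

# The pipeline output looks like [{'generated_text': '...'}]; index into it
# to get a plain caption string suitable for st.subheader or generate_story.
captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
result = captioner("photo.png")
caption = result[0]["generated_text"]
print(caption)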
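
For the story step, LangChain's HuggingFaceHub wrapper typically reads its API token from the HUGGINGFACEHUB_API_TOKEN environment variable (or an explicit huggingfacehub_api_token argument), while this app only loads HF_TOKEN from .env. A sketch of how the two functions could be chained in the Streamlit flow, assuming HF_TOKEN holds a valid Hugging Face API token; the token re-export and the call order are assumptions, not part of the commit:

import os

# Assumption: HF_TOKEN is a valid Hugging Face API token; expose it under the
# name the HuggingFaceHub wrapper looks for.
os.environ.setdefault("HUGGINGFACEHUB_API_TOKEN", os.getenv("HF_TOKEN", ""))

caption = img2Text("photo.png")   # raw pipeline output unless re-indexed as above
story = generate_story(caption)   # short story rendered via st.subheader and returned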