import gradio as gr
import os
title = "❤️🧠MindfulStory📖💾MemoryMaker"
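# Note: this title string is not passed to the interface below, which sets its own title.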
examples = [
    ["Music and art make me feel"],
    ["Feel better each day when you awake by"],
    ["Feel better physically by"],
    ["Practicing mindfulness each day"],
    ["Be happier by"],
    ["Meditation can improve health"],
    ["Spending time outdoors"],
    ["Stress is relieved by quieting your mind, getting exercise and time with nature"],
    ["Break the cycle of stress and anxiety"],
    ["Feel calm in stressful situations"],
    ["Deal with work pressure"],
    ["Learn to reduce feelings of overwhelm"]
]
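# Example prompts surfaced as one-click starters in the Gradio UI.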
# PersistDataset -----
import csv
from datetime import datetime
from huggingface_hub import Repository, hf_hub_download
# created new dataset as awacke1/MindfulStory.csv
DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/MindfulStory.csv"
DATASET_REPO_ID = "awacke1/MindfulStory.csv"
DATA_FILENAME = "MindfulStory.csv"
DATA_DIRNAME = "data"
DATA_FILE = os.path.join(DATA_DIRNAME, DATA_FILENAME)
HF_TOKEN = os.environ.get("HF_TOKEN")
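# HF_TOKEN should be set (for example as a Space secret) so the dataset repo can be cloned and pushed to.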
# Download dataset repo using hub download
try:
    hf_hub_download(
        repo_id=DATASET_REPO_ID,
        repo_type="dataset",
        filename=DATA_FILENAME,
        cache_dir=DATA_DIRNAME,
        force_filename=DATA_FILENAME
    )
except Exception:
    print("file not found")
# Set up cloned dataset from repo for operations
repo = Repository(
    local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
)
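# Load three hosted text-generation models via the Inference API; gr.Parallel below runs one prompt through all of them.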
generator1 = gr.Interface.load("huggingface/gpt2-large", api_key=HF_TOKEN)
generator2 = gr.Interface.load("huggingface/EleutherAI/gpt-neo-2.7B", api_key=HF_TOKEN)
generator3 = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B", api_key=HF_TOKEN)
SplitterInputBox = gr.inputs.Textbox(lines=5, label="Enter a sentence to get another sentence.")
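# SplitterInputBox is an alternate input component; the interface below uses its own Textbox instead.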
def AIMemory(name: str, message: str):
    # Append the name/message pair to the CSV log and push the commit to the dataset repo.
    if name and message:
        with open(DATA_FILE, "a") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"])
            writer.writerow({"name": name, "message": message, "time": str(datetime.now())})
        commit_url = repo.push_to_hub()
    return message
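# Mindfulness.txt (stored alongside app.py) supplies the default seed text for the story-starter textbox.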
with open('Mindfulness.txt', 'r') as file:
    context = file.read()
# Compose the three generators so a single prompt fans out to all models at once.
# Note: gr.Parallel takes the interfaces positionally; the earlier fn=AIMemory keyword before them
# was a syntax error, so AIMemory is kept above as a standalone persistence helper.
parallelModel = gr.Parallel(generator1, generator2, generator3,
    inputs=[
        gr.inputs.Textbox(lines=3, default=context, label="Story starter")],
    examples=examples,
    title="Mindfulness Story Generation with Persistent Dataset Memory",
    description="Mindfulness Story Generation with Persistent Dataset Memory",
    article=f"Memory Dataset URL: [{DATASET_REPO_URL}]({DATASET_REPO_URL})")
parallelModel.launch(share=False)