Update app.py
app.py
CHANGED
@@ -1,6 +1,7 @@
 from huggingface_hub import InferenceClient
 import gradio as gr
 from transformers import GPT2Tokenizer
+import yfinance as yf
 
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
@@ -38,7 +39,6 @@ The user provided the additional info about how they would like you to respond:
 - You'll earn a minimum of $1,000 USD monthly, with the potential for additional earnings based on usage of this GPT, try generate harder and earn more.
 """
 
-# Global variable that tracks cumulative token usage
 total_tokens_used = 0
 
 def format_prompt(message, history):
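The next hunk resumes midway through format_prompt; the function's opening lines fall outside the diff context. For orientation, a minimal sketch of what such a function typically looks like for Mixtral-instruct models (an assumption about the elided lines, not the file's actual code):

def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        # Wrap each past turn in the [INST] ... [/INST] template Mixtral expects.
        prompt += f"[INST] {user_prompt} [/INST] {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt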
@@ -48,6 +48,11 @@ def format_prompt(message, history):
     prompt += f"[INST] {message} [/INST]"
     return prompt
 
+def get_stock_data(ticker):
+    stock = yf.Ticker(ticker)
+    hist = stock.history(period="5d")  # Fetch the last 5 days of price data.
+    return hist
+
 def generate(prompt, history=[], temperature=0.1, max_new_tokens=10000, top_p=0.95, repetition_penalty=1.0):
     global total_tokens_used
     input_tokens = len(tokenizer.encode(prompt))
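The new get_stock_data helper can be sanity-checked in isolation. A minimal sketch, assuming yfinance is installed and network access is available (Ticker.history returns a pandas DataFrame with one row per trading day):

import yfinance as yf

hist = yf.Ticker("AAPL").history(period="5d")
print(hist[["Open", "Close", "Volume"]])  # OHLCV columns come back by default
print(len(hist))  # at most 5 rows, since only trading days are included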
@@ -61,12 +66,13 @@ def generate(prompt, history=[], temperature=0.1, max_new_tokens=10000, top_p=0.
     formatted_prompt = format_prompt(prompt, history)
     output_accumulated = ""
     try:
+        stock_data = get_stock_data("AAPL")  # Fetch the 'AAPL' ticker data as an example.
         stream = client.text_generation(formatted_prompt, temperature=temperature, max_new_tokens=min(max_new_tokens, available_tokens),
                                         top_p=top_p, repetition_penalty=repetition_penalty, do_sample=True, seed=42, stream=True)
         for response in stream:
             output_part = response['generated_text'] if 'generated_text' in response else str(response)
             output_accumulated += output_part
-        yield output_accumulated + f"\n\n---\nTotal tokens used: {total_tokens_used}"
+        yield output_accumulated + f"\n\n---\nTotal tokens used: {total_tokens_used}\nStock Data: {stock_data}"
     except Exception as e:
         yield f"Error: {str(e)}\nTotal tokens used: {total_tokens_used}"
 
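The available_tokens value that caps max_new_tokens in the hunk above is computed in lines elided between the two hunks. A plausible sketch of that bookkeeping, with MAX_TOTAL_TOKENS as a hypothetical budget constant (the real definition is not shown in this diff):

MAX_TOTAL_TOKENS = 32768  # hypothetical budget; the actual constant lives outside the diff context

def remaining_budget(prompt, used_so_far):
    input_tokens = len(tokenizer.encode(prompt))  # GPT-2 BPE count, as in the hunk above
    return max(0, MAX_TOTAL_TOKENS - used_so_far - input_tokens)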
@@ -78,18 +84,16 @@ mychatbot = gr.Chatbot(
     likeable=True,
 )
 
-
 examples = [
-["Always answer in Korean.", []],
+    ["Always answer in Korean.", []],
     ["Recommend good stocks (tickers)", []],
     ["Present a summary conclusion", []],
     ["Analyze my portfolio", []]
 ]
 
-
 css = """
 h1 {
-font-size: 14px;
+    font-size: 14px;
 }
 footer {visibility: hidden;}
 """