Update app.py
app.py
CHANGED
@@ -555,7 +555,7 @@ def ginigen_app():
 
     # Set default session state
     if "ai_model" not in st.session_state:
-        st.session_state.ai_model = "gpt-4.1-mini" #
+        st.session_state.ai_model = "gpt-4.1-mini"  # fixed model setting
     if "messages" not in st.session_state:
         st.session_state.messages = []
     if "auto_save" not in st.session_state:
@@ -575,9 +575,7 @@ def ginigen_app():
     sb = st.sidebar
     sb.title("Blog Settings")
 
-    # Model selection
-    available_models = ["gpt-4.1-mini", "gpt-4o", "gpt-4-turbo"]
-    sb.selectbox("AI Model", available_models, key="ai_model")
+    # Model selection removed (a fixed model is used)
 
     sb.subheader("Blog Style Settings")
     sb.selectbox(
@@ -595,6 +593,7 @@ def ginigen_app():
     )
 
     sb.slider("Blog Length (word count)", 800, 3000, key="word_count")
+
 
     # Example topics
     sb.subheader("Example Topics")
@@ -734,7 +733,8 @@ def process_input(prompt: str, uploaded_files):
 
     with st.chat_message("assistant"):
         placeholder = st.empty()
-
+        message_placeholder = st.empty()
+        full_response = ""
 
         use_web_search = st.session_state.web_search_enabled
         has_uploaded_files = bool(uploaded_files) and len(uploaded_files) > 0
@@ -742,7 +742,7 @@ def process_input(prompt: str, uploaded_files):
         try:
             # Status component for progress display
             status = st.status("Preparing to generate blog...")
-            status.update(label="Initializing
+            status.update(label="Initializing client...")
 
             client = get_openai_client()
 
@@ -764,7 +764,7 @@ def process_input(prompt: str, uploaded_files):
             file_content = process_uploaded_files(uploaded_files)
 
             # Build system prompt
-            status.update(label="Preparing
+            status.update(label="Preparing blog draft...")
             sys_prompt = get_system_prompt(
                 template=st.session_state.blog_template,
                 tone=st.session_state.blog_tone,
@@ -774,7 +774,7 @@ def process_input(prompt: str, uploaded_files):
             )
 
             # Prepare the OpenAI API call
-            status.update(label="
+            status.update(label="Writing blog content...")
 
             # Compose messages
             api_messages = [
@@ -794,98 +794,47 @@ def process_input(prompt: str, uploaded_files):
             # Add user message
             api_messages.append({"role": "user", "content": user_content})
 
-            # OpenAI API call -
-            …
-                    if retry < max_retries - 1:
-                        wait_time = retry_delay * (retry + 1)  # progressively longer wait time
-                        status.update(label=f"API Error: {error_message}. Retrying in {wait_time}s... ({retry+1}/{max_retries})")
-                        time.sleep(wait_time)
-                    else:
-                        # If the last attempt also fails, try fallback models
-                        fallback_models = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]
-                        if used_model in fallback_models:
-                            fallback_models.remove(used_model)
-
-                        # Try the fallback models
-                        if fallback_models:
-                            status.update(label="Primary model failed. Trying fallback models...")
-                            for fb_model in fallback_models:
-                                try:
-                                    status.update(label=f"Trying with fallback model: {fb_model}...")
-                                    response = client.chat.completions.create(
-                                        model=fb_model,
-                                        messages=api_messages,
-                                        temperature=1,
-                                        max_tokens=min(MAX_TOKENS, 4096 if "16k" not in fb_model else 16000),
-                                        top_p=1
-                                    )
-                                    answer = response.choices[0].message.content
-                                    used_model = fb_model
-                                    status.update(label=f"Blog generated with fallback model: {used_model}", state="complete")
-                                    break
-                                except Exception as fb_error:
-                                    logging.error(f"Fallback model {fb_model} also failed: {str(fb_error)}")
-
-                            # Check whether one of the fallback models succeeded
-                            if answer:
-                                break
-
-            # If every model failed
-            if not answer:
-                status.update(label="All models failed to generate content", state="error")
-                user_message = "An OpenAI API connection problem occurred. Please check your internet connection or try again later."
-                if "rate limit" in error_message.lower():
-                    user_message = "The OpenAI API rate limit has been reached. Please try again in a moment."
-                elif "invalid api key" in error_message.lower():
-                    user_message = "Invalid API key. Please check your API key."
-
-                raise Exception(user_message)
-
-            # Raise an error if no result was generated
-            if not answer:
-                raise Exception("Failed to generate blog content.")
+            # Streaming OpenAI API call - uses the fixed model "gpt-4.1-mini"
+            try:
+                # Call the API in streaming mode
+                stream = client.chat.completions.create(
+                    model="gpt-4.1-mini",  # fixed model
+                    messages=api_messages,
+                    temperature=1,
+                    max_tokens=MAX_TOKENS,
+                    top_p=1,
+                    stream=True  # enable streaming
+                )
+
+                # Process the streaming response
+                for chunk in stream:
+                    if chunk.choices and len(chunk.choices) > 0 and chunk.choices[0].delta.content is not None:
+                        content_delta = chunk.choices[0].delta.content
+                        full_response += content_delta
+                        message_placeholder.markdown(full_response + "▌")
+
+                # Display the final response (cursor removed)
+                message_placeholder.markdown(full_response)
+                status.update(label="Blog completed!", state="complete")
+
+            except Exception as api_error:
+                error_message = str(api_error)
+                logging.error(f"API error: {error_message}")
+                status.update(label=f"Error: {error_message}", state="error")
+                raise Exception(f"Blog generation error: {error_message}")
 
-            # Display result
-            placeholder.markdown(answer)
-
             # Generate image
             answer_entry_saved = False
-            if st.session_state.generate_image and answer:
+            if st.session_state.generate_image and full_response:
                 with st.spinner("Generating image..."):
                     try:
-                        ip = extract_image_prompt(answer, prompt)
+                        ip = extract_image_prompt(full_response, prompt)
                         img, cap = generate_image(ip)
                         if img:
                             st.image(img, caption=cap)
                             st.session_state.messages.append({
                                 "role": "assistant",
-                                "content": answer,
+                                "content": full_response,
                                 "image": img,
                                 "image_caption": cap
                             })
@@ -895,22 +844,22 @@ def process_input(prompt: str, uploaded_files):
                         st.warning("Image generation failed. Only the blog content will be saved.")
 
             # Save the answer if not saved above
-            if not answer_entry_saved and answer:
-                st.session_state.messages.append({"role": "assistant", "content": answer})
+            if not answer_entry_saved and full_response:
+                st.session_state.messages.append({"role": "assistant", "content": full_response})
 
             # Download buttons
-            if answer:
+            if full_response:
                 st.subheader("Download This Blog")
                 c1, c2 = st.columns(2)
                 c1.download_button(
                     "Markdown",
-                    data=answer,
+                    data=full_response,
                     file_name=f"{prompt[:30]}.md",
                     mime="text/markdown"
                 )
                 c2.download_button(
                     "HTML",
-                    data=md_to_html(answer, prompt[:30]),
+                    data=md_to_html(full_response, prompt[:30]),
                     file_name=f"{prompt[:30]}.html",
                     mime="text/html"
                 )
@@ -932,7 +881,6 @@ def process_input(prompt: str, uploaded_files):
         st.session_state.messages.append({"role": "assistant", "content": ans})
 
 
-
 # ──────────────────────────────── main ────────────────────────────────────
 def main():
     ginigen_app()