ginipick commited on
Commit
24cd13f
·
verified ·
1 Parent(s): 4f52bbc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -96
app.py CHANGED
@@ -555,7 +555,7 @@ def ginigen_app():
555
 
556
  # Set default session state
557
  if "ai_model" not in st.session_state:
558
- st.session_state.ai_model = "gpt-4.1-mini" # κΈ°λ³Έ λͺ¨λΈλ‘œ μ„€μ •
559
  if "messages" not in st.session_state:
560
  st.session_state.messages = []
561
  if "auto_save" not in st.session_state:
@@ -575,9 +575,7 @@ def ginigen_app():
575
  sb = st.sidebar
576
  sb.title("Blog Settings")
577
 
578
- # λͺ¨λΈ 선택 μΆ”κ°€
579
- available_models = ["gpt-4.1-mini", "gpt-4o", "gpt-4-turbo"]
580
- sb.selectbox("AI Model", available_models, key="ai_model")
581
 
582
  sb.subheader("Blog Style Settings")
583
  sb.selectbox(
@@ -595,6 +593,7 @@ def ginigen_app():
595
  )
596
 
597
  sb.slider("Blog Length (word count)", 800, 3000, key="word_count")
 
598
 
599
  # Example topics
600
  sb.subheader("Example Topics")
@@ -734,7 +733,8 @@ def process_input(prompt: str, uploaded_files):
734
 
735
  with st.chat_message("assistant"):
736
  placeholder = st.empty()
737
- answer = ""
 
738
 
739
  use_web_search = st.session_state.web_search_enabled
740
  has_uploaded_files = bool(uploaded_files) and len(uploaded_files) > 0
@@ -742,7 +742,7 @@ def process_input(prompt: str, uploaded_files):
742
  try:
743
  # μƒνƒœ ν‘œμ‹œλ₯Ό μœ„ν•œ μƒνƒœ μ»΄ν¬λ„ŒνŠΈ
744
  status = st.status("Preparing to generate blog...")
745
- status.update(label="Initializing OpenAI client...")
746
 
747
  client = get_openai_client()
748
 
@@ -764,7 +764,7 @@ def process_input(prompt: str, uploaded_files):
764
  file_content = process_uploaded_files(uploaded_files)
765
 
766
  # Build system prompt
767
- status.update(label="Preparing system prompt...")
768
  sys_prompt = get_system_prompt(
769
  template=st.session_state.blog_template,
770
  tone=st.session_state.blog_tone,
@@ -774,7 +774,7 @@ def process_input(prompt: str, uploaded_files):
774
  )
775
 
776
  # OpenAI API 호좜 μ€€λΉ„
777
- status.update(label="Generating blog content...")
778
 
779
  # λ©”μ‹œμ§€ ꡬ성
780
  api_messages = [
@@ -794,98 +794,47 @@ def process_input(prompt: str, uploaded_files):
794
  # μ‚¬μš©μž λ©”μ‹œμ§€ μΆ”κ°€
795
  api_messages.append({"role": "user", "content": user_content})
796
 
797
- # OpenAI API 호좜 - μž¬μ‹œλ„ 둜직 μΆ”κ°€
798
- max_retries = 3
799
- retry_delay = 2 # 초 λ‹¨μœ„
800
- answer = None
801
- used_model = st.session_state.ai_model
802
-
803
- for retry in range(max_retries):
804
- try:
805
- status.update(label=f"Generating blog with {used_model} (attempt {retry+1}/{max_retries})...")
806
-
807
- # νƒ€μž„μ•„μ›ƒ 인자 없이 API 호좜
808
- response = client.chat.completions.create(
809
- model=used_model,
810
- messages=api_messages,
811
- temperature=1,
812
- max_tokens=MAX_TOKENS,
813
- top_p=1
814
- )
815
-
816
- # 응닡 μΆ”μΆœ
817
- answer = response.choices[0].message.content
818
- status.update(label=f"Blog generated successfully with {used_model}!", state="complete")
819
- break # μ„±κ³΅ν•˜λ©΄ 반볡문 μ’…λ£Œ
820
-
821
- except Exception as api_error:
822
- error_message = str(api_error)
823
- logging.error(f"OpenAI API error (attempt {retry+1}/{max_retries}): {error_message}")
824
-
825
- if retry < max_retries - 1:
826
- wait_time = retry_delay * (retry + 1) # 점점 κΈΈμ–΄μ§€λŠ” λŒ€κΈ° μ‹œκ°„
827
- status.update(label=f"API Error: {error_message}. Retrying in {wait_time}s... ({retry+1}/{max_retries})")
828
- time.sleep(wait_time)
829
- else:
830
- # λ§ˆμ§€λ§‰ μ‹œλ„μ—μ„œλ„ μ‹€νŒ¨ν•˜λ©΄ λŒ€μ²΄ λͺ¨λΈ μ‹œλ„
831
- fallback_models = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]
832
- if used_model in fallback_models:
833
- fallback_models.remove(used_model)
834
-
835
- # λŒ€μ²΄ λͺ¨λΈ μ‹œλ„
836
- if fallback_models:
837
- status.update(label="Primary model failed. Trying fallback models...")
838
- for fb_model in fallback_models:
839
- try:
840
- status.update(label=f"Trying with fallback model: {fb_model}...")
841
- response = client.chat.completions.create(
842
- model=fb_model,
843
- messages=api_messages,
844
- temperature=1,
845
- max_tokens=min(MAX_TOKENS, 4096 if "16k" not in fb_model else 16000),
846
- top_p=1
847
- )
848
- answer = response.choices[0].message.content
849
- used_model = fb_model
850
- status.update(label=f"Blog generated with fallback model: {used_model}", state="complete")
851
- break
852
- except Exception as fb_error:
853
- logging.error(f"Fallback model {fb_model} also failed: {str(fb_error)}")
854
-
855
- # λŒ€μ²΄ λͺ¨λΈ 쀑 ν•˜λ‚˜κ°€ μ„±κ³΅ν–ˆλŠ”μ§€ 확인
856
- if answer:
857
- break
858
-
859
- # λͺ¨λ“  λͺ¨λΈμ΄ μ‹€νŒ¨ν•œ 경우
860
- if not answer:
861
- status.update(label="All models failed to generate content", state="error")
862
- user_message = "OpenAI API μ—°κ²° λ¬Έμ œκ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€. 인터넷 연결을 ν™•μΈν•˜κ±°λ‚˜ λ‚˜μ€‘μ— λ‹€μ‹œ μ‹œλ„ν•΄ μ£Όμ„Έμš”."
863
- if "rate limit" in error_message.lower():
864
- user_message = "OpenAI API 속도 μ œν•œμ— λ„λ‹¬ν–ˆμŠ΅λ‹ˆλ‹€. μž μ‹œ ν›„ λ‹€μ‹œ μ‹œλ„ν•΄ μ£Όμ„Έμš”."
865
- elif "invalid api key" in error_message.lower():
866
- user_message = "μœ νš¨ν•˜μ§€ μ•Šμ€ API ν‚€μž…λ‹ˆλ‹€. API ν‚€λ₯Ό 확인해 μ£Όμ„Έμš”."
867
-
868
- raise Exception(user_message)
869
-
870
- # κ²°κ³Όκ°€ μƒμ„±λ˜μ§€ μ•Šμ€ 경우 였λ₯˜ λ°œμƒ
871
- if not answer:
872
- raise Exception("λΈ”λ‘œκ·Έ μ½˜ν…μΈ λ₯Ό μƒμ„±ν•˜μ§€ λͺ»ν–ˆμŠ΅λ‹ˆλ‹€.")
873
 
874
- # κ²°κ³Ό ν‘œμ‹œ
875
- placeholder.markdown(answer)
876
-
877
  # 이미지 생성
878
  answer_entry_saved = False
879
- if st.session_state.generate_image and answer:
880
  with st.spinner("Generating image..."):
881
  try:
882
- ip = extract_image_prompt(answer, prompt)
883
  img, cap = generate_image(ip)
884
  if img:
885
  st.image(img, caption=cap)
886
  st.session_state.messages.append({
887
  "role": "assistant",
888
- "content": answer,
889
  "image": img,
890
  "image_caption": cap
891
  })
@@ -895,22 +844,22 @@ def process_input(prompt: str, uploaded_files):
895
  st.warning("이미지 생성에 μ‹€νŒ¨ν–ˆμŠ΅λ‹ˆλ‹€. λΈ”λ‘œκ·Έ μ½˜ν…μΈ λ§Œ μ €μž₯λ©λ‹ˆλ‹€.")
896
 
897
  # Save the answer if not saved above
898
- if not answer_entry_saved and answer:
899
- st.session_state.messages.append({"role": "assistant", "content": answer})
900
 
901
  # Download buttons
902
- if answer:
903
  st.subheader("Download This Blog")
904
  c1, c2 = st.columns(2)
905
  c1.download_button(
906
  "Markdown",
907
- data=answer,
908
  file_name=f"{prompt[:30]}.md",
909
  mime="text/markdown"
910
  )
911
  c2.download_button(
912
  "HTML",
913
- data=md_to_html(answer, prompt[:30]),
914
  file_name=f"{prompt[:30]}.html",
915
  mime="text/html"
916
  )
@@ -932,7 +881,6 @@ def process_input(prompt: str, uploaded_files):
932
  st.session_state.messages.append({"role": "assistant", "content": ans})
933
 
934
 
935
-
936
  # ──────────────────────────────── main ────────────────────────────────────
937
  def main():
938
  ginigen_app()
 
555
 
556
  # Set default session state
557
  if "ai_model" not in st.session_state:
558
+ st.session_state.ai_model = "gpt-4.1-mini" # κ³ μ • λͺ¨λΈ μ„€μ •
559
  if "messages" not in st.session_state:
560
  st.session_state.messages = []
561
  if "auto_save" not in st.session_state:
 
575
  sb = st.sidebar
576
  sb.title("Blog Settings")
577
 
578
+ # λͺ¨λΈ 선택 제거 (κ³ μ • λͺ¨λΈ μ‚¬μš©)
 
 
579
 
580
  sb.subheader("Blog Style Settings")
581
  sb.selectbox(
 
593
  )
594
 
595
  sb.slider("Blog Length (word count)", 800, 3000, key="word_count")
596
+
597
 
598
  # Example topics
599
  sb.subheader("Example Topics")
 
733
 
734
  with st.chat_message("assistant"):
735
  placeholder = st.empty()
736
+ message_placeholder = st.empty()
737
+ full_response = ""
738
 
739
  use_web_search = st.session_state.web_search_enabled
740
  has_uploaded_files = bool(uploaded_files) and len(uploaded_files) > 0
 
742
  try:
743
  # μƒνƒœ ν‘œμ‹œλ₯Ό μœ„ν•œ μƒνƒœ μ»΄ν¬λ„ŒνŠΈ
744
  status = st.status("Preparing to generate blog...")
745
+ status.update(label="Initializing client...")
746
 
747
  client = get_openai_client()
748
 
 
764
  file_content = process_uploaded_files(uploaded_files)
765
 
766
  # Build system prompt
767
+ status.update(label="Preparing blog draft...")
768
  sys_prompt = get_system_prompt(
769
  template=st.session_state.blog_template,
770
  tone=st.session_state.blog_tone,
 
774
  )
775
 
776
  # OpenAI API 호좜 μ€€λΉ„
777
+ status.update(label="Writing blog content...")
778
 
779
  # λ©”μ‹œμ§€ ꡬ성
780
  api_messages = [
 
794
  # μ‚¬μš©μž λ©”μ‹œμ§€ μΆ”κ°€
795
  api_messages.append({"role": "user", "content": user_content})
796
 
797
+ # OpenAI API 슀트리밍 호좜 - κ³ μ • λͺ¨λΈ "gpt-4.1-mini" μ‚¬μš©
798
+ try:
799
+ # 슀트리밍 λ°©μ‹μœΌλ‘œ API 호좜
800
+ stream = client.chat.completions.create(
801
+ model="gpt-4.1-mini", # κ³ μ • λͺ¨λΈ μ‚¬μš©
802
+ messages=api_messages,
803
+ temperature=1,
804
+ max_tokens=MAX_TOKENS,
805
+ top_p=1,
806
+ stream=True # 슀트리밍 ν™œμ„±ν™”
807
+ )
808
+
809
+ # 슀트리밍 응닡 처리
810
+ for chunk in stream:
811
+ if chunk.choices and len(chunk.choices) > 0 and chunk.choices[0].delta.content is not None:
812
+ content_delta = chunk.choices[0].delta.content
813
+ full_response += content_delta
814
+ message_placeholder.markdown(full_response + "β–Œ")
815
+
816
+ # μ΅œμ’… 응닡 ν‘œμ‹œ (μ»€μ„œ 제거)
817
+ message_placeholder.markdown(full_response)
818
+ status.update(label="Blog completed!", state="complete")
819
+
820
+ except Exception as api_error:
821
+ error_message = str(api_error)
822
+ logging.error(f"API error: {error_message}")
823
+ status.update(label=f"Error: {error_message}", state="error")
824
+ raise Exception(f"Blog generation error: {error_message}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
825
 
 
 
 
826
  # 이미지 생성
827
  answer_entry_saved = False
828
+ if st.session_state.generate_image and full_response:
829
  with st.spinner("Generating image..."):
830
  try:
831
+ ip = extract_image_prompt(full_response, prompt)
832
  img, cap = generate_image(ip)
833
  if img:
834
  st.image(img, caption=cap)
835
  st.session_state.messages.append({
836
  "role": "assistant",
837
+ "content": full_response,
838
  "image": img,
839
  "image_caption": cap
840
  })
 
844
  st.warning("이미지 생성에 μ‹€νŒ¨ν–ˆμŠ΅λ‹ˆλ‹€. λΈ”λ‘œκ·Έ μ½˜ν…μΈ λ§Œ μ €μž₯λ©λ‹ˆλ‹€.")
845
 
846
  # Save the answer if not saved above
847
+ if not answer_entry_saved and full_response:
848
+ st.session_state.messages.append({"role": "assistant", "content": full_response})
849
 
850
  # Download buttons
851
+ if full_response:
852
  st.subheader("Download This Blog")
853
  c1, c2 = st.columns(2)
854
  c1.download_button(
855
  "Markdown",
856
+ data=full_response,
857
  file_name=f"{prompt[:30]}.md",
858
  mime="text/markdown"
859
  )
860
  c2.download_button(
861
  "HTML",
862
+ data=md_to_html(full_response, prompt[:30]),
863
  file_name=f"{prompt[:30]}.html",
864
  mime="text/html"
865
  )
 
881
  st.session_state.messages.append({"role": "assistant", "content": ans})
882
 
883
 
 
884
  # ──────────────────────────────── main ────────────────────────────────────
885
  def main():
886
  ginigen_app()