ginipick committed on
Commit
e41b021
·
verified ·
1 Parent(s): 6d12f29

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +67 -118
app.py CHANGED
@@ -3,16 +3,13 @@ import os, json, re, logging, requests, markdown, time, io
3
  from datetime import datetime
4
 
5
  import streamlit as st
6
- # >>> Anthropic λΆ€λΆ„ μ‚­μ œ
7
- # import anthropic
8
- from openai import OpenAI # μ»€μŠ€ν…€ 래퍼(λ˜λŠ” 별도 라이브러리)라고 κ°€μ •
9
 
10
  from gradio_client import Client
11
  import pandas as pd
12
  import PyPDF2 # For handling PDF files
13
 
14
  # ──────────────────────────────── Environment Variables / Constants ─────────────────────────
15
- # κΈ°μ‘΄ ANTHROPIC_KEY -> OPENAI_API_KEY 둜 λ³€κ²½
16
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
17
  BRAVE_KEY = os.getenv("SERPHOUSE_API_KEY", "") # Keep this name
18
  BRAVE_ENDPOINT = "https://api.search.brave.com/res/v1/web/search"
@@ -50,15 +47,10 @@ logging.basicConfig(level=logging.INFO,
50
  # ──────────────────────────────── OpenAI Client ──────────────────────────
51
  @st.cache_resource
52
  def get_openai_client():
53
- """
54
- μ»€μŠ€ν…€ OpenAI 객체λ₯Ό μƒμ„±ν•œλ‹€κ³  κ°€μ •ν•©λ‹ˆλ‹€.
55
- μ‹€μ œλ‘œλŠ” openai.api_key = OPENAI_API_KEY 둜만 μ„€μ •ν•˜λŠ” κ²½μš°κ°€ λ§ŽμŠ΅λ‹ˆλ‹€.
56
- """
57
  if not OPENAI_API_KEY:
58
  raise RuntimeError("⚠️ OPENAI_API_KEY ν™˜κ²½ λ³€μˆ˜κ°€ μ„€μ •λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
59
- # μ—¬κΈ°μ„œλŠ” μ˜ˆμ‹œλ‘œμ„œ λ‹€μŒκ³Ό 같이 μ΄ˆκΈ°ν™”:
60
- client = OpenAI(api_key=OPENAI_API_KEY)
61
- return client
62
 
63
  # ──────────────────────────────── Blog Creation System Prompt ─────────────
64
  def get_system_prompt(template="ginigen", tone="professional", word_count=1750, include_search_results=False, include_uploaded_files=False) -> str:
@@ -521,56 +513,23 @@ def generate_image(prompt, w=768, h=768, g=3.5, steps=30, seed=3):
521
 
522
  def extract_image_prompt(blog_text: str, topic: str):
523
  """
524
- Analyze the blog content (blog_text) to generate a one-line English image prompt
525
- related to the topic.
526
  """
527
- # κΈ°μ‘΄ anthropic ν΄λΌμ΄μ–ΈνŠΈ μ‚¬μš© -> OpenAI 호좜둜 λŒ€μ²΄
528
  client = get_openai_client()
529
-
530
- system_msg = (
531
- f"Generate a single-line English image prompt from the following text:\n"
532
- f"Topic: {topic}\n\n"
533
- f"---\n"
534
- f"{blog_text}\n\n"
535
- f"---\n"
536
- f"Return only the prompt text, nothing else."
537
- )
538
-
539
- # μ‹€μ œλ‘œλŠ” openai APIλ₯Ό μ–΄λ–»κ²Œ ν˜ΈμΆœν•˜λŠλƒμ— 따라 λ‹¬λΌμ§‘λ‹ˆλ‹€.
540
- # μ—¬κΈ°μ„œλŠ” μ˜ˆμ‹œλ‘œ client.responses.create()λ₯Ό μ‚¬μš©ν•œλ‹€κ³  κ°€μ •
541
  try:
542
- response = client.responses.create(
543
- model="gpt-4.1-mini",
544
- input=[
545
- {
546
- "role": "system",
547
- "content": [
548
- {
549
- "type": "input_text",
550
- "text": system_msg
551
- }
552
- ]
553
- }
554
  ],
555
- text={"format": {"type": "text"}},
556
  temperature=1,
557
- max_output_tokens=80,
558
  top_p=1
559
  )
560
- # μ˜ˆμ‹œ: response κ°μ²΄μ—μ„œ 첫 번째 좜λ ₯만 κ°€μ Έμ˜¨λ‹€κ³  κ°€μ •
561
- # response κ΅¬μ‘°λŠ” μ‚¬μš© 쀑인 λž˜νΌμ— 따라 λ‹€λ¦…λ‹ˆλ‹€.
562
- content = ""
563
- if "responses" in dir(response):
564
- # 가상 μ˜ˆμ‹œ: response.responses[0].content[0].text
565
- # λ˜λŠ” response["choices"][0]["text"] ν˜•νƒœμΌ μˆ˜λ„ 있음
566
- first_resp = response.responses[0] # κ°€μ •
567
- # λ³Έ μ˜ˆμ‹œλŠ” "content" ν•„λ“œκ°€ list둜 있고, κ·Έ 쀑 [0]["text"]에 값이 μžˆλ‹€κ³  κ°€μ •
568
- content = first_resp.content[0]["text"].strip()
569
- else:
570
- content = "A professional photo related to the topic, high quality"
571
-
572
- return content
573
-
574
  except Exception as e:
575
  logging.error(f"OpenAI image prompt generation error: {e}")
576
  return f"A professional photo related to {topic}, high quality"
@@ -590,8 +549,7 @@ def ginigen_app():
590
 
591
  # Set default session state
592
  if "ai_model" not in st.session_state:
593
- # κΈ°μ‘΄ anthropic λͺ¨λΈλͺ… λŒ€μ‹ , gpt-4.1-mini
594
- st.session_state.ai_model = "gpt-4.1-mini"
595
  if "messages" not in st.session_state:
596
  st.session_state.messages = []
597
  if "auto_save" not in st.session_state:
@@ -611,6 +569,10 @@ def ginigen_app():
611
  sb = st.sidebar
612
  sb.title("Blog Settings")
613
 
 
 
 
 
614
  sb.subheader("Blog Style Settings")
615
  sb.selectbox(
616
  "Blog Template",
@@ -772,25 +734,31 @@ def process_input(prompt: str, uploaded_files):
772
  has_uploaded_files = bool(uploaded_files) and len(uploaded_files) > 0
773
 
774
  try:
 
 
 
 
775
  client = get_openai_client()
776
 
777
  # Prepare conversation messages
778
- messages = [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]
779
 
780
  # Web search
 
781
  if use_web_search:
782
- with st.spinner("Performing web search..."):
783
- sr = do_web_search(keywords(prompt, top=5))
784
- if sr:
785
- messages.append({"role": "user", "content": sr})
786
 
787
  # Process uploaded files β†’ content
788
  file_content = None
789
  if has_uploaded_files:
790
- with st.spinner("Analyzing uploaded files..."):
 
791
  file_content = process_uploaded_files(uploaded_files)
792
 
793
  # Build system prompt
 
794
  sys_prompt = get_system_prompt(
795
  template=st.session_state.blog_template,
796
  tone=st.session_state.blog_tone,
@@ -799,68 +767,48 @@ def process_input(prompt: str, uploaded_files):
799
  include_uploaded_files=has_uploaded_files
800
  )
801
 
802
- # 파일 λ‚΄μš©μ΄ μžˆλ‹€λ©΄ system prompt에 μΆ”κ°€
 
 
 
 
 
 
 
 
 
 
 
 
 
 
803
  if file_content:
804
- sys_prompt += (
805
- "\n\n"
806
- "Below is the content of the uploaded file(s). Please make sure to integrate it thoroughly in the blog:\n\n"
807
- f"{file_content}\n\n"
808
- "Ensure the file content is accurately reflected in the blog.\n"
809
- )
810
 
811
- # μ‚¬μš©μžκ°€ μž…λ ₯ν•œ prompt와 파일 μ°Έκ³  λ©”μ‹œμ§€
812
- if has_uploaded_files:
813
- extra_user_msg = (
814
- f"{prompt}\n\n"
815
- "Additional note: Please make sure to reference the uploaded file content in the blog. "
816
- "Use and analyze any data, statistics, or text included in the file(s)."
817
- )
818
- messages.append({"role": "user", "content": extra_user_msg})
819
 
820
- # 이제 OpenAI client둜 μš”μ²­μ„ λ³΄λƒ…λ‹ˆλ‹€.
821
- with st.spinner("Generating blog content..."):
822
- response = client.responses.create(
823
  model=st.session_state.ai_model,
824
- # OpenAI λž˜νΌμ— λ§žμΆ°μ„œ λŒ€λž΅μ μœΌλ‘œ μž¬κ΅¬μ„±ν•œ μ˜ˆμž…λ‹ˆλ‹€.
825
- input=[
826
- {
827
- "role": "system",
828
- "content": [
829
- {
830
- "type": "input_text",
831
- "text": sys_prompt
832
- }
833
- ]
834
- },
835
- {
836
- "role": "user",
837
- "content": [
838
- {
839
- "type": "input_text",
840
- "text": prompt
841
- }
842
- ]
843
- }
844
- # ν•„μš”ν•˜λ©΄ messages 전체λ₯Ό 넣을 μˆ˜λ„ μžˆμ§€λ§Œ,
845
- # μ—¬κΈ°μ„œλŠ” prompt와 system prompt만 λ„£λŠ” μ˜ˆμ‹œ
846
- ],
847
- text={"format": {"type": "text"}},
848
  temperature=1,
849
- max_output_tokens=MAX_TOKENS,
850
- top_p=1,
851
- store=True
852
  )
853
-
854
- # μ˜ˆμ‹œμƒ response κ°μ²΄μ—μ„œ μ΅œμ’… ν…μŠ€νŠΈλ₯Ό κΊΌλ‚΄λŠ” 둜직
855
- # μ‹€μ œ κ΅¬μ‘°λŠ” 라이브러리 κ΅¬ν˜„μ— 따라 λ‹€λ¦…λ‹ˆλ‹€
856
- if "responses" in dir(response):
857
- # κ°€μ •: response.responses[0].content[0].text ν˜•νƒœ
858
- content_blocks = response.responses[0].content
859
- answer = "\n".join(block["text"] for block in content_blocks if block["type"] == "output_text")
860
- else:
861
- answer = "Error: Unable to parse the OpenAI response."
 
862
 
863
- # 슀트리밍이 μ•„λ‹ˆλ―€λ‘œ ν•œλ²ˆμ— answer νšλ“ ν›„ 좜λ ₯
864
  placeholder.markdown(answer)
865
 
866
  # 이미지 생성
@@ -911,6 +859,7 @@ def process_input(prompt: str, uploaded_files):
911
  except Exception as e:
912
  error_message = str(e)
913
  placeholder.error(f"An error occurred: {error_message}")
 
914
  ans = f"An error occurred while processing your request: {error_message}"
915
  st.session_state.messages.append({"role": "assistant", "content": ans})
916
 
@@ -919,4 +868,4 @@ def main():
919
  ginigen_app()
920
 
921
  if __name__ == "__main__":
922
- main()
 
3
  from datetime import datetime
4
 
5
  import streamlit as st
6
+ from openai import OpenAI # OpenAI 라이브러리
 
 
7
 
8
  from gradio_client import Client
9
  import pandas as pd
10
  import PyPDF2 # For handling PDF files
11
 
12
  # ──────────────────────────────── Environment Variables / Constants ─────────────────────────
 
13
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
14
  BRAVE_KEY = os.getenv("SERPHOUSE_API_KEY", "") # Keep this name
15
  BRAVE_ENDPOINT = "https://api.search.brave.com/res/v1/web/search"
 
47
  # ──────────────────────────────── OpenAI Client ──────────────────────────
48
  @st.cache_resource
49
  def get_openai_client():
50
+ """Create an OpenAI client."""
 
 
 
51
  if not OPENAI_API_KEY:
52
  raise RuntimeError("⚠️ OPENAI_API_KEY ν™˜κ²½ λ³€μˆ˜κ°€ μ„€μ •λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
53
+ return OpenAI(api_key=OPENAI_API_KEY)
 
 
54
 
55
  # ──────────────────────────────── Blog Creation System Prompt ─────────────
56
  def get_system_prompt(template="ginigen", tone="professional", word_count=1750, include_search_results=False, include_uploaded_files=False) -> str:
 
513
 
514
  def extract_image_prompt(blog_text: str, topic: str):
515
  """
516
+ Generate a single-line English image prompt from the blog content.
 
517
  """
 
518
  client = get_openai_client()
519
+
 
 
 
 
 
 
 
 
 
 
 
520
  try:
521
+ response = client.chat.completions.create(
522
+ model="gpt-3.5-turbo", # 일반적으둜 μ‚¬μš© κ°€λŠ₯ν•œ λͺ¨λΈλ‘œ μ„€μ •
523
+ messages=[
524
+ {"role": "system", "content": "Generate a single-line English image prompt from the following text. Return only the prompt text, nothing else."},
525
+ {"role": "user", "content": f"Topic: {topic}\n\n---\n{blog_text}\n\n---"}
 
 
 
 
 
 
 
526
  ],
 
527
  temperature=1,
528
+ max_tokens=80,
529
  top_p=1
530
  )
531
+
532
+ return response.choices[0].message.content.strip()
 
 
 
 
 
 
 
 
 
 
 
 
533
  except Exception as e:
534
  logging.error(f"OpenAI image prompt generation error: {e}")
535
  return f"A professional photo related to {topic}, high quality"
 
549
 
550
  # Set default session state
551
  if "ai_model" not in st.session_state:
552
+ st.session_state.ai_model = "gpt-3.5-turbo" # κΈ°λ³Έ λͺ¨λΈλ‘œ μ„€μ •
 
553
  if "messages" not in st.session_state:
554
  st.session_state.messages = []
555
  if "auto_save" not in st.session_state:
 
569
  sb = st.sidebar
570
  sb.title("Blog Settings")
571
 
572
+ # λͺ¨λΈ 선택 μΆ”κ°€
573
+ available_models = ["gpt-3.5-turbo", "gpt-4o", "gpt-4-turbo"]
574
+ sb.selectbox("AI Model", available_models, key="ai_model")
575
+
576
  sb.subheader("Blog Style Settings")
577
  sb.selectbox(
578
  "Blog Template",
 
734
  has_uploaded_files = bool(uploaded_files) and len(uploaded_files) > 0
735
 
736
  try:
737
+ # μƒνƒœ ν‘œμ‹œλ₯Ό μœ„ν•œ μƒνƒœ μ»΄ν¬λ„ŒνŠΈ
738
+ status = st.status("Preparing to generate blog...")
739
+ status.update(label="Initializing OpenAI client...")
740
+
741
  client = get_openai_client()
742
 
743
  # Prepare conversation messages
744
+ messages = []
745
 
746
  # Web search
747
+ search_content = None
748
  if use_web_search:
749
+ status.update(label="Performing web search...")
750
+ with st.spinner("Searching the web..."):
751
+ search_content = do_web_search(keywords(prompt, top=5))
 
752
 
753
  # Process uploaded files β†’ content
754
  file_content = None
755
  if has_uploaded_files:
756
+ status.update(label="Processing uploaded files...")
757
+ with st.spinner("Analyzing files..."):
758
  file_content = process_uploaded_files(uploaded_files)
759
 
760
  # Build system prompt
761
+ status.update(label="Preparing system prompt...")
762
  sys_prompt = get_system_prompt(
763
  template=st.session_state.blog_template,
764
  tone=st.session_state.blog_tone,
 
767
  include_uploaded_files=has_uploaded_files
768
  )
769
 
770
+ # OpenAI API 호좜 μ€€λΉ„
771
+ status.update(label="Generating blog content...")
772
+
773
+ # λ©”μ‹œμ§€ ꡬ성
774
+ api_messages = [
775
+ {"role": "system", "content": sys_prompt}
776
+ ]
777
+
778
+ user_content = prompt
779
+
780
+ # 검색 κ²°κ³Όκ°€ 있으면 μ‚¬μš©μž ν”„λ‘¬ν”„νŠΈμ— μΆ”κ°€
781
+ if search_content:
782
+ user_content += "\n\n" + search_content
783
+
784
+ # 파일 λ‚΄μš©μ΄ 있으면 μ‚¬μš©μž ν”„λ‘¬ν”„νŠΈμ— μΆ”κ°€
785
  if file_content:
786
+ user_content += "\n\n" + file_content
 
 
 
 
 
787
 
788
+ # μ‚¬μš©μž λ©”μ‹œμ§€ μΆ”κ°€
789
+ api_messages.append({"role": "user", "content": user_content})
 
 
 
 
 
 
790
 
791
+ # OpenAI API 호좜
792
+ try:
793
+ response = client.chat.completions.create(
794
  model=st.session_state.ai_model,
795
+ messages=api_messages,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
796
  temperature=1,
797
+ max_tokens=MAX_TOKENS,
798
+ top_p=1
 
799
  )
800
+
801
+ # 응닡 μΆ”μΆœ
802
+ answer = response.choices[0].message.content
803
+ status.update(label="Blog generated successfully!", state="complete")
804
+
805
+ except Exception as api_error:
806
+ error_message = str(api_error)
807
+ logging.error(f"OpenAI API error: {error_message}")
808
+ status.update(label=f"API Error: {error_message}", state="error")
809
+ raise Exception(f"OpenAI API error: {error_message}")
810
 
811
+ # κ²°κ³Ό ν‘œμ‹œ
812
  placeholder.markdown(answer)
813
 
814
  # 이미지 생성
 
859
  except Exception as e:
860
  error_message = str(e)
861
  placeholder.error(f"An error occurred: {error_message}")
862
+ logging.error(f"Process input error: {error_message}")
863
  ans = f"An error occurred while processing your request: {error_message}"
864
  st.session_state.messages.append({"role": "assistant", "content": ans})
865
 
 
868
  ginigen_app()
869
 
870
  if __name__ == "__main__":
871
+ main()