Update app.py

app.py CHANGED
@@ -808,8 +808,9 @@ def process_example(topic):
     """Handle example prompts."""
     process_input(topic, [])
 
+# ── Revised process_input, in full ────────────────────────────────────────
 def process_input(prompt: str, uploaded_files):
-    #
+    # Save the user message
     if not any(m["role"] == "user" and m["content"] == prompt
                for m in st.session_state.messages):
         st.session_state.messages.append({"role": "user", "content": prompt})
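The guard added here appends a user turn only when an identical one is not already in the history, which keeps reruns of the same example prompt from duplicating messages. A minimal standalone sketch of the idiom (plain Python; `messages` stands in for `st.session_state.messages`):

```python
# Plain-Python sketch of the duplicate guard; `messages` stands in for
# st.session_state.messages.
messages = [{"role": "user", "content": "hello"}]

def append_user_once(messages, prompt):
    """Append a user turn only if the identical text is not already there."""
    if not any(m["role"] == "user" and m["content"] == prompt
               for m in messages):
        messages.append({"role": "user", "content": prompt})

append_user_once(messages, "hello")       # no-op: duplicate
append_user_once(messages, "a new idea")  # appended
print(len(messages))                      # 2
```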
@@ -818,9 +819,9 @@ def process_input(prompt: str, uploaded_files):
         st.markdown(prompt)
 
     with st.chat_message("assistant"):
-        placeholder
-        message_placeholder
-        full_response
+        placeholder = st.empty()
+        message_placeholder = st.empty()
+        full_response = ""
 
         use_web_search = st.session_state.web_search_enabled
         has_uploaded = bool(uploaded_files)
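`st.empty()` returns a single-slot placeholder whose content is overwritten in place on every stream chunk; that is what `message_placeholder.markdown(full_response)` in the last hunk relies on. A minimal sketch of the pattern, assuming it runs inside a Streamlit app (the token list is made up):

```python
import time

import streamlit as st

# A made-up token list stands in for the real model stream.
tokens = ["Idea", " 1:", " a", " self-cooling", " water", " bottle."]

with st.chat_message("assistant"):
    message_placeholder = st.empty()  # one slot, overwritten per chunk
    full_response = ""
    for tok in tokens:
        full_response += tok
        message_placeholder.markdown(full_response + "▌")  # typing cursor
        time.sleep(0.05)
    message_placeholder.markdown(full_response)            # final render
```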
@@ -831,13 +832,13 @@ def process_input(prompt: str, uploaded_files):
 
         client = get_openai_client()
 
-        #
+        # ① System prompt
         selected_cat = st.session_state.get("category_focus", "(None)")
         if selected_cat == "(None)":
             selected_cat = None
         sys_prompt = get_idea_system_prompt(selected_category=selected_cat)
 
-        #
+        # ② (Optional) web search & files
         search_content = None
        if use_web_search:
             status.update(label="Searching the web…")
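`status.update(...)` here presumes a `status` object created earlier in the function, outside the changed lines, presumably via `st.status`. A sketch of that enclosing pattern, under that assumption:

```python
import streamlit as st

# Assumed wrapper around the hunks above; the diff never shows where
# `status` is created, so this is a guess at the enclosing pattern.
with st.status("Preparing…", expanded=False) as status:
    status.update(label="Searching the web…")
    # ... optional web search ...
    status.update(label="Generating ideas…")
    # ... streaming model call ...
    status.update(label="Ideas created!", state="complete")
```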
@@ -850,27 +851,38 @@ def process_input(prompt: str, uploaded_files):
             with st.spinner("Processing files…"):
                 file_content = process_uploaded_files(uploaded_files)
 
-        #
+        # ③ Build the user message
         user_content = prompt
         if search_content:
             user_content += "\n\n" + search_content
         if file_content:
             user_content += "\n\n" + file_content
 
+        # ---- Add category / sub-item context ---------------------------
+        def category_context(sel):
+            if sel:                      # a specific category was selected
+                return json.dumps(
+                    {sel: physical_transformation_categories[sel]},
+                    ensure_ascii=False)
+            # (None) → pass only the key list
+            return "ALL_CATEGORIES: " + ", ".join(
+                physical_transformation_categories.keys())
+
         api_messages = [
             {"role": "system", "content": sys_prompt},
+            {"role": "system", "name": "category_db",
+             "content": category_context(selected_cat)},
             {"role": "user", "content": user_content},
         ]
+        # --------------------------------------------------------------
 
+        # ④ OpenAI streaming call
         status.update(label="Generating ideas…")
         stream = client.chat.completions.create(
             model="gpt-4.1-mini",
             messages=api_messages,
-            temperature=1,
-
-            top_p=1,
-            stream=True
+            temperature=1, max_tokens=MAX_TOKENS,
+            top_p=1, stream=True
         )
         for chunk in stream:
             if chunk.choices and chunk.choices[0].delta.content:
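The new `category_db` system message carries either one category's sub-items as JSON or, when no focus is set, just the key list. A self-contained check of `category_context` with a toy stand-in for `physical_transformation_categories` (the real dict lives elsewhere in app.py):

```python
import json

# Toy stand-in; the real physical_transformation_categories is defined in app.py.
physical_transformation_categories = {
    "Phase change": ["melting", "evaporation", "sublimation"],
    "Deformation": ["bending", "stretching", "twisting"],
}

def category_context(sel):
    """One category's sub-items as JSON, or only the key list for '(None)'."""
    if sel:
        return json.dumps({sel: physical_transformation_categories[sel]},
                          ensure_ascii=False)
    return "ALL_CATEGORIES: " + ", ".join(physical_transformation_categories)

print(category_context("Phase change"))
# {"Phase change": ["melting", "evaporation", "sublimation"]}
print(category_context(None))
# ALL_CATEGORIES: Phase change, Deformation
```

In the call itself, `MAX_TOKENS` is presumably a module-level constant defined elsewhere in app.py; it is not part of this hunk.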
@@ -879,69 +891,57 @@ def process_input(prompt: str, uploaded_files):
             message_placeholder.markdown(full_response)
         status.update(label="Ideas created!", state="complete")
 
-        #
-        # ── ④ Image generation (replaces the old block entirely) ──────────
+        # ⑤ Image generation (supports both the CCM heading and the legacy patterns)
         if st.session_state.generate_image and full_response:
-
-            # 1️⃣ ▶ CCM style: the line below the "### 이미지 프롬프트" heading
+            # CCM heading
             ccm_match = re.search(
                 r"###\s*이미지\s*프롬프트\s*\n+([^\n]+)",
                 full_response, flags=re.IGNORECASE)
-
-            # 2️⃣ ▷ Legacy style: table row or "Image Prompt:"
             legacy_match = None
             if not ccm_match:
                 legacy_match = re.search(
                     r"\|\s*(?:\*\*)?Image\s+Prompt(?:\*\*)?\s*\|\s*([^|\n]+)",
-                    full_response, flags=re.IGNORECASE)
-
-
-                    r"(?i)Image\s+Prompt\s*[:\-]\s*([^\n]+)",
-                    full_response)
-
-            # 3️⃣ Extract the final prompt
+                    full_response, flags=re.IGNORECASE) \
+                    or re.search(r"(?i)Image\s+Prompt\s*[:\-]\s*([^\n]+)",
+                                 full_response)
             match = ccm_match or legacy_match
             if match:
                 raw_prompt = re.sub(r"[\r\n`\"'\\]", " ",
                                     match.group(1)).strip()
-
                 with st.spinner("아이디어 이미지 생성 중…"):
                     img, cap = generate_image(raw_prompt)
-
                 if img:
                     st.image(img, caption=f"아이디어 시각화 – {cap}")
-                    # Store the image in the chat history as well
                     st.session_state.messages.append({
                         "role": "assistant",
                         "content": "",
                         "image": img,
                         "image_caption": f"아이디어 시각화 – {cap}"
                     })
-
 
+        # ── New helper: append the answer, download buttons, auto JSON save ──
+        def write_output(md_text: str, prompt: str):
+            """Attach the final markdown to the UI once, then handle the file downloads and the auto JSON save."""
+            # ① Save the answer to the chat history
+            st.session_state.messages.append({"role": "assistant", "content": md_text})
 
-        # ── ⑤ Save the result
-        st.session_state.messages.append(
-            {"role": "assistant", "content": full_response})
-
-        # ── ⑥ Download buttons
-        st.subheader("Download This Output")
-        c1, c2 = st.columns(2)
-        c1.download_button("Markdown", full_response,
-                           file_name=f"{prompt[:30]}.md",
-                           mime="text/markdown")
-        c2.download_button("HTML",
-                           md_to_html(full_response, prompt[:30]),
-                           file_name=f"{prompt[:30]}.html",
-                           mime="text/html")
-
-        # ── ⑦ Auto JSON save
-        if st.session_state.auto_save:
-            fn = f"chat_history_auto_{datetime.now():%Y%m%d_%H%M%S}.json"
-            with open(fn, "w", encoding="utf-8") as fp:
-                json.dump(st.session_state.messages, fp,
-                          ensure_ascii=False, indent=2)
+            # ② Download buttons
+            st.subheader("Download This Output")
+            col_md, col_html = st.columns(2)
+            col_md.download_button(
+                "Markdown", md_text,
+                file_name=f"{prompt[:30]}.md", mime="text/markdown")
+            col_html.download_button(
+                "HTML", md_to_html(md_text, prompt[:30]),
+                file_name=f"{prompt[:30]}.html", mime="text/html")
+
+            # ③ Auto JSON save
+            if st.session_state.auto_save:
+                fn = f"chat_history_auto_{datetime.now():%Y%m%d_%H%M%S}.json"
+                with open(fn, "w", encoding="utf-8") as fp:
+                    json.dump(st.session_state.messages, fp,
+                              ensure_ascii=False, indent=2)
 
     except Exception as e:
         err = str(e)
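The two extraction paths in this hunk can be exercised offline. A small self-test against made-up model outputs in all three formats; the Korean heading `### 이미지 프롬프트` ("image prompt") is the one the CCM-style output uses:

```python
import re

# Made-up sample outputs, one per supported format.
ccm_output = "...\n### 이미지 프롬프트\nA watercolor sketch of a folding drone\n"
legacy_table = "| **Image Prompt** | a folding drone, watercolor |\n"
legacy_inline = "Image Prompt: a folding drone, watercolor\n"

def extract_image_prompt(text):
    """Mirror of the diff's matching order: CCM heading first, then legacy."""
    ccm = re.search(r"###\s*이미지\s*프롬프트\s*\n+([^\n]+)",
                    text, flags=re.IGNORECASE)
    legacy = None
    if not ccm:
        legacy = re.search(r"\|\s*(?:\*\*)?Image\s+Prompt(?:\*\*)?\s*\|\s*([^|\n]+)",
                           text, flags=re.IGNORECASE) \
                 or re.search(r"(?i)Image\s+Prompt\s*[:\-]\s*([^\n]+)", text)
    m = ccm or legacy
    # Same sanitization as the diff: strip newlines, quotes, backticks, backslashes.
    return re.sub(r"[\r\n`\"'\\]", " ", m.group(1)).strip() if m else None

for sample in (ccm_output, legacy_table, legacy_inline):
    print(extract_image_prompt(sample))
```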
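Note that the hunk defines `write_output` but its call site falls outside the changed lines; presumably the streaming branch invokes it once per completed answer, along the lines of the following (hypothetical call, names taken from the diff):

```python
# Hypothetical call site, not shown in this diff:
write_output(full_response, prompt)
```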