S-Dreamer committed on
Commit
17e370a
·
verified ·
1 Parent(s): f6e874b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -16
app.py CHANGED
@@ -3,6 +3,7 @@ from llama_cpp import Llama
3
 
4
  st.set_page_config(page_title="Cybertron Chat Interface", layout="wide")
5
  st.title("🧠 Cybertron Chat: Generalist vs Specialist Mode")
 
6
  st.markdown("""
7
  Welcome to the Cybertron Chat Interface. Choose between:
8
 
@@ -10,10 +11,23 @@ Welcome to the Cybertron Chat Interface. Choose between:
10
  - 🛡️ **Specialist Mode** for precise, tactical cyber security and pentesting insights
11
  """)
12
 
13
- # Sidebar toggle for mode
14
- model_choice = st.sidebar.radio("Choose Mode:", ["Generalist 🤖", "Specialist 🛡️"], help="Switch between general reasoning and focused cybersec models")
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
- # Display model tips
17
  if model_choice == "Generalist 🤖":
18
  st.sidebar.markdown("""
19
  **Best for:**
@@ -21,15 +35,16 @@ if model_choice == "Generalist 🤖":
21
  - Brainstorming tools or strategies
22
  - Simulating conversations
23
  """)
24
- llm = Llama.from_pretrained(
25
  repo_id="bartowski/cybertron-v4-qw7B-MGS-GGUF",
26
- filename="cybertron-v4-qw7B-MGS-IQ2_M.gguf",
27
  )
28
  example_prompts = [
29
  "Simulate a hacker group planning a phishing campaign",
30
  "Explain how to exploit a misconfigured NGINX server",
31
  "Write a Python script that scrapes threat intel feeds"
32
  ]
 
33
  else:
34
  st.sidebar.markdown("""
35
  **Best for:**
@@ -37,9 +52,9 @@ else:
37
  - Red/blue team planning
38
  - Shell scripting and command-line tasks
39
  """)
40
- llm = Llama.from_pretrained(
41
  repo_id="TheBloke/una-cybertron-7B-v2-GGUF",
42
- filename="una-cybertron-7b-v2-bf16.Q2_K.gguf",
43
  )
44
  example_prompts = [
45
  "List enumeration commands for Active Directory",
@@ -47,21 +62,27 @@ else:
47
  "Generate a Bash reverse shell with obfuscation"
48
  ]
49
 
50
- # Prompt input and example selector
51
  col1, col2 = st.columns([2, 1])
52
 
53
  with col1:
54
- user_input = st.text_area("\U0001F4AC Enter your query below:", height=150)
55
  with col2:
56
- st.markdown("**\U0001F4D6 Prompt Examples:**")
57
  selected_example = st.selectbox("Try an example:", ["-- Select an example --"] + example_prompts)
58
  if selected_example != "-- Select an example --":
59
  user_input = selected_example
60
 
61
- # Run inference
62
  if st.button("Submit", use_container_width=True):
63
- with st.spinner("Generating response..."):
64
- output = llm(user_input, max_tokens=512, echo=True)
65
- st.markdown("---")
66
- st.markdown("**\U0001F4C4 Response:**")
67
- st.code(output["choices"][0]["text"].strip())
 
 
 
 
 
 
 
3
 
4
  st.set_page_config(page_title="Cybertron Chat Interface", layout="wide")
5
  st.title("🧠 Cybertron Chat: Generalist vs Specialist Mode")
6
+
7
  st.markdown("""
8
  Welcome to the Cybertron Chat Interface. Choose between:
9
 
 
11
  - 🛡️ **Specialist Mode** for precise, tactical cyber security and pentesting insights
12
  """)
13
 
14
+ # -- Sidebar toggle for mode --
15
+ model_choice = st.sidebar.radio(
16
+ "Choose Mode:",
17
+ ["Generalist 🤖", "Specialist 🛡️"],
18
+ help="Switch between general reasoning and focused cybersec models"
19
+ )
20
+
21
+ # -- Load model only once with caching --
22
+ @st.cache_resource(show_spinner=False)
23
+ def load_model(repo_id, filename):
24
+ return Llama.from_pretrained(
25
+ repo_id=repo_id,
26
+ filename=filename,
27
+ n_ctx=2048, # Default context window
28
+ )
29
 
30
+ # -- Set mode-specific models and prompts --
31
  if model_choice == "Generalist 🤖":
32
  st.sidebar.markdown("""
33
  **Best for:**
 
35
  - Brainstorming tools or strategies
36
  - Simulating conversations
37
  """)
38
+ llm = load_model(
39
  repo_id="bartowski/cybertron-v4-qw7B-MGS-GGUF",
40
+ filename="cybertron-v4-qw7B-MGS-IQ2_M.gguf"
41
  )
42
  example_prompts = [
43
  "Simulate a hacker group planning a phishing campaign",
44
  "Explain how to exploit a misconfigured NGINX server",
45
  "Write a Python script that scrapes threat intel feeds"
46
  ]
47
+
48
  else:
49
  st.sidebar.markdown("""
50
  **Best for:**
 
52
  - Red/blue team planning
53
  - Shell scripting and command-line tasks
54
  """)
55
+ llm = load_model(
56
  repo_id="TheBloke/una-cybertron-7B-v2-GGUF",
57
+ filename="una-cybertron-7b-v2-bf16.Q2_K.gguf"
58
  )
59
  example_prompts = [
60
  "List enumeration commands for Active Directory",
 
62
  "Generate a Bash reverse shell with obfuscation"
63
  ]
64
 
65
+ # -- Prompt input and example selector --
66
  col1, col2 = st.columns([2, 1])
67
 
68
  with col1:
69
+ user_input = st.text_area("💬 Enter your query below:", height=150)
70
  with col2:
71
+ st.markdown("**📚 Prompt Examples:**")
72
  selected_example = st.selectbox("Try an example:", ["-- Select an example --"] + example_prompts)
73
  if selected_example != "-- Select an example --":
74
  user_input = selected_example
75
 
76
+ # -- Run inference --
77
  if st.button("Submit", use_container_width=True):
78
+ if user_input.strip() == "":
79
+ st.warning("Please enter a prompt or select an example.")
80
+ else:
81
+ with st.spinner("🚀 Generating response..."):
82
+ try:
83
+ output = llm(user_input, max_tokens=512, echo=False)
84
+ st.markdown("---")
85
+ st.markdown("**📄 Response:**")
86
+ st.code(output["choices"][0]["text"].strip())
87
+ except Exception as e:
88
+ st.error(f"⚠️ Error: {str(e)}")