arvind6599 committed
Commit 9c745ef · 1 Parent(s): 937c9f0

gitignore + layout update

Files changed (2):
  1. .gitignore +5 -0
  2. app.py +11 -6
.gitignore ADDED
@@ -0,0 +1,5 @@
+ test/
+
+ hg_secrets.py
+
+ test.py
app.py CHANGED
@@ -97,9 +97,6 @@ def validate_email(email):
      return re.match(email_regex, email) is not None
 
 
-
-
-
  def submit_prompt(email, name, system_prompt_1, system_prompt_2, system_prompt_3):
      """
      Handles the full submission process:
@@ -185,7 +182,7 @@ def submit_prompt(email, name, system_prompt_1, system_prompt_2, system_prompt_3
              model="gpt-4o-mini",
              messages=[
                  {"role": "system", "content": system_prompt_2},
-                 {"role": "user", "content": f"Target company context: \n{output1} \n\nDocument:\n {doc}"}
+                 {"role": "user", "content": f"Target company context: \n{output1} \n\n Paragraph:\n {doc}"}
              ]
          )
          output2 += "\n" + response.choices[0].message.content.strip()
@@ -194,13 +191,13 @@ def submit_prompt(email, name, system_prompt_1, system_prompt_2, system_prompt_3
 
      # Prepare the final output for LLM3.
 
-     answer = output2.strip()
+     output2 = output2.strip()
      try:
          response = client.chat.completions.create(
              model="gpt-4o-mini",
              messages=[
                  {"role": "system", "content": system_prompt_3},
-                 {"role": "user", "content": f"Extracted information: \n{answer}"}
+                 {"role": "user", "content": f"Extracted information: \n{output2}"}
              ]
          )
          answer = response.choices[0].message.content.strip()
@@ -471,6 +468,14 @@ Target Company Mentioned: No
      "contains_target_firm": false
      }
      ```
+
+     | Field                  | Default Value if Missing | Type      |
+     | ---------------------- | ------------------------ | --------- |
+     | `buyer_firm`           | `"unknown"`              | `string`  |
+     | `seller_firm`          | `"unknown"`              | `string`  |
+     | `third_party`          | `"unknown"`              | `string`  |
+     | `contains_target_firm` | `false`                  | `boolean` |
+
      """)
 
      # Challenge instructions and testing guidance
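For reference, a minimal standalone sketch of the two chained gpt-4o-mini calls this commit touches. It is not the app.py implementation: the wrapper function `run_extraction_chain`, the `docs` argument (the paragraphs iterated over), and the bare `OpenAI()` client setup are assumptions for illustration; the surrounding validation, Gradio layout, and error handling in `submit_prompt` are omitted.

```python
# Hypothetical sketch of the LLM2 -> LLM3 chain as it stands after this commit.
# Assumes `system_prompt_2`, `system_prompt_3`, `output1` (target company context)
# and `docs` (list of paragraph strings) are supplied by the caller.
from openai import OpenAI

client = OpenAI()  # expects OPENAI_API_KEY in the environment


def run_extraction_chain(system_prompt_2, system_prompt_3, output1, docs):
    # LLM2: extract information from each paragraph, given the target company context.
    output2 = ""
    for doc in docs:
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": system_prompt_2},
                {"role": "user", "content": f"Target company context: \n{output1} \n\n Paragraph:\n {doc}"},
            ],
        )
        output2 += "\n" + response.choices[0].message.content.strip()

    # LLM3: turn the combined extractions into the final answer.
    output2 = output2.strip()
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": system_prompt_3},
            {"role": "user", "content": f"Extracted information: \n{output2}"},
        ],
    )
    return response.choices[0].message.content.strip()
```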