David Chu
committed on
fix: handle semantic scholar request err
Browse files
main.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
import os
|
|
|
2 |
|
3 |
import httpx
|
4 |
import streamlit as st
|
@@ -60,17 +61,30 @@ def generate_answer(
|
|
60 |
def semantic_scholar(
|
61 |
client: httpx.Client, query: str, top_k: int = 10
|
62 |
) -> list[Article]:
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
74 |
results = resp.json()
|
75 |
articles = []
|
76 |
for i, article in enumerate(results.get("data", []), 1):
|
|
|
1 |
import os
|
2 |
+
import time
|
3 |
|
4 |
import httpx
|
5 |
import streamlit as st
|
|
|
61 |
def semantic_scholar(
|
62 |
client: httpx.Client, query: str, top_k: int = 10
|
63 |
) -> list[Article]:
|
64 |
+
max_retries = 5
|
65 |
+
for attempt in range(max_retries):
|
66 |
+
try:
|
67 |
+
resp = client.get(
|
68 |
+
"https://api.semanticscholar.org/graph/v1/paper/search",
|
69 |
+
params={
|
70 |
+
"query": query,
|
71 |
+
"limit": top_k,
|
72 |
+
"fields": "title,tldr,abstract,externalIds,url,venue,year,citationCount",
|
73 |
+
"fieldsOfStudy": "Medicine,Biology",
|
74 |
+
"minCitationCount": 20,
|
75 |
+
},
|
76 |
+
timeout=10.0,
|
77 |
+
)
|
78 |
+
resp.raise_for_status()
|
79 |
+
break
|
80 |
+
except (httpx.HTTPStatusError, httpx.TimeoutException) as err:
|
81 |
+
if attempt < max_retries - 1:
|
82 |
+
time.sleep(1)
|
83 |
+
else:
|
84 |
+
raise err
|
85 |
+
else:
|
86 |
+
raise RuntimeError()
|
87 |
+
|
88 |
results = resp.json()
|
89 |
articles = []
|
90 |
for i, article in enumerate(results.get("data", []), 1):
|