Spaces: Running on L4
Upload folder using huggingface_hub
- frontend/app.py +3 -2
- main.py +10 -6
frontend/app.py
CHANGED

@@ -181,12 +181,13 @@ def Search(request, search_results=[]):
     print(
         f"Search: Fetching results for query: {query_value}, ranking: {ranking_value}"
     )
-
     return Div(
         Div(
             SearchBox(query_value=query_value, ranking_value=ranking_value),
             Div(
-                LoadingMessage(),
+                LoadingMessage()
+                if not search_results
+                else SearchResult(search_results),
                 id="search-results",  # This will be replaced by the search results
             ),
             cls="grid",
main.py
CHANGED

@@ -98,7 +98,14 @@ def get(request):
                 cls="grid",
             )
         )
-
+    # Generate a unique query_id based on the query and ranking value
+    query_id = generate_query_id(query_value + ranking_value)
+    # See if results are already in cache
+    if result_cache.get(query_id) is not None:
+        print(f"Results for query_id {query_id} already in cache")
+        result = result_cache.get(query_id)
+        search_results = get_results_children(result)
+        return Layout(Search(request, search_results))
     # Show the loading message if a query is provided
     return Layout(Search(request))  # Show SearchBox and Loading message initially
 
@@ -116,15 +123,12 @@ async def get(request, query: str, nn: bool = True):
     # Generate a unique query_id based on the query and ranking value
     query_id = generate_query_id(query + ranking_value)
     # See if results are already in cache
-    if result_cache.get(query_id):
+    if result_cache.get(query_id) is not None:
         print(f"Results for query_id {query_id} already in cache")
         result = result_cache.get(query_id)
         search_results = get_results_children(result)
-        # If task is completed, return the results, but no query_id
-        if task_cache.get(query_id):
-            return SearchResult(search_results, None)
-        # If task is not completed, return the results with query_id
         return SearchResult(search_results, query_id)
+    # Run the embedding and query against Vespa app
     task_cache.set(query_id, False)
     model = app.manager.model
     processor = app.manager.processor
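
In main.py the landing route now checks the result cache before falling back to the loading message: it derives a query_id from the query plus ranking value, and if result_cache already holds a response for that id it renders the stored results via Layout(Search(request, search_results)). A rough sketch of that flow is below; generate_query_id, result_cache and get_results_children are names taken from the diff, but the hash-based id and dict-backed cache here are assumptions about their implementation.

# Sketch of the cache short-circuit added to the landing route.
# generate_query_id, result_cache and get_results_children are names from the
# diff; the hash-based id and dict-backed cache below are assumptions.
import hashlib

result_cache: dict[str, dict] = {}  # stand-in for the app's result cache


def generate_query_id(key: str) -> str:
    # Deterministic: the same query + ranking always maps to the same id,
    # which is what lets a later page load find the earlier result.
    return hashlib.md5(key.encode("utf-8")).hexdigest()


def get_results_children(result: dict) -> list:
    # Assumed shape: a Vespa-style response with hits under root.children.
    return result.get("root", {}).get("children", [])


def render_home(query_value: str, ranking_value: str) -> str:
    query_id = generate_query_id(query_value + ranking_value)
    cached = result_cache.get(query_id)
    if cached is not None:
        # Cache hit: render stored results instead of the loading message,
        # mirroring return Layout(Search(request, search_results)).
        return f"results page with {len(get_results_children(cached))} hits"
    # Cache miss: keep the original behaviour (search box + loading message).
    return "search box + loading message"


qid = generate_query_id("vespa" + "nn+colpali")
print(render_home("vespa", "nn+colpali"))             # miss -> loading page
result_cache[qid] = {"root": {"children": [{}, {}]}}
print(render_home("vespa", "nn+colpali"))             # hit  -> 2 hits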