Update app.py
app.py CHANGED
@@ -64,14 +64,14 @@ from requests import get
 import urllib.request


-DATASET_REPO_URL = "https://huggingface.co/datasets/Seetha/visual_files/raw/main/level2.json"
-DATA_FILENAME = "level2.json"
-#DATA_FILE = os.path.join(DATASET_REPO_URL, DATA_FILENAME)
+# DATASET_REPO_URL = "https://huggingface.co/datasets/Seetha/visual_files/raw/main/level2.json"
+# DATA_FILENAME = "level2.json"
+# #DATA_FILE = os.path.join(DATASET_REPO_URL, DATA_FILENAME)

-# feedback_file = Path("https://huggingface.co/datasets/Seetha/visual_files/") / f"level2.json"
-st.write(DATASET_REPO_URL)
+# # feedback_file = Path("https://huggingface.co/datasets/Seetha/visual_files/") / f"level2.json"
+# st.write(DATASET_REPO_URL)

-HF_TOKEN = os.environ.get("HF_TOKEN")
+# HF_TOKEN = os.environ.get("HF_TOKEN")

 # repo = Repository(
 #     local_dir="huggingface-hub", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
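This hunk switches off the remote dataset configuration: the level2.json URL, the debugging st.write call, and the HF_TOKEN lookup are commented out, matching the Repository(...) clone that was already disabled below them. As a reference only, here is a rough sketch (not part of this commit) of how the same push-to-dataset flow could be re-enabled with huggingface_hub; the repo id, filename, and token name are taken from the commented lines, while the helper name and the use of HfApi.upload_file in place of the legacy Repository clone are assumptions.

# Hypothetical sketch, not the committed code: re-enabling the dataset sync
# that this hunk disables, using HfApi instead of the legacy Repository class.
import os
import json

from huggingface_hub import HfApi

HF_TOKEN = os.environ.get("HF_TOKEN")   # token exposed to the Space as a secret
DATA_FILENAME = "level2.json"           # filename from the commented-out config

def push_level2(json_data):
    """Write json_data locally, then upload it to the Seetha/visual_files dataset repo."""
    with open(DATA_FILENAME, "w") as f:
        json.dump(json_data, f)         # dump takes the open file handle directly
    HfApi(token=HF_TOKEN).upload_file(
        path_or_fileobj=DATA_FILENAME,
        path_in_repo=DATA_FILENAME,
        repo_id="Seetha/visual_files",
        repo_type="dataset",
    )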
@@ -487,11 +487,11 @@ def main():

     # Write JSON to file
     #with open(DATA_FILE, 'w') as f: #w+
-    with urllib.request.urlopen(DATASET_REPO_URL) as response:
-
-
-
-
+    # with urllib.request.urlopen(DATASET_REPO_URL) as response:
+    #     data = response.read()
+    with open('level2.json','w') as f:
+        #st.write(f.write(json.dump(json_data)))
+        f.write(json.dump(json_data))


     df_final1.to_csv('predictions.csv')
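One caveat with the new local write: json.dump serializes its first argument directly into the file object passed as its second argument and returns None, so json.dump(json_data) on its own raises a TypeError for the missing file argument, and wrapping it in f.write() would not work either. A minimal sketch of what the write appears to intend, using the filename from the diff; the helper name save_level2 is illustrative only.

import json

def save_level2(json_data, path="level2.json"):
    # json.dump needs the open file handle as its second argument and returns None,
    # so it should not be wrapped in f.write().
    with open(path, "w") as f:
        json.dump(json_data, f)
        # equivalent: f.write(json.dumps(json_data))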