saba9 (HF Staff) committed
Commit 5cd4404 · verified · 1 Parent(s): 96a881d

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.

Files changed (50):
  1. .gitattributes +1 -0
  2. __init__.py +302 -0
  3. __pycache__/__init__.cpython-312.pyc +0 -0
  4. __pycache__/__init__.cpython-313.pyc +0 -0
  5. __pycache__/cli.cpython-312.pyc +0 -0
  6. __pycache__/commit_scheduler.cpython-312.pyc +0 -0
  7. __pycache__/commit_scheduler.cpython-313.pyc +0 -0
  8. __pycache__/context_vars.cpython-312.pyc +0 -0
  9. __pycache__/context_vars.cpython-313.pyc +0 -0
  10. __pycache__/deploy.cpython-312.pyc +0 -0
  11. __pycache__/deploy.cpython-313.pyc +0 -0
  12. __pycache__/dummy_commit_scheduler.cpython-312.pyc +0 -0
  13. __pycache__/dummy_commit_scheduler.cpython-313.pyc +0 -0
  14. __pycache__/file_storage.cpython-312.pyc +0 -0
  15. __pycache__/imports.cpython-312.pyc +0 -0
  16. __pycache__/imports.cpython-313.pyc +0 -0
  17. __pycache__/media.cpython-312.pyc +0 -0
  18. __pycache__/media_commit_scheduler.cpython-312.pyc +0 -0
  19. __pycache__/run.cpython-312.pyc +0 -0
  20. __pycache__/run.cpython-313.pyc +0 -0
  21. __pycache__/sqlite_storage.cpython-312.pyc +0 -0
  22. __pycache__/sqlite_storage.cpython-313.pyc +0 -0
  23. __pycache__/sqlite_types.cpython-312.pyc +0 -0
  24. __pycache__/table.cpython-312.pyc +0 -0
  25. __pycache__/typehints.cpython-312.pyc +0 -0
  26. __pycache__/ui.cpython-312.pyc +0 -0
  27. __pycache__/ui.cpython-313.pyc +0 -0
  28. __pycache__/utils.cpython-312.pyc +0 -0
  29. __pycache__/utils.cpython-313.pyc +0 -0
  30. __pycache__/video_writer.cpython-312.pyc +0 -0
  31. assets/trackio_logo_dark.png +0 -0
  32. assets/trackio_logo_light.png +0 -0
  33. assets/trackio_logo_old.png +3 -0
  34. assets/trackio_logo_type_dark.png +0 -0
  35. assets/trackio_logo_type_dark_transparent.png +0 -0
  36. assets/trackio_logo_type_light.png +0 -0
  37. assets/trackio_logo_type_light_transparent.png +0 -0
  38. cli.py +32 -0
  39. commit_scheduler.py +391 -0
  40. context_vars.py +18 -0
  41. deploy.py +225 -0
  42. dummy_commit_scheduler.py +12 -0
  43. file_storage.py +37 -0
  44. imports.py +302 -0
  45. media.py +286 -0
  46. py.typed +0 -0
  47. run.py +180 -0
  48. sqlite_storage.py +580 -0
  49. table.py +53 -0
  50. typehints.py +18 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/trackio_logo_old.png filter=lfs diff=lfs merge=lfs -text
__init__.py ADDED
@@ -0,0 +1,302 @@

import hashlib
import os
import warnings
import webbrowser
from pathlib import Path
from typing import Any

from gradio.blocks import BUILT_IN_THEMES
from gradio.themes import Default as DefaultTheme
from gradio.themes import ThemeClass
from gradio_client import Client
from huggingface_hub import SpaceStorage

from trackio import context_vars, deploy, utils
from trackio.imports import import_csv, import_tf_events
from trackio.media import TrackioImage, TrackioVideo
from trackio.run import Run
from trackio.sqlite_storage import SQLiteStorage
from trackio.table import Table
from trackio.ui.main import demo
from trackio.utils import TRACKIO_DIR, TRACKIO_LOGO_DIR

__version__ = Path(__file__).parent.joinpath("version.txt").read_text().strip()

__all__ = [
    "init",
    "log",
    "finish",
    "show",
    "import_csv",
    "import_tf_events",
    "Image",
    "Video",
    "Table",
]

Image = TrackioImage
Video = TrackioVideo


config = {}

DEFAULT_THEME = "citrus"


def init(
    project: str,
    name: str | None = None,
    group: str | None = None,
    space_id: str | None = None,
    space_storage: SpaceStorage | None = None,
    dataset_id: str | None = None,
    config: dict | None = None,
    resume: str = "never",
    settings: Any = None,
    private: bool | None = None,
    embed: bool = True,
) -> Run:
    """
    Creates a new Trackio project and returns a [`Run`] object.

    Args:
        project (`str`):
            The name of the project (can be an existing project to continue tracking or
            a new project to start tracking from scratch).
        name (`str`, *optional*):
            The name of the run (if not provided, a default name will be generated).
        group (`str`, *optional*):
            The name of the group which this run belongs to, to help organize related
            runs together. You can toggle the entire group's visibility in the
            dashboard.
        space_id (`str`, *optional*):
            If provided, the project will be logged to a Hugging Face Space instead of
            a local directory. Should be a complete Space name like
            `"username/reponame"` or `"orgname/reponame"`, or just `"reponame"` in which
            case the Space will be created in the currently-logged-in Hugging Face
            user's namespace. If the Space does not exist, it will be created. If the
            Space already exists, the project will be logged to it.
        space_storage ([`~huggingface_hub.SpaceStorage`], *optional*):
            Choice of persistent storage tier.
        dataset_id (`str`, *optional*):
            If a `space_id` is provided, a persistent Hugging Face Dataset will be
            created and the metrics will be synced to it every 5 minutes. Specify a
            Dataset with name like `"username/datasetname"` or `"orgname/datasetname"`,
            or `"datasetname"` (uses currently-logged-in Hugging Face user's namespace),
            or `None` (uses the same name as the Space but with the `"_dataset"`
            suffix). If the Dataset does not exist, it will be created. If the Dataset
            already exists, the project will be appended to it.
        config (`dict`, *optional*):
            A dictionary of configuration options. Provided for compatibility with
            `wandb.init()`.
        resume (`str`, *optional*, defaults to `"never"`):
            Controls how to handle resuming a run. Can be one of:

            - `"must"`: Must resume the run with the given name, raises error if run
              doesn't exist
            - `"allow"`: Resume the run if it exists, otherwise create a new run
            - `"never"`: Never resume a run, always create a new one
        private (`bool`, *optional*):
            Whether to make the Space private. If None (default), the repo will be
            public unless the organization's default is private. This value is ignored
            if the repo already exists.
        settings (`Any`, *optional*):
            Not used. Provided for compatibility with `wandb.init()`.
        embed (`bool`, *optional*, defaults to `True`):
            If running inside a Jupyter/Colab notebook, whether the dashboard should
            automatically be embedded in the cell when `trackio.init()` is called.

    Returns:
        `Run`: A [`Run`] object that can be used to log metrics and finish the run.
    """
    if settings is not None:
        warnings.warn(
            "* Warning: settings is not used. Provided for compatibility with wandb.init(). Please create an issue at: https://github.com/gradio-app/trackio/issues if you need a specific feature implemented."
        )

    if space_id is None and dataset_id is not None:
        raise ValueError("Must provide a `space_id` when `dataset_id` is provided.")
    space_id, dataset_id = utils.preprocess_space_and_dataset_ids(space_id, dataset_id)
    url = context_vars.current_server.get()
    share_url = context_vars.current_share_server.get()

    if url is None:
        if space_id is None:
            _, url, share_url = demo.launch(
                show_api=False,
                inline=False,
                quiet=True,
                prevent_thread_lock=True,
                show_error=True,
                favicon_path=TRACKIO_LOGO_DIR / "trackio_logo_light.png",
                allowed_paths=[TRACKIO_LOGO_DIR],
            )
        else:
            url = space_id
            share_url = None
        context_vars.current_server.set(url)
        context_vars.current_share_server.set(share_url)
    if (
        context_vars.current_project.get() is None
        or context_vars.current_project.get() != project
    ):
        print(f"* Trackio project initialized: {project}")

        if dataset_id is not None:
            os.environ["TRACKIO_DATASET_ID"] = dataset_id
            print(
                f"* Trackio metrics will be synced to Hugging Face Dataset: {dataset_id}"
            )
        if space_id is None:
            print(f"* Trackio metrics logged to: {TRACKIO_DIR}")
            if utils.is_in_notebook() and embed:
                base_url = share_url + "/" if share_url else url
                full_url = utils.get_full_url(
                    base_url, project=project, write_token=demo.write_token
                )
                utils.embed_url_in_notebook(full_url)
            else:
                utils.print_dashboard_instructions(project)
        else:
            deploy.create_space_if_not_exists(
                space_id, space_storage, dataset_id, private
            )
            user_name, space_name = space_id.split("/")
            space_url = deploy.SPACE_HOST_URL.format(
                user_name=user_name, space_name=space_name
            )
            print(f"* View dashboard by going to: {space_url}")
            if utils.is_in_notebook() and embed:
                utils.embed_url_in_notebook(space_url)
    context_vars.current_project.set(project)

    client = None
    if not space_id:
        client = Client(url, verbose=False)

    if resume == "must":
        if name is None:
            raise ValueError("Must provide a run name when resume='must'")
        if name not in SQLiteStorage.get_runs(project):
            raise ValueError(f"Run '{name}' does not exist in project '{project}'")
        resumed = True
    elif resume == "allow":
        resumed = name is not None and name in SQLiteStorage.get_runs(project)
    elif resume == "never":
        if name is not None and name in SQLiteStorage.get_runs(project):
            warnings.warn(
                f"* Warning: resume='never' but a run '{name}' already exists in "
                f"project '{project}'. Generating a new name instead. If you want "
                "to resume this run, call init() with resume='must' or resume='allow'."
            )
            name = None
        resumed = False
    else:
        raise ValueError("resume must be one of: 'must', 'allow', or 'never'")

    run = Run(
        url=url,
        project=project,
        client=client,
        name=name,
        group=group,
        config=config,
        space_id=space_id,
    )

    if resumed:
        print(f"* Resumed existing run: {run.name}")
    else:
        print(f"* Created new run: {run.name}")

    context_vars.current_run.set(run)
    globals()["config"] = run.config
    return run


def log(metrics: dict, step: int | None = None) -> None:
    """
    Logs metrics to the current run.

    Args:
        metrics (`dict`):
            A dictionary of metrics to log.
        step (`int`, *optional*):
            The step number. If not provided, the step will be incremented
            automatically.
    """
    run = context_vars.current_run.get()
    if run is None:
        raise RuntimeError("Call trackio.init() before trackio.log().")
    run.log(
        metrics=metrics,
        step=step,
    )


def finish():
    """
    Finishes the current run.
    """
    run = context_vars.current_run.get()
    if run is None:
        raise RuntimeError("Call trackio.init() before trackio.finish().")
    run.finish()


def show(project: str | None = None, theme: str | ThemeClass = DEFAULT_THEME):
    """
    Launches the Trackio dashboard.

    Args:
        project (`str`, *optional*):
            The name of the project whose runs to show. If not provided, all projects
            will be shown and the user can select one.
        theme (`str` or `ThemeClass`, *optional*, defaults to `"citrus"`):
            A Gradio Theme to use for the dashboard instead of the default `"citrus"`;
            can be a built-in theme (e.g. `'soft'`, `'default'`), a theme from the Hub
            (e.g. `"gstaff/xkcd"`), or a custom Theme class.
    """
    if theme != DEFAULT_THEME:
        # TODO: It's a little hacky to reproduce this theme-setting logic from Gradio Blocks,
        # but in Gradio 6.0, the theme will be set in `launch()` instead, which means that we
        # will be able to remove this code.
        if isinstance(theme, str):
            if theme.lower() in BUILT_IN_THEMES:
                theme = BUILT_IN_THEMES[theme.lower()]
            else:
                try:
                    theme = ThemeClass.from_hub(theme)
                except Exception as e:
                    warnings.warn(f"Cannot load {theme}. Caught Exception: {str(e)}")
                    theme = DefaultTheme()
        if not isinstance(theme, ThemeClass):
            warnings.warn("Theme should be a class loaded from gradio.themes")
            theme = DefaultTheme()
        demo.theme: ThemeClass = theme
        demo.theme_css = theme._get_theme_css()
        demo.stylesheets = theme._stylesheets
        theme_hasher = hashlib.sha256()
        theme_hasher.update(demo.theme_css.encode("utf-8"))
        demo.theme_hash = theme_hasher.hexdigest()

    _, url, share_url = demo.launch(
        show_api=False,
        quiet=True,
        inline=False,
        prevent_thread_lock=True,
        favicon_path=TRACKIO_LOGO_DIR / "trackio_logo_light.png",
        allowed_paths=[TRACKIO_LOGO_DIR],
    )

    base_url = share_url + "/" if share_url else url
    full_url = utils.get_full_url(
        base_url, project=project, write_token=demo.write_token
    )

    if not utils.is_in_notebook():
        print(f"* Trackio UI launched at: {full_url}")
        webbrowser.open(full_url)
        utils.block_main_thread_until_keyboard_interrupt()
    else:
        utils.embed_url_in_notebook(full_url)
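For context, a minimal end-to-end sketch of the public API defined above; the project name, run name, and metric keys are illustrative only:

```python
import random

import trackio

# Start (or resume) tracking; the first call launches a local dashboard server.
run = trackio.init(project="demo-project", name="baseline", config={"lr": 1e-3})

for step in range(100):
    # Keys are metric names; step is optional and auto-increments if omitted.
    trackio.log({"loss": random.random(), "accuracy": step / 100}, step=step)

# Mark the run as finished.
trackio.finish()
```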
__pycache__/__init__.cpython-312.pyc ADDED (binary file, 13.3 kB)
__pycache__/__init__.cpython-313.pyc ADDED (binary file, 7.35 kB)
__pycache__/cli.cpython-312.pyc ADDED (binary file, 1.43 kB)
__pycache__/commit_scheduler.cpython-312.pyc ADDED (binary file, 18.8 kB)
__pycache__/commit_scheduler.cpython-313.pyc ADDED (binary file, 18.3 kB)
__pycache__/context_vars.cpython-312.pyc ADDED (binary file, 917 Bytes)
__pycache__/context_vars.cpython-313.pyc ADDED (binary file, 745 Bytes)
__pycache__/deploy.cpython-312.pyc ADDED (binary file, 8.8 kB)
__pycache__/deploy.cpython-313.pyc ADDED (binary file, 6.27 kB)
__pycache__/dummy_commit_scheduler.cpython-312.pyc ADDED (binary file, 1.01 kB)
__pycache__/dummy_commit_scheduler.cpython-313.pyc ADDED (binary file, 1.1 kB)
__pycache__/file_storage.cpython-312.pyc ADDED (binary file, 1.63 kB)
__pycache__/imports.cpython-312.pyc ADDED (binary file, 13.2 kB)
__pycache__/imports.cpython-313.pyc ADDED (binary file, 11.6 kB)
__pycache__/media.cpython-312.pyc ADDED (binary file, 14.1 kB)
__pycache__/media_commit_scheduler.cpython-312.pyc ADDED (binary file, 3.66 kB)
__pycache__/run.cpython-312.pyc ADDED (binary file, 8.97 kB)
__pycache__/run.cpython-313.pyc ADDED (binary file, 1.37 kB)
__pycache__/sqlite_storage.cpython-312.pyc ADDED (binary file, 27.8 kB)
__pycache__/sqlite_storage.cpython-313.pyc ADDED (binary file, 13.8 kB)
__pycache__/sqlite_types.cpython-312.pyc ADDED (binary file, 1.35 kB)
__pycache__/table.cpython-312.pyc ADDED (binary file, 2.32 kB)
__pycache__/typehints.cpython-312.pyc ADDED (binary file, 904 Bytes)
__pycache__/ui.cpython-312.pyc ADDED (binary file, 30.7 kB)
__pycache__/ui.cpython-313.pyc ADDED (binary file, 5.37 kB)
__pycache__/utils.cpython-312.pyc ADDED (binary file, 23.6 kB)
__pycache__/utils.cpython-313.pyc ADDED (binary file, 9.8 kB)
__pycache__/video_writer.cpython-312.pyc ADDED (binary file, 5.32 kB)
assets/trackio_logo_dark.png ADDED
assets/trackio_logo_light.png ADDED
assets/trackio_logo_old.png ADDED

Git LFS Details

  • SHA256: 3922c4d1e465270ad4d8abb12023f3beed5d9f7f338528a4c0ac21dcf358a1c8
  • Pointer size: 131 Bytes
  • Size of remote file: 487 kB
assets/trackio_logo_type_dark.png ADDED
assets/trackio_logo_type_dark_transparent.png ADDED
assets/trackio_logo_type_light.png ADDED
assets/trackio_logo_type_light_transparent.png ADDED
cli.py ADDED
@@ -0,0 +1,32 @@

import argparse

from trackio import show


def main():
    parser = argparse.ArgumentParser(description="Trackio CLI")
    subparsers = parser.add_subparsers(dest="command")

    ui_parser = subparsers.add_parser(
        "show", help="Show the Trackio dashboard UI for a project"
    )
    ui_parser.add_argument(
        "--project", required=False, help="Project name to show in the dashboard"
    )
    ui_parser.add_argument(
        "--theme",
        required=False,
        default="citrus",
        help="A Gradio Theme to use for the dashboard instead of the default 'citrus'; can be a built-in theme (e.g. 'soft', 'default') or a theme from the Hub (e.g. 'gstaff/xkcd').",
    )

    args = parser.parse_args()

    if args.command == "show":
        show(args.project, args.theme)
    else:
        parser.print_help()


if __name__ == "__main__":
    main()
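The parser above only exposes a `show` subcommand. A hedged sketch of invoking it programmatically; whether `main()` is also registered as a `trackio` console script is an assumption not visible in this commit:

```python
import sys

from trackio.cli import main  # the module defined above

# Equivalent to a hypothetical console invocation such as:
#   trackio show --project demo-project --theme soft
# (assuming the package registers main() as a `trackio` entry point).
sys.argv = ["trackio", "show", "--project", "demo-project", "--theme", "soft"]
main()  # note: show() launches the dashboard and blocks until interrupted
```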
commit_scheduler.py ADDED
@@ -0,0 +1,391 @@

# Originally copied from https://github.com/huggingface/huggingface_hub/blob/d0a948fc2a32ed6e557042a95ef3e4af97ec4a7c/src/huggingface_hub/_commit_scheduler.py

import atexit
import logging
import os
import time
from concurrent.futures import Future
from dataclasses import dataclass
from io import SEEK_END, SEEK_SET, BytesIO
from pathlib import Path
from threading import Lock, Thread
from typing import Callable, Dict, List, Union

from huggingface_hub.hf_api import (
    DEFAULT_IGNORE_PATTERNS,
    CommitInfo,
    CommitOperationAdd,
    HfApi,
)
from huggingface_hub.utils import filter_repo_objects

logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class _FileToUpload:
    """Temporary dataclass to store info about files to upload. Not meant to be used directly."""

    local_path: Path
    path_in_repo: str
    size_limit: int
    last_modified: float


class CommitScheduler:
    """
    Scheduler to upload a local folder to the Hub at regular intervals (e.g. push to hub every 5 minutes).

    The recommended way to use the scheduler is to use it as a context manager. This ensures that the scheduler is
    properly stopped and the last commit is triggered when the script ends. The scheduler can also be stopped manually
    with the `stop` method. Check out the [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#scheduled-uploads)
    to learn more about how to use it.

    Args:
        repo_id (`str`):
            The id of the repo to commit to.
        folder_path (`str` or `Path`):
            Path to the local folder to upload regularly.
        every (`int` or `float`, *optional*):
            The number of minutes between each commit. Defaults to 5 minutes.
        path_in_repo (`str`, *optional*):
            Relative path of the directory in the repo, for example: `"checkpoints/"`. Defaults to the root folder
            of the repository.
        repo_type (`str`, *optional*):
            The type of the repo to commit to. Defaults to `model`.
        revision (`str`, *optional*):
            The revision of the repo to commit to. Defaults to `main`.
        private (`bool`, *optional*):
            Whether to make the repo private. If `None` (default), the repo will be public unless the organization's
            default is private. This value is ignored if the repo already exists.
        token (`str`, *optional*):
            The token to use to commit to the repo. Defaults to the token saved on the machine.
        allow_patterns (`List[str]` or `str`, *optional*):
            If provided, only files matching at least one pattern are uploaded.
        ignore_patterns (`List[str]` or `str`, *optional*):
            If provided, files matching any of the patterns are not uploaded.
        squash_history (`bool`, *optional*):
            Whether to squash the history of the repo after each commit. Defaults to `False`. Squashing commits is
            useful to avoid degraded performance on the repo when it grows too large.
        hf_api (`HfApi`, *optional*):
            The [`HfApi`] client to use to commit to the Hub. Can be set with custom settings (user agent, token, ...).
        on_before_commit (`Callable[[], None]`, *optional*):
            If specified, a function that will be called before the CommitScheduler lists files to create a commit.

    Example:
    ```py
    >>> from pathlib import Path
    >>> from huggingface_hub import CommitScheduler

    # Scheduler uploads every 10 minutes
    >>> csv_path = Path("watched_folder/data.csv")
    >>> CommitScheduler(repo_id="test_scheduler", repo_type="dataset", folder_path=csv_path.parent, every=10)

    >>> with csv_path.open("a") as f:
    ...     f.write("first line")

    # Some time later (...)
    >>> with csv_path.open("a") as f:
    ...     f.write("second line")
    ```

    Example using a context manager:
    ```py
    >>> from pathlib import Path
    >>> from huggingface_hub import CommitScheduler

    >>> with CommitScheduler(repo_id="test_scheduler", repo_type="dataset", folder_path="watched_folder", every=10) as scheduler:
    ...     csv_path = Path("watched_folder/data.csv")
    ...     with csv_path.open("a") as f:
    ...         f.write("first line")
    ...     (...)
    ...     with csv_path.open("a") as f:
    ...         f.write("second line")

    # Scheduler is now stopped and the last commit has been triggered
    ```
    """

    def __init__(
        self,
        *,
        repo_id: str,
        folder_path: Union[str, Path],
        every: Union[int, float] = 5,
        path_in_repo: str | None = None,
        repo_type: str | None = None,
        revision: str | None = None,
        private: bool | None = None,
        token: str | None = None,
        allow_patterns: list[str] | str | None = None,
        ignore_patterns: list[str] | str | None = None,
        squash_history: bool = False,
        hf_api: HfApi | None = None,
        on_before_commit: Callable[[], None] | None = None,
    ) -> None:
        self.api = hf_api or HfApi(token=token)
        self.on_before_commit = on_before_commit

        # Folder
        self.folder_path = Path(folder_path).expanduser().resolve()
        self.path_in_repo = path_in_repo or ""
        self.allow_patterns = allow_patterns

        if ignore_patterns is None:
            ignore_patterns = []
        elif isinstance(ignore_patterns, str):
            ignore_patterns = [ignore_patterns]
        self.ignore_patterns = ignore_patterns + DEFAULT_IGNORE_PATTERNS

        if self.folder_path.is_file():
            raise ValueError(
                f"'folder_path' must be a directory, not a file: '{self.folder_path}'."
            )
        self.folder_path.mkdir(parents=True, exist_ok=True)

        # Repository
        repo_url = self.api.create_repo(
            repo_id=repo_id, private=private, repo_type=repo_type, exist_ok=True
        )
        self.repo_id = repo_url.repo_id
        self.repo_type = repo_type
        self.revision = revision
        self.token = token

        self.last_uploaded: Dict[Path, float] = {}
        self.last_push_time: float | None = None

        if not every > 0:
            raise ValueError(f"'every' must be a positive number, not '{every}'.")
        self.lock = Lock()
        self.every = every
        self.squash_history = squash_history

        logger.info(
            f"Scheduled job to push '{self.folder_path}' to '{self.repo_id}' every {self.every} minutes."
        )
        self._scheduler_thread = Thread(target=self._run_scheduler, daemon=True)
        self._scheduler_thread.start()
        atexit.register(self._push_to_hub)

        self.__stopped = False

    def stop(self) -> None:
        """Stop the scheduler.

        A stopped scheduler cannot be restarted. Mostly for test purposes.
        """
        self.__stopped = True

    def __enter__(self) -> "CommitScheduler":
        return self

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        # Upload last changes before exiting
        self.trigger().result()
        self.stop()
        return

    def _run_scheduler(self) -> None:
        """Dumb thread waiting between each scheduled push to Hub."""
        while True:
            self.last_future = self.trigger()
            time.sleep(self.every * 60)
            if self.__stopped:
                break

    def trigger(self) -> Future:
        """Trigger a `push_to_hub` and return a future.

        This method is automatically called every `every` minutes. You can also call it manually to trigger a commit
        immediately, without waiting for the next scheduled commit.
        """
        return self.api.run_as_future(self._push_to_hub)

    def _push_to_hub(self) -> CommitInfo | None:
        if self.__stopped:  # If stopped, already scheduled commits are ignored
            return None

        logger.info("(Background) scheduled commit triggered.")
        try:
            value = self.push_to_hub()
            if self.squash_history:
                logger.info("(Background) squashing repo history.")
                self.api.super_squash_history(
                    repo_id=self.repo_id, repo_type=self.repo_type, branch=self.revision
                )
            return value
        except Exception as e:
            logger.error(
                f"Error while pushing to Hub: {e}"
            )  # Depending on the setup, error might be silenced
            raise

    def push_to_hub(self) -> CommitInfo | None:
        """
        Push folder to the Hub and return the commit info.

        <Tip warning={true}>

        This method is not meant to be called directly. It is run in the background by the scheduler, respecting a
        queue mechanism to avoid concurrent commits. Making a direct call to the method might lead to concurrency
        issues.

        </Tip>

        The default behavior of `push_to_hub` is to assume an append-only folder. It lists all files in the folder and
        uploads only changed files. If no changes are found, the method returns without committing anything. If you want
        to change this behavior, you can inherit from [`CommitScheduler`] and override this method. This can be useful
        for example to compress data together in a single file before committing. For more details and examples, check
        out our [integration guide](https://huggingface.co/docs/huggingface_hub/main/en/guides/upload#scheduled-uploads).
        """
        # Check files to upload (with lock)
        with self.lock:
            if self.on_before_commit is not None:
                self.on_before_commit()

            logger.debug("Listing files to upload for scheduled commit.")

            # List files from folder (taken from `_prepare_upload_folder_additions`)
            relpath_to_abspath = {
                path.relative_to(self.folder_path).as_posix(): path
                for path in sorted(
                    self.folder_path.glob("**/*")
                )  # sorted to be deterministic
                if path.is_file()
            }
            prefix = f"{self.path_in_repo.strip('/')}/" if self.path_in_repo else ""

            # Filter with pattern + filter out unchanged files + retrieve current file size
            files_to_upload: List[_FileToUpload] = []
            for relpath in filter_repo_objects(
                relpath_to_abspath.keys(),
                allow_patterns=self.allow_patterns,
                ignore_patterns=self.ignore_patterns,
            ):
                local_path = relpath_to_abspath[relpath]
                stat = local_path.stat()
                if (
                    self.last_uploaded.get(local_path) is None
                    or self.last_uploaded[local_path] != stat.st_mtime
                ):
                    files_to_upload.append(
                        _FileToUpload(
                            local_path=local_path,
                            path_in_repo=prefix + relpath,
                            size_limit=stat.st_size,
                            last_modified=stat.st_mtime,
                        )
                    )

        # Return if nothing to upload
        if len(files_to_upload) == 0:
            logger.debug("Dropping scheduled commit: no changed file to upload.")
            return None

        # Convert `_FileToUpload` as `CommitOperationAdd` (=> compute file shas + limit to file size)
        logger.debug("Removing unchanged files since previous scheduled commit.")
        add_operations = [
            CommitOperationAdd(
                # TODO: Cap the file to its current size, even if the user appends data to it while a scheduled commit is happening
                # (requires an upstream fix for XET-535: `hf_xet` should support `BinaryIO` for upload)
                path_or_fileobj=file_to_upload.local_path,
                path_in_repo=file_to_upload.path_in_repo,
            )
            for file_to_upload in files_to_upload
        ]

        # Upload files (append mode expected - no need for lock)
        logger.debug("Uploading files for scheduled commit.")
        commit_info = self.api.create_commit(
            repo_id=self.repo_id,
            repo_type=self.repo_type,
            operations=add_operations,
            commit_message="Scheduled Commit",
            revision=self.revision,
        )

        for file in files_to_upload:
            self.last_uploaded[file.local_path] = file.last_modified

        self.last_push_time = time.time()

        return commit_info


class PartialFileIO(BytesIO):
    """A file-like object that reads only the first part of a file.

    Useful to upload a file to the Hub when the user might still be appending data to it. Only the first part of the
    file is uploaded (i.e. the part that was available when the filesystem was first scanned).

    In practice, only used internally by the CommitScheduler to regularly push a folder to the Hub with minimal
    disturbance for the user. The object is passed to `CommitOperationAdd`.

    Only supports `read`, `tell` and `seek` methods.

    Args:
        file_path (`str` or `Path`):
            Path to the file to read.
        size_limit (`int`):
            The maximum number of bytes to read from the file. If the file is larger than this, only the first part
            will be read (and uploaded).
    """

    def __init__(self, file_path: Union[str, Path], size_limit: int) -> None:
        self._file_path = Path(file_path)
        self._file = self._file_path.open("rb")
        self._size_limit = min(size_limit, os.fstat(self._file.fileno()).st_size)

    def __del__(self) -> None:
        self._file.close()
        return super().__del__()

    def __repr__(self) -> str:
        return (
            f"<PartialFileIO file_path={self._file_path} size_limit={self._size_limit}>"
        )

    def __len__(self) -> int:
        return self._size_limit

    def __getattribute__(self, name: str):
        if name.startswith("_") or name in (
            "read",
            "tell",
            "seek",
        ):  # only 3 public methods supported
            return super().__getattribute__(name)
        raise NotImplementedError(f"PartialFileIO does not support '{name}'.")

    def tell(self) -> int:
        """Return the current file position."""
        return self._file.tell()

    def seek(self, __offset: int, __whence: int = SEEK_SET) -> int:
        """Change the stream position to the given offset.

        Behavior is the same as a regular file, except that the position is capped to the size limit.
        """
        if __whence == SEEK_END:
            # SEEK_END => set from the truncated end
            __offset = len(self) + __offset
            __whence = SEEK_SET

        pos = self._file.seek(__offset, __whence)
        if pos > self._size_limit:
            return self._file.seek(self._size_limit)
        return pos

    def read(self, __size: int | None = -1) -> bytes:
        """Read at most `__size` bytes from the file.

        Behavior is the same as a regular file, except that it is capped to the size limit.
        """
        current = self._file.tell()
        if __size is None or __size < 0:
            # Read until file limit
            truncated_size = self._size_limit - current
        else:
            # Read until file limit or __size
            truncated_size = min(__size, self._size_limit - current)
        return self._file.read(truncated_size)
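A sketch of how the hooks above can be used; the repo id and the idea of flushing a database before each commit are placeholders for illustration, not part of this commit:

```python
from trackio.commit_scheduler import CommitScheduler


def flush_pending_writes() -> None:
    # Hypothetical on_before_commit hook: make sure on-disk files are
    # consistent before the scheduler lists them (runs under self.lock).
    print("flushing pending writes...")


# Push ./logs to a dataset repo every 5 minutes; a final commit is also
# triggered at interpreter exit via the atexit hook registered in __init__.
scheduler = CommitScheduler(
    repo_id="username/trackio-logs",  # placeholder repo id
    repo_type="dataset",
    folder_path="./logs",
    every=5,
    on_before_commit=flush_pending_writes,
)
```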
context_vars.py ADDED
@@ -0,0 +1,18 @@

import contextvars
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from trackio.run import Run

current_run: contextvars.ContextVar["Run | None"] = contextvars.ContextVar(
    "current_run", default=None
)
current_project: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "current_project", default=None
)
current_server: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "current_server", default=None
)
current_share_server: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "current_share_server", default=None
)
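These `ContextVar`s are how the module-level `log()` and `finish()` find the state that `init()` established. A minimal sketch of the round-trip, assuming a fresh process where `init()` has not run yet:

```python
from trackio import context_vars

# Defaults are None until trackio.init() populates them.
assert context_vars.current_project.get() is None

# trackio.init() effectively does the following once a project is active:
context_vars.current_project.set("demo-project")
assert context_vars.current_project.get() == "demo-project"
```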
deploy.py ADDED
@@ -0,0 +1,225 @@

import importlib.metadata
import io
import os
import time
from importlib.resources import files
from pathlib import Path

import gradio
import huggingface_hub
from gradio_client import Client, handle_file
from httpx import ReadTimeout
from huggingface_hub.errors import RepositoryNotFoundError
from requests import HTTPError

import trackio
from trackio.sqlite_storage import SQLiteStorage

SPACE_HOST_URL = "https://{user_name}-{space_name}.hf.space/"
SPACE_URL = "https://huggingface.co/spaces/{space_id}"


def _is_trackio_installed_from_source() -> bool:
    """Check if trackio is installed from source/editable install vs PyPI."""
    try:
        trackio_file = trackio.__file__
        if "site-packages" not in trackio_file:
            return True

        dist = importlib.metadata.distribution("trackio")
        if dist.files:
            files = list(dist.files)  # note: shadows importlib.resources.files locally
            has_pth = any(".pth" in str(f) for f in files)
            if has_pth:
                return True

        return False
    except (
        AttributeError,
        importlib.metadata.PackageNotFoundError,
        importlib.metadata.MetadataError,
        ValueError,
        TypeError,
    ):
        return True


def deploy_as_space(
    space_id: str,
    space_storage: huggingface_hub.SpaceStorage | None = None,
    dataset_id: str | None = None,
    private: bool | None = None,
):
    if (
        os.getenv("SYSTEM") == "spaces"
    ):  # in case a repo with this function is uploaded to spaces
        return

    trackio_path = files("trackio")

    hf_api = huggingface_hub.HfApi()

    try:
        huggingface_hub.create_repo(
            space_id,
            private=private,
            space_sdk="gradio",
            space_storage=space_storage,
            repo_type="space",
            exist_ok=True,
        )
    except HTTPError as e:
        if e.response.status_code in [401, 403]:  # unauthorized or forbidden
            print("Need 'write' access token to create a Spaces repo.")
            huggingface_hub.login(add_to_git_credential=False)
            huggingface_hub.create_repo(
                space_id,
                private=private,
                space_sdk="gradio",
                space_storage=space_storage,
                repo_type="space",
                exist_ok=True,
            )
        else:
            raise ValueError(f"Failed to create Space: {e}")

    with open(Path(trackio_path, "README.md"), "r") as f:
        readme_content = f.read()
    readme_content = readme_content.replace("{GRADIO_VERSION}", gradio.__version__)
    readme_buffer = io.BytesIO(readme_content.encode("utf-8"))
    hf_api.upload_file(
        path_or_fileobj=readme_buffer,
        path_in_repo="README.md",
        repo_id=space_id,
        repo_type="space",
    )

    # We can assume pandas, gradio, and huggingface-hub are already installed in a Gradio Space.
    # Make sure necessary dependencies are installed by creating a requirements.txt.
    is_source_install = _is_trackio_installed_from_source()

    if is_source_install:
        requirements_content = """pyarrow>=21.0"""
    else:
        requirements_content = f"""pyarrow>=21.0
trackio=={trackio.__version__}"""

    requirements_buffer = io.BytesIO(requirements_content.encode("utf-8"))
    hf_api.upload_file(
        path_or_fileobj=requirements_buffer,
        path_in_repo="requirements.txt",
        repo_id=space_id,
        repo_type="space",
    )

    huggingface_hub.utils.disable_progress_bars()

    if is_source_install:
        hf_api.upload_folder(
            repo_id=space_id,
            repo_type="space",
            folder_path=trackio_path,
            ignore_patterns=["README.md"],
        )
    else:
        app_file_content = """import trackio
trackio.show()"""
        app_file_buffer = io.BytesIO(app_file_content.encode("utf-8"))
        hf_api.upload_file(
            path_or_fileobj=app_file_buffer,
            path_in_repo="ui/main.py",
            repo_id=space_id,
            repo_type="space",
        )

    if hf_token := huggingface_hub.utils.get_token():
        huggingface_hub.add_space_secret(space_id, "HF_TOKEN", hf_token)
    if dataset_id is not None:
        huggingface_hub.add_space_variable(space_id, "TRACKIO_DATASET_ID", dataset_id)


def create_space_if_not_exists(
    space_id: str,
    space_storage: huggingface_hub.SpaceStorage | None = None,
    dataset_id: str | None = None,
    private: bool | None = None,
) -> None:
    """
    Creates a new Hugging Face Space if it does not exist. If a dataset_id is provided, it will be added as a Space variable.

    Args:
        space_id: The ID of the Space to create.
        space_storage: Choice of persistent storage tier for the Space.
        dataset_id: The ID of the Dataset to add to the Space.
        private: Whether to make the Space private. If None (default), the repo will be
            public unless the organization's default is private. This value is ignored if
            the repo already exists.
    """
    if "/" not in space_id:
        raise ValueError(
            f"Invalid space ID: {space_id}. Must be in the format: username/reponame or orgname/reponame."
        )
    if dataset_id is not None and "/" not in dataset_id:
        raise ValueError(
            f"Invalid dataset ID: {dataset_id}. Must be in the format: username/datasetname or orgname/datasetname."
        )
    try:
        huggingface_hub.repo_info(space_id, repo_type="space")
        print(f"* Found existing space: {SPACE_URL.format(space_id=space_id)}")
        if dataset_id is not None:
            huggingface_hub.add_space_variable(
                space_id, "TRACKIO_DATASET_ID", dataset_id
            )
        return
    except RepositoryNotFoundError:
        pass
    except HTTPError as e:
        if e.response.status_code in [401, 403]:  # unauthorized or forbidden
            print("Need 'write' access token to create a Spaces repo.")
            huggingface_hub.login(add_to_git_credential=False)
            huggingface_hub.add_space_variable(
                space_id, "TRACKIO_DATASET_ID", dataset_id
            )
        else:
            raise ValueError(f"Failed to create Space: {e}")

    print(f"* Creating new space: {SPACE_URL.format(space_id=space_id)}")
    deploy_as_space(space_id, space_storage, dataset_id, private)


def wait_until_space_exists(
    space_id: str,
) -> None:
    """
    Blocks the current thread until the Space exists.
    Raises a TimeoutError if the Space is still unreachable after 10 attempts
    with exponential backoff (capped at 30 seconds per attempt).

    Args:
        space_id: The ID of the Space to wait for.
    """
    delay = 1
    for _ in range(10):
        try:
            Client(space_id, verbose=False)
            return
        except (ReadTimeout, ValueError):
            time.sleep(delay)
            delay = min(delay * 2, 30)
    raise TimeoutError("Waiting for space to exist took longer than expected")


def upload_db_to_space(project: str, space_id: str) -> None:
    """
    Uploads the database of a local Trackio project to a Hugging Face Space.

    Args:
        project: The name of the project to upload.
        space_id: The ID of the Space to upload to.
    """
    db_path = SQLiteStorage.get_project_db_path(project)
    client = Client(space_id, verbose=False)
    client.predict(
        api_name="/upload_db_to_space",
        project=project,
        uploaded_db=handle_file(db_path),
        hf_token=huggingface_hub.utils.get_token(),
    )
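The three public helpers above are designed to be called in sequence, as `import_csv` and `import_tf_events` do later in this commit. A sketch with placeholder ids:

```python
from trackio import deploy

space_id = "username/my-trackio-space"  # placeholder

# Creates the Space if missing (may prompt for a write token).
deploy.create_space_if_not_exists(space_id, dataset_id=None, private=None)

# Polls Client(space_id) with exponential backoff; raises TimeoutError
# if the Space never becomes reachable.
deploy.wait_until_space_exists(space_id)

# Ships the local SQLite database for one project to the Space.
deploy.upload_db_to_space(project="demo-project", space_id=space_id)
```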
dummy_commit_scheduler.py ADDED
@@ -0,0 +1,12 @@

# A dummy object to fit the interface of huggingface_hub's CommitScheduler
class DummyCommitSchedulerLock:
    def __enter__(self):
        return None

    def __exit__(self, exception_type, exception_value, exception_traceback):
        pass


class DummyCommitScheduler:
    def __init__(self):
        self.lock = DummyCommitSchedulerLock()
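A short sketch of why the dummy matches the real scheduler's interface: call sites can acquire `scheduler.lock` without branching on whether Hub syncing is enabled (the local-only usage shown here is an assumption based on the class's stated purpose):

```python
from trackio.dummy_commit_scheduler import DummyCommitScheduler

scheduler = DummyCommitScheduler()  # local-only stand-in for CommitScheduler

# The same `with scheduler.lock:` shape works for both the real scheduler
# (a threading.Lock) and the dummy (a no-op context manager).
with scheduler.lock:
    pass  # e.g. write to the local database here
```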
file_storage.py ADDED
@@ -0,0 +1,37 @@

from pathlib import Path

try:  # absolute imports when installed
    from trackio.utils import MEDIA_DIR
except ImportError:  # relative imports for local execution on Spaces
    from utils import MEDIA_DIR


class FileStorage:
    @staticmethod
    def get_project_media_path(
        project: str,
        run: str | None = None,
        step: int | None = None,
        filename: str | None = None,
    ) -> Path:
        if filename is not None and step is None:
            raise ValueError("filename requires step")
        if step is not None and run is None:
            raise ValueError("step requires run")

        path = MEDIA_DIR / project
        if run:
            path /= run
        if step is not None:
            path /= str(step)
        if filename:
            path /= filename
        return path

    @staticmethod
    def init_project_media_path(
        project: str, run: str | None = None, step: int | None = None
    ) -> Path:
        path = FileStorage.get_project_media_path(project, run, step)
        path.mkdir(parents=True, exist_ok=True)
        return path
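The layout is hierarchical (`MEDIA_DIR/project/run/step/filename`), and the validation enforces that a `filename` implies a `step` and a `step` implies a `run`. A quick sketch with made-up names:

```python
from trackio.file_storage import FileStorage

# Builds MEDIA_DIR / "demo-project" / "run-1" / "7" / "img.png"
path = FileStorage.get_project_media_path(
    project="demo-project", run="run-1", step=7, filename="img.png"
)
print(path)

# A step without a run is rejected by the validation above.
try:
    FileStorage.get_project_media_path(project="demo-project", step=7)
except ValueError as e:
    print(e)  # "step requires run"
```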
imports.py ADDED
@@ -0,0 +1,302 @@

import os
from pathlib import Path

import pandas as pd

from trackio import deploy, utils
from trackio.sqlite_storage import SQLiteStorage


def import_csv(
    csv_path: str | Path,
    project: str,
    name: str | None = None,
    space_id: str | None = None,
    dataset_id: str | None = None,
    private: bool | None = None,
) -> None:
    """
    Imports a CSV file into a Trackio project. The CSV file must contain a `"step"`
    column, may optionally contain a `"timestamp"` column, and any other columns will be
    treated as metrics. It should also include a header row with the column names.

    TODO: call init() and return a Run object so that the user can continue to log metrics to it.

    Args:
        csv_path (`str` or `Path`):
            The str or Path to the CSV file to import.
        project (`str`):
            The name of the project to import the CSV file into. Must not be an existing
            project.
        name (`str`, *optional*):
            The name of the run to import the CSV file into. If not provided, a default
            name will be generated.
        space_id (`str`, *optional*):
            If provided, the project will be logged to a Hugging Face Space instead of a
            local directory. Should be a complete Space name like `"username/reponame"`
            or `"orgname/reponame"`, or just `"reponame"` in which case the Space will
            be created in the currently-logged-in Hugging Face user's namespace. If the
            Space does not exist, it will be created. If the Space already exists, the
            project will be logged to it.
        dataset_id (`str`, *optional*):
            If provided, a persistent Hugging Face Dataset will be created and the
            metrics will be synced to it every 5 minutes. Should be a complete Dataset
            name like `"username/datasetname"` or `"orgname/datasetname"`, or just
            `"datasetname"` in which case the Dataset will be created in the
            currently-logged-in Hugging Face user's namespace. If the Dataset does not
            exist, it will be created. If the Dataset already exists, the project will
            be appended to it. If not provided, the metrics will be logged to a local
            SQLite database, unless a `space_id` is provided, in which case a Dataset
            will be automatically created with the same name as the Space but with the
            `"_dataset"` suffix.
        private (`bool`, *optional*):
            Whether to make the Space private. If None (default), the repo will be
            public unless the organization's default is private. This value is ignored
            if the repo already exists.
    """
    if SQLiteStorage.get_runs(project):
        raise ValueError(
            f"Project '{project}' already exists. Cannot import CSV into existing project."
        )

    csv_path = Path(csv_path)
    if not csv_path.exists():
        raise FileNotFoundError(f"CSV file not found: {csv_path}")

    df = pd.read_csv(csv_path)
    if df.empty:
        raise ValueError("CSV file is empty")

    column_mapping = utils.simplify_column_names(df.columns.tolist())
    df = df.rename(columns=column_mapping)

    step_column = None
    for col in df.columns:
        if col.lower() == "step":
            step_column = col
            break

    if step_column is None:
        raise ValueError("CSV file must contain a 'step' or 'Step' column")

    if name is None:
        name = csv_path.stem

    metrics_list = []
    steps = []
    timestamps = []

    numeric_columns = []
    for column in df.columns:
        if column == step_column:
            continue
        if column == "timestamp":
            continue

        try:
            pd.to_numeric(df[column], errors="raise")
            numeric_columns.append(column)
        except (ValueError, TypeError):
            continue

    for _, row in df.iterrows():
        metrics = {}
        for column in numeric_columns:
            value = row[column]
            if bool(pd.notna(value)):
                metrics[column] = float(value)

        if metrics:
            metrics_list.append(metrics)
            steps.append(int(row[step_column]))

            if "timestamp" in df.columns and bool(pd.notna(row["timestamp"])):
                timestamps.append(str(row["timestamp"]))
            else:
                timestamps.append("")

    if metrics_list:
        SQLiteStorage.bulk_log(
            project=project,
            run=name,
            metrics_list=metrics_list,
            steps=steps,
            timestamps=timestamps,
        )

        print(
            f"* Imported {len(metrics_list)} rows from {csv_path} into project '{project}' as run '{name}'"
        )
        print(f"* Metrics found: {', '.join(metrics_list[0].keys())}")

    space_id, dataset_id = utils.preprocess_space_and_dataset_ids(space_id, dataset_id)
    if dataset_id is not None:
        os.environ["TRACKIO_DATASET_ID"] = dataset_id
        print(f"* Trackio metrics will be synced to Hugging Face Dataset: {dataset_id}")

    if space_id is None:
        utils.print_dashboard_instructions(project)
    else:
        deploy.create_space_if_not_exists(
            space_id=space_id, dataset_id=dataset_id, private=private
        )
        deploy.wait_until_space_exists(space_id=space_id)
        deploy.upload_db_to_space(project=project, space_id=space_id)
        print(
            f"* View dashboard by going to: {deploy.SPACE_URL.format(space_id=space_id)}"
        )


def import_tf_events(
    log_dir: str | Path,
    project: str,
    name: str | None = None,
    space_id: str | None = None,
    dataset_id: str | None = None,
    private: bool | None = None,
) -> None:
    """
    Imports TensorFlow Events files from a directory into a Trackio project. Each
    subdirectory in the log directory will be imported as a separate run.

    Args:
        log_dir (`str` or `Path`):
            The str or Path to the directory containing TensorFlow Events files.
        project (`str`):
            The name of the project to import the TensorFlow Events files into. Must not
            be an existing project.
        name (`str`, *optional*):
            The name prefix for runs (if not provided, will use directory names). Each
            subdirectory will create a separate run.
        space_id (`str`, *optional*):
            If provided, the project will be logged to a Hugging Face Space instead of a
            local directory. Should be a complete Space name like `"username/reponame"`
            or `"orgname/reponame"`, or just `"reponame"` in which case the Space will
            be created in the currently-logged-in Hugging Face user's namespace. If the
            Space does not exist, it will be created. If the Space already exists, the
            project will be logged to it.
        dataset_id (`str`, *optional*):
            If provided, a persistent Hugging Face Dataset will be created and the
            metrics will be synced to it every 5 minutes. Should be a complete Dataset
            name like `"username/datasetname"` or `"orgname/datasetname"`, or just
            `"datasetname"` in which case the Dataset will be created in the
            currently-logged-in Hugging Face user's namespace. If the Dataset does not
            exist, it will be created. If the Dataset already exists, the project will
            be appended to it. If not provided, the metrics will be logged to a local
            SQLite database, unless a `space_id` is provided, in which case a Dataset
            will be automatically created with the same name as the Space but with the
            `"_dataset"` suffix.
        private (`bool`, *optional*):
            Whether to make the Space private. If None (default), the repo will be
            public unless the organization's default is private. This value is ignored
            if the repo already exists.
    """
    try:
        from tbparse import SummaryReader
    except ImportError:
        raise ImportError(
            "The `tbparse` package is not installed but is required for `import_tf_events`. Please install trackio with the `tensorboard` extra: `pip install trackio[tensorboard]`."
        )

    if SQLiteStorage.get_runs(project):
        raise ValueError(
            f"Project '{project}' already exists. Cannot import TF events into existing project."
        )

    path = Path(log_dir)
    if not path.exists():
        raise FileNotFoundError(f"TF events directory not found: {path}")

    # Use tbparse to read all tfevents files in the directory structure
    reader = SummaryReader(str(path), extra_columns={"dir_name"})
    df = reader.scalars

    if df.empty:
        raise ValueError(f"No TensorFlow events data found in {path}")

    total_imported = 0
    imported_runs = []

    # Group by dir_name to create separate runs
    for dir_name, group_df in df.groupby("dir_name"):
        try:
            # Determine run name based on directory name
            if dir_name == "":
                run_name = "main"  # For files in the root directory
            else:
                run_name = dir_name  # Use directory name

            if name:
                run_name = f"{name}_{run_name}"

            if group_df.empty:
                print(f"* Skipping directory {dir_name}: no scalar data found")
                continue

            metrics_list = []
            steps = []
            timestamps = []

            for _, row in group_df.iterrows():
                # Convert row values to appropriate types
                tag = str(row["tag"])
                value = float(row["value"])
                step = int(row["step"])

                metrics = {tag: value}
                metrics_list.append(metrics)
                steps.append(step)

                # Use wall_time if present, else fallback
                if "wall_time" in group_df.columns and not bool(
                    pd.isna(row["wall_time"])
                ):
                    timestamps.append(str(row["wall_time"]))
                else:
                    timestamps.append("")

            if metrics_list:
                SQLiteStorage.bulk_log(
                    project=project,
                    run=str(run_name),
                    metrics_list=metrics_list,
                    steps=steps,
                    timestamps=timestamps,
                )

                total_imported += len(metrics_list)
                imported_runs.append(run_name)

                print(
                    f"* Imported {len(metrics_list)} scalar events from directory '{dir_name}' as run '{run_name}'"
                )
                print(f"* Metrics in this run: {', '.join(set(group_df['tag']))}")

        except Exception as e:
            print(f"* Error processing directory {dir_name}: {e}")
            continue

    if not imported_runs:
        raise ValueError("No valid TensorFlow events data could be imported")

    print(f"* Total imported events: {total_imported}")
    print(f"* Created runs: {', '.join(imported_runs)}")

    space_id, dataset_id = utils.preprocess_space_and_dataset_ids(space_id, dataset_id)
    if dataset_id is not None:
        os.environ["TRACKIO_DATASET_ID"] = dataset_id
        print(f"* Trackio metrics will be synced to Hugging Face Dataset: {dataset_id}")

    if space_id is None:
        utils.print_dashboard_instructions(project)
    else:
        deploy.create_space_if_not_exists(
            space_id, dataset_id=dataset_id, private=private
        )
        deploy.wait_until_space_exists(space_id)
        deploy.upload_db_to_space(project, space_id)
        print(
            f"* View dashboard by going to: {deploy.SPACE_URL.format(space_id=space_id)}"
        )
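A hedged usage sketch for `import_csv`, writing a small file that matches the documented format (header row, a `step` column, an optional `timestamp` column, remaining numeric columns treated as metrics); the file and project names are made up:

```python
from pathlib import Path

import trackio

csv_path = Path("metrics.csv")  # hypothetical file
csv_path.write_text(
    "step,timestamp,loss,accuracy\n"
    "0,2024-01-01 00:00:00,1.90,0.12\n"
    "1,2024-01-01 00:01:00,1.42,0.35\n"
)

# Must target a project that does not exist yet; the run name defaults
# to the file stem ("metrics") when `name` is not given.
trackio.import_csv(csv_path, project="csv-import-demo")
```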
media.py ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import os
+ import shutil
+ import uuid
+ from abc import ABC, abstractmethod
+ from pathlib import Path
+ from typing import Literal
+
+ import numpy as np
+ from PIL import Image as PILImage
+
+ try:  # absolute imports when installed
+     from trackio.file_storage import FileStorage
+     from trackio.utils import MEDIA_DIR
+     from trackio.video_writer import write_video
+ except ImportError:  # relative imports for local execution on Spaces
+     from file_storage import FileStorage
+     from utils import MEDIA_DIR
+     from video_writer import write_video
+
+
+ class TrackioMedia(ABC):
+     """
+     Abstract base class for Trackio media objects.
+     Provides shared functionality for file handling and serialization.
+     """
+
+     TYPE: str
+
+     def __init_subclass__(cls, **kwargs):
+         """Ensure subclasses define the TYPE attribute."""
+         super().__init_subclass__(**kwargs)
+         if not hasattr(cls, "TYPE") or cls.TYPE is None:
+             raise TypeError(f"Class {cls.__name__} must define TYPE attribute")
+
+     def __init__(self, value, caption: str | None = None):
+         self.caption = caption
+         self._value = value
+         self._file_path: Path | None = None
+
+         # Validate file existence for string/Path inputs
+         if isinstance(self._value, str | Path):
+             if not os.path.isfile(self._value):
+                 raise ValueError(f"File not found: {self._value}")
+
+     def _file_extension(self) -> str:
+         if self._file_path:
+             return self._file_path.suffix[1:].lower()
+         if isinstance(self._value, str | Path):
+             path = Path(self._value)
+             return path.suffix[1:].lower()
+         if hasattr(self, "_format") and self._format:
+             return self._format
+         return "unknown"
+
+     def _get_relative_file_path(self) -> Path | None:
+         return self._file_path
+
+     def _get_absolute_file_path(self) -> Path | None:
+         if self._file_path:
+             return MEDIA_DIR / self._file_path
+         return None
+
+     def _save(self, project: str, run: str, step: int = 0):
+         if self._file_path:
+             return
+
+         media_dir = FileStorage.init_project_media_path(project, run, step)
+         filename = f"{uuid.uuid4()}.{self._file_extension()}"
+         file_path = media_dir / filename
+
+         # Delegate to subclass-specific save logic
+         self._save_media(file_path)
+
+         self._file_path = file_path.relative_to(MEDIA_DIR)
+
+     @abstractmethod
+     def _save_media(self, file_path: Path):
+         """
+         Performs the actual media saving logic.
+         """
+         pass
+
+     def _to_dict(self) -> dict:
+         if not self._file_path:
+             raise ValueError("Media must be saved to file before serialization")
+         return {
+             "_type": self.TYPE,
+             "file_path": str(self._get_relative_file_path()),
+             "caption": self.caption,
+         }
+
+
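The base class fixes the media lifecycle: construct, then `_save()` (which picks a UUID filename under the project's media directory and delegates the byte-writing to `_save_media`), then `_to_dict()` for logging. A sketch of a conforming subclass; `TrackioBlob` is hypothetical and not part of the library, and the media directory layout shown in the comment is an assumption:

```python
class TrackioBlob(TrackioMedia):  # hypothetical subclass, for illustration only
    TYPE = "trackio.blob"  # required: __init_subclass__ rejects subclasses without TYPE

    def __init__(self, value: bytes, caption: str | None = None):
        super().__init__(value, caption)
        self._format = "bin"  # picked up by _file_extension()

    def _save_media(self, file_path: Path):
        # Only the byte-writing step is subclass-specific; filename choice,
        # directory layout, and serialization all live in TrackioMedia.
        file_path.write_bytes(self._value)

blob = TrackioBlob(b"\x00\x01", caption="demo")
blob._save("my-project", "run-1")  # writes <media dir>/<uuid>.bin (layout assumed)
print(blob._to_dict())  # {"_type": "trackio.blob", "file_path": "...", "caption": "demo"}
```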
+ TrackioImageSourceType = str | Path | np.ndarray | PILImage.Image
+
+
+ class TrackioImage(TrackioMedia):
+     """
+     Initializes an Image object.
+
+     Example:
+         ```python
+         import trackio
+         import numpy as np
+         from PIL import Image
+
+         # Create an image from numpy array
+         image_data = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
+         image = trackio.Image(image_data, caption="Random image")
+         trackio.log({"my_image": image})
+
+         # Create an image from PIL Image
+         pil_image = Image.new('RGB', (100, 100), color='red')
+         image = trackio.Image(pil_image, caption="Red square")
+         trackio.log({"red_image": image})
+
+         # Create an image from file path
+         image = trackio.Image("path/to/image.jpg", caption="Photo from file")
+         trackio.log({"file_image": image})
+         ```
+
+     Args:
+         value (`str`, `Path`, `numpy.ndarray`, or `PIL.Image`):
+             A path to an image, a PIL Image, or a numpy array of shape (height, width, channels).
+         caption (`str`, *optional*):
+             A string caption for the image.
+     """
+
+     TYPE = "trackio.image"
+
+     def __init__(self, value: TrackioImageSourceType, caption: str | None = None):
+         super().__init__(value, caption)
+         self._format: str | None = None
+
+         if (
+             isinstance(self._value, np.ndarray | PILImage.Image)
+             and self._format is None
+         ):
+             self._format = "png"
+
+     def _as_pil(self) -> PILImage.Image | None:
+         try:
+             if isinstance(self._value, np.ndarray):
+                 arr = np.asarray(self._value).astype("uint8")
+                 return PILImage.fromarray(arr).convert("RGBA")
+             if isinstance(self._value, PILImage.Image):
+                 return self._value.convert("RGBA")
+         except Exception as e:
+             raise ValueError(f"Failed to process image data: {self._value}") from e
+         return None
+
+     def _save_media(self, file_path: Path):
+         if pil := self._as_pil():
+             pil.save(file_path, format=self._format)
+         elif isinstance(self._value, str | Path):
+             if os.path.isfile(self._value):
+                 shutil.copy(self._value, file_path)
+             else:
+                 raise ValueError(f"File not found: {self._value}")
+
+
+ TrackioVideoSourceType = str | Path | np.ndarray
+ TrackioVideoFormatType = Literal["gif", "mp4", "webm"]
+
+
+ class TrackioVideo(TrackioMedia):
+     """
+     Initializes a Video object.
+
+     Example:
+         ```python
+         import trackio
+         import numpy as np
+
+         # Create a simple video from numpy array
+         frames = np.random.randint(0, 255, (10, 3, 64, 64), dtype=np.uint8)
+         video = trackio.Video(frames, caption="Random video", fps=30)
+
+         # Create a batch of videos
+         batch_frames = np.random.randint(0, 255, (3, 10, 3, 64, 64), dtype=np.uint8)
+         batch_video = trackio.Video(batch_frames, caption="Batch of videos", fps=15)
+
+         # Create video from file path
+         video = trackio.Video("path/to/video.mp4", caption="Video from file")
+         ```
+
+     Args:
+         value (`str`, `Path`, or `numpy.ndarray`):
+             A path to a video file, or a numpy array.
+             The array should be of type `np.uint8` with RGB values in the range `[0, 255]`.
+             It is expected to have a shape of either (frames, channels, height, width) or (batch, frames, channels, height, width).
+             For the latter, the videos will be tiled into a grid.
+         caption (`str`, *optional*):
+             A string caption for the video.
+         fps (`int`, *optional*):
+             Frames per second for the video. Only used when value is an ndarray. Default is `24`.
+         format (`Literal["gif", "mp4", "webm"]`, *optional*):
+             Video format ("gif", "mp4", or "webm"). Only used when value is an ndarray. Default is `"gif"`.
+     """
+
+     TYPE = "trackio.video"
+
+     def __init__(
+         self,
+         value: TrackioVideoSourceType,
+         caption: str | None = None,
+         fps: int | None = None,
+         format: TrackioVideoFormatType | None = None,
+     ):
+         super().__init__(value, caption)
+         if isinstance(value, np.ndarray):
+             if format is None:
+                 format = "gif"
+             if fps is None:
+                 fps = 24
+         self._fps = fps
+         self._format = format
+
+     @property
+     def _codec(self) -> str:
+         match self._format:
+             case "gif":
+                 return "gif"
+             case "mp4":
+                 return "h264"
+             case "webm":
+                 return "vp9"
+             case _:
+                 raise ValueError(f"Unsupported format: {self._format}")
+
+     def _save_media(self, file_path: Path):
+         if isinstance(self._value, np.ndarray):
+             video = TrackioVideo._process_ndarray(self._value)
+             write_video(file_path, video, fps=self._fps, codec=self._codec)
+         elif isinstance(self._value, str | Path):
+             if os.path.isfile(self._value):
+                 shutil.copy(self._value, file_path)
+             else:
+                 raise ValueError(f"File not found: {self._value}")
+
+     @staticmethod
+     def _process_ndarray(value: np.ndarray) -> np.ndarray:
+         # Verify value is either a 4D (single video) or 5D (batched videos) array.
+         # Expected format: (frames, channels, height, width) or (batch, frames, channels, height, width)
+         if value.ndim < 4:
+             raise ValueError(
+                 "Video requires at least 4 dimensions (frames, channels, height, width)"
+             )
+         if value.ndim > 5:
+             raise ValueError(
+                 "Videos can have at most 5 dimensions (batch, frames, channels, height, width)"
+             )
+         if value.ndim == 4:
+             # Reshape to 5D with single batch: (1, frames, channels, height, width)
+             value = value[np.newaxis, ...]
+
+         value = TrackioVideo._tile_batched_videos(value)
+         return value
+
+     @staticmethod
+     def _tile_batched_videos(video: np.ndarray) -> np.ndarray:
+         """
+         Tiles a batch of videos into a grid of videos.
+
+         Input format: (batch, frames, channels, height, width) - original FCHW format
+         Output format: (frames, total_height, total_width, channels)
+         """
+         batch_size, frames, channels, height, width = video.shape
+
+         # Pad the batch up to the next power of two so it fills a regular grid.
+         next_pow2 = 1 << (batch_size - 1).bit_length()
+         if batch_size != next_pow2:
+             pad_len = next_pow2 - batch_size
+             pad_shape = (pad_len, frames, channels, height, width)
+             padding = np.zeros(pad_shape, dtype=video.dtype)
+             video = np.concatenate((video, padding), axis=0)
+             batch_size = next_pow2
+
+         n_rows = 1 << ((batch_size.bit_length() - 1) // 2)
+         n_cols = batch_size // n_rows
+
+         # Reshape to grid layout: (n_rows, n_cols, frames, channels, height, width)
+         video = video.reshape(n_rows, n_cols, frames, channels, height, width)
+
+         # Rearrange dimensions to (frames, total_height, total_width, channels)
+         video = video.transpose(2, 0, 4, 1, 5, 3)
+         video = video.reshape(frames, n_rows * height, n_cols * width, channels)
+         return video
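To make the tiling arithmetic concrete: a batch of 3 videos is zero-padded to the next power of two (4), which fills a 2×2 grid, so each output frame is twice the height and width of an input frame, channels-last, with the fourth tile black. A quick check, assuming only numpy:

```python
import numpy as np

batch = np.random.randint(0, 255, (3, 10, 3, 64, 64), dtype=np.uint8)  # (B, F, C, H, W)
tiled = TrackioVideo._tile_batched_videos(batch)
assert tiled.shape == (10, 128, 128, 3)  # (frames, 2 * 64, 2 * 64, channels)
```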
py.typed ADDED
File without changes
run.py ADDED
@@ -0,0 +1,180 @@
+ import threading
+ import time
+ from datetime import datetime, timezone
+
+ import huggingface_hub
+ from gradio_client import Client, handle_file
+
+ from trackio import utils
+ from trackio.media import TrackioMedia
+ from trackio.sqlite_storage import SQLiteStorage
+ from trackio.table import Table
+ from trackio.typehints import LogEntry, UploadEntry
+
+ BATCH_SEND_INTERVAL = 0.5
+
+
+ class Run:
+     def __init__(
+         self,
+         url: str,
+         project: str,
+         client: Client | None,
+         name: str | None = None,
+         group: str | None = None,
+         config: dict | None = None,
+         space_id: str | None = None,
+     ):
+         self.url = url
+         self.project = project
+         self._client_lock = threading.Lock()
+         self._client_thread = None
+         self._client = client
+         self._space_id = space_id
+         self.name = name or utils.generate_readable_name(
+             SQLiteStorage.get_runs(project), space_id
+         )
+         self.group = group
+         self.config = utils.to_json_safe(config or {})
+
+         if isinstance(self.config, dict):
+             for key in self.config:
+                 if key.startswith("_"):
+                     raise ValueError(
+                         f"Config key '{key}' is reserved (keys starting with '_' are reserved for internal use)"
+                     )
+
+         self.config["_Username"] = self._get_username()
+         self.config["_Created"] = datetime.now(timezone.utc).isoformat()
+         self.config["_Group"] = self.group
+
+         self._queued_logs: list[LogEntry] = []
+         self._queued_uploads: list[UploadEntry] = []
+         self._stop_flag = threading.Event()
+         self._config_logged = False
+
+         self._client_thread = threading.Thread(target=self._init_client_background)
+         self._client_thread.daemon = True
+         self._client_thread.start()
+
+     def _get_username(self) -> str | None:
+         """Get the current HuggingFace username if logged in, otherwise None."""
+         try:
+             who = huggingface_hub.whoami()
+             return who["name"] if who else None
+         except Exception:
+             return None
+
+     def _batch_sender(self):
+         """Send batched logs every BATCH_SEND_INTERVAL."""
+         while not self._stop_flag.is_set() or len(self._queued_logs) > 0:
+             # If the stop flag has been set, then just quickly send all
+             # the logs and exit.
+             if not self._stop_flag.is_set():
+                 time.sleep(BATCH_SEND_INTERVAL)
+
+             with self._client_lock:
+                 if self._client is None:
+                     return
+                 if self._queued_logs:
+                     logs_to_send = self._queued_logs.copy()
+                     self._queued_logs.clear()
+                     self._client.predict(
+                         api_name="/bulk_log",
+                         logs=logs_to_send,
+                         hf_token=huggingface_hub.utils.get_token(),
+                     )
+                 if self._queued_uploads:
+                     uploads_to_send = self._queued_uploads.copy()
+                     self._queued_uploads.clear()
+                     self._client.predict(
+                         api_name="/bulk_upload_media",
+                         uploads=uploads_to_send,
+                         hf_token=huggingface_hub.utils.get_token(),
+                     )
+
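Note the drain behavior of `_batch_sender`: the loop keeps iterating while the stop flag is set but logs remain queued, and it skips the sleep once stopping, so `finish()` flushes quickly. A standalone model of the same pattern (the names here are illustrative, not trackio APIs):

```python
import queue
import threading
import time

stop = threading.Event()
work: queue.Queue = queue.Queue()

def drain_loop(flush_interval: float = 0.5):
    # Run until asked to stop AND the queue is empty.
    while not stop.is_set() or not work.empty():
        if not stop.is_set():
            time.sleep(flush_interval)  # batch items up between flushes
        while not work.empty():
            work.get_nowait()  # stand-in for client.predict(...)
```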
+     def _init_client_background(self):
+         if self._client is None:
+             fib = utils.fibo()
+             for sleep_coefficient in fib:
+                 try:
+                     client = Client(self.url, verbose=False)
+
+                     with self._client_lock:
+                         self._client = client
+                     break
+                 except Exception:
+                     pass
+                 if sleep_coefficient is not None:
+                     time.sleep(0.1 * sleep_coefficient)
+
+         self._batch_sender()
+
+     def _process_media(self, metrics, step: int | None) -> dict:
+         """
+         Serialize media in metrics and upload to space if needed.
+         """
+         serializable_metrics = {}
+         if not step:
+             step = 0
+         for key, value in metrics.items():
+             if isinstance(value, TrackioMedia):
+                 value._save(self.project, self.name, step)
+                 serializable_metrics[key] = value._to_dict()
+                 if self._space_id:
+                     # Upload local media when deploying to space
+                     upload_entry: UploadEntry = {
+                         "project": self.project,
+                         "run": self.name,
+                         "step": step,
+                         "uploaded_file": handle_file(value._get_absolute_file_path()),
+                     }
+                     with self._client_lock:
+                         self._queued_uploads.append(upload_entry)
+             else:
+                 serializable_metrics[key] = value
+         return serializable_metrics
+
+     @staticmethod
+     def _replace_tables(metrics):
+         for k, v in metrics.items():
+             if isinstance(v, Table):
+                 metrics[k] = v._to_dict()
+
+     def log(self, metrics: dict, step: int | None = None):
+         for k in metrics.keys():
+             if k in utils.RESERVED_KEYS or k.startswith("__"):
+                 raise ValueError(
+                     f"Please do not use this reserved key as a metric: {k}"
+                 )
+         Run._replace_tables(metrics)
+
+         metrics = self._process_media(metrics, step)
+         metrics = utils.serialize_values(metrics)
+
+         config_to_log = None
+         if not self._config_logged and self.config:
+             config_to_log = utils.to_json_safe(self.config)
+             self._config_logged = True
+
+         log_entry: LogEntry = {
+             "project": self.project,
+             "run": self.name,
+             "metrics": metrics,
+             "step": step,
+             "config": config_to_log,
+         }
+
+         with self._client_lock:
+             self._queued_logs.append(log_entry)
+
+     def finish(self):
+         """Cleanup when run is finished."""
+         self._stop_flag.set()
+
+         # Wait for the batch sender to finish before joining the client thread.
+         time.sleep(2 * BATCH_SEND_INTERVAL)
+
+         if self._client_thread is not None:
+             print("* Run finished. Uploading logs to Trackio (please wait...)")
+             self._client_thread.join()
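End to end, a `Run` queues everything and lets the daemon thread ship it. A usage sketch (normally `trackio.init()` constructs the `Run` for you; the URL is an assumption about a locally running dashboard):

```python
run = Run(url="http://127.0.0.1:7860", project="demo", client=None, config={"lr": 1e-3})
run.log({"loss": 0.42})          # queued; flushed at most every BATCH_SEND_INTERVAL (0.5 s)
run.log({"loss": 0.40}, step=1)  # the config is attached to the first queued entry only
run.finish()                     # sets the stop flag, then joins the sender thread
```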
sqlite_storage.py ADDED
@@ -0,0 +1,580 @@
+ import os
+ import platform
+ import sqlite3
+ import time
+ from datetime import datetime
+ from pathlib import Path
+ from threading import Lock
+
+ try:
+     import fcntl
+ except ImportError:  # fcntl is not available on Windows
+     fcntl = None
+
+ import huggingface_hub as hf
+ import orjson
+ import pandas as pd
+
+ try:  # absolute imports when installed from PyPI
+     from trackio.commit_scheduler import CommitScheduler
+     from trackio.dummy_commit_scheduler import DummyCommitScheduler
+     from trackio.utils import (
+         TRACKIO_DIR,
+         deserialize_values,
+         serialize_values,
+     )
+ except ImportError:  # relative imports when installed from source on Spaces
+     from commit_scheduler import CommitScheduler
+     from dummy_commit_scheduler import DummyCommitScheduler
+     from utils import TRACKIO_DIR, deserialize_values, serialize_values
+
+
+ class ProcessLock:
+     """A file-based lock that works across processes. It is a no-op on Windows."""
+
+     def __init__(self, lockfile_path: Path):
+         self.lockfile_path = lockfile_path
+         self.lockfile = None
+         self.is_windows = platform.system() == "Windows"
+
+     def __enter__(self):
+         """Acquire the lock with retry logic."""
+         if self.is_windows:
+             return self
+         self.lockfile_path.parent.mkdir(parents=True, exist_ok=True)
+         self.lockfile = open(self.lockfile_path, "w")
+
+         max_retries = 100
+         for attempt in range(max_retries):
+             try:
+                 fcntl.flock(self.lockfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+                 return self
+             except IOError:
+                 if attempt < max_retries - 1:
+                     time.sleep(0.1)
+                 else:
+                     raise IOError("Could not acquire database lock after 10 seconds")
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         """Release the lock."""
+         if self.is_windows:
+             return
+
+         if self.lockfile:
+             fcntl.flock(self.lockfile.fileno(), fcntl.LOCK_UN)
+             self.lockfile.close()
+
+
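Since SQLite tolerates only one writer at a time, every write path below wraps its connection in this lock (via `_get_process_lock`). Used directly, it looks like this (the path is illustrative):

```python
from pathlib import Path

lock = ProcessLock(Path("/tmp/trackio-demo.lock"))
with lock:  # retries the non-blocking flock for up to ~10 s, then raises IOError
    ...     # safe to write to the shared SQLite file here
```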
+ class SQLiteStorage:
+     _dataset_import_attempted = False
+     _current_scheduler: CommitScheduler | DummyCommitScheduler | None = None
+     _scheduler_lock = Lock()
+
+     @staticmethod
+     def _get_connection(db_path: Path) -> sqlite3.Connection:
+         conn = sqlite3.connect(str(db_path), timeout=30.0)
+         conn.execute("PRAGMA journal_mode = WAL")
+         conn.row_factory = sqlite3.Row
+         return conn
+
+     @staticmethod
+     def _get_process_lock(project: str) -> ProcessLock:
+         lockfile_path = TRACKIO_DIR / f"{project}.lock"
+         return ProcessLock(lockfile_path)
+
+     @staticmethod
+     def get_project_db_filename(project: str) -> str:
+         """Get the database filename for a specific project."""
+         safe_project_name = "".join(
+             c for c in project if c.isalnum() or c in ("-", "_")
+         ).rstrip()
+         if not safe_project_name:
+             safe_project_name = "default"
+         return f"{safe_project_name}.db"
+
+     @staticmethod
+     def get_project_db_path(project: str) -> Path:
+         """Get the database path for a specific project."""
+         filename = SQLiteStorage.get_project_db_filename(project)
+         return TRACKIO_DIR / filename
+
+     @staticmethod
+     def init_db(project: str) -> Path:
+         """
+         Initialize the SQLite database with required tables.
+         If there is a dataset ID provided, copies from that dataset instead.
+         Returns the database path.
+         """
+         db_path = SQLiteStorage.get_project_db_path(project)
+         db_path.parent.mkdir(parents=True, exist_ok=True)
+         with SQLiteStorage._get_process_lock(project):
+             with sqlite3.connect(db_path, timeout=30.0) as conn:
+                 conn.execute("PRAGMA journal_mode = WAL")
+                 cursor = conn.cursor()
+                 cursor.execute("""
+                     CREATE TABLE IF NOT EXISTS metrics (
+                         id INTEGER PRIMARY KEY AUTOINCREMENT,
+                         timestamp TEXT NOT NULL,
+                         run_name TEXT NOT NULL,
+                         step INTEGER NOT NULL,
+                         metrics TEXT NOT NULL
+                     )
+                 """)
+                 cursor.execute("""
+                     CREATE TABLE IF NOT EXISTS configs (
+                         id INTEGER PRIMARY KEY AUTOINCREMENT,
+                         run_name TEXT NOT NULL,
+                         config TEXT NOT NULL,
+                         created_at TEXT NOT NULL,
+                         UNIQUE(run_name)
+                     )
+                 """)
+                 cursor.execute(
+                     """
+                     CREATE INDEX IF NOT EXISTS idx_metrics_run_step
+                     ON metrics(run_name, step)
+                     """
+                 )
+                 cursor.execute(
+                     """
+                     CREATE INDEX IF NOT EXISTS idx_configs_run_name
+                     ON configs(run_name)
+                     """
+                 )
+                 cursor.execute(
+                     """
+                     CREATE INDEX IF NOT EXISTS idx_metrics_run_timestamp
+                     ON metrics(run_name, timestamp)
+                     """
+                 )
+                 conn.commit()
+         return db_path
+
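The schema stores each logged step as one row, with all metrics packed into a single JSON `metrics` column; the indexes cover the two common lookups (by run/step and by run/timestamp). A sketch of reading a row back with plain sqlite3 (the project name and metric values are illustrative):

```python
import sqlite3

import orjson

db_path = SQLiteStorage.init_db("demo")  # creates tables and indexes if missing
with sqlite3.connect(db_path) as conn:
    row = conn.execute(
        "SELECT run_name, step, metrics FROM metrics LIMIT 1"
    ).fetchone()
    if row is not None:
        run_name, step, metrics_json = row
        print(run_name, step, orjson.loads(metrics_json))  # e.g. run-1 0 {'loss': 0.42}
```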
+     @staticmethod
+     def export_to_parquet():
+         """
+         Exports all projects' DB files as Parquet under the same path but with extension ".parquet".
+         """
+         # don't attempt to export (potentially wrong/blank) data before importing for the first time
+         if not SQLiteStorage._dataset_import_attempted:
+             return
+         all_paths = os.listdir(TRACKIO_DIR)
+         db_paths = [f for f in all_paths if f.endswith(".db")]
+         for db_path in db_paths:
+             db_path = TRACKIO_DIR / db_path
+             parquet_path = db_path.with_suffix(".parquet")
+             if (not parquet_path.exists()) or (
+                 db_path.stat().st_mtime > parquet_path.stat().st_mtime
+             ):
+                 with sqlite3.connect(db_path) as conn:
+                     df = pd.read_sql("SELECT * from metrics", conn)
+                     # break out the single JSON metrics column into individual columns
+                     metrics = df["metrics"].copy()
+                     metrics = pd.DataFrame(
+                         metrics.apply(
+                             lambda x: deserialize_values(orjson.loads(x))
+                         ).values.tolist(),
+                         index=df.index,
+                     )
+                     del df["metrics"]
+                     for col in metrics.columns:
+                         df[col] = metrics[col]
+                     df.to_parquet(parquet_path)
+
+     @staticmethod
+     def import_from_parquet():
+         """
+         Imports to all DB files that have matching files under the same path but with extension ".parquet".
+         """
+         all_paths = os.listdir(TRACKIO_DIR)
+         parquet_paths = [f for f in all_paths if f.endswith(".parquet")]
+         for parquet_path in parquet_paths:
+             parquet_path = TRACKIO_DIR / parquet_path
+             db_path = parquet_path.with_suffix(".db")
+             df = pd.read_parquet(parquet_path)
+             with sqlite3.connect(db_path) as conn:
+                 # fix up df to have a single JSON metrics column
+                 if "metrics" not in df.columns:
+                     # separate other columns from metrics
+                     metrics = df.copy()
+                     other_cols = ["id", "timestamp", "run_name", "step"]
+                     df = df[other_cols]
+                     for col in other_cols:
+                         del metrics[col]
+                     # combine them all into a single metrics col
+                     metrics = orjson.loads(metrics.to_json(orient="records"))
+                     df["metrics"] = [
+                         orjson.dumps(serialize_values(row)) for row in metrics
+                     ]
+                 df.to_sql("metrics", conn, if_exists="replace", index=False)
+
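Export and import are intended to be inverses: export widens the JSON `metrics` column into one Parquet column per metric, and import collapses those columns back into JSON. A small sketch of the same widening step using pandas alone (the metric values are illustrative):

```python
import orjson
import pandas as pd

df = pd.DataFrame({"metrics": [orjson.dumps({"loss": 0.5}), orjson.dumps({"loss": 0.4})]})
wide = pd.DataFrame(df["metrics"].apply(orjson.loads).tolist(), index=df.index)
print(wide)  # a single "loss" column: 0.5, 0.4 - what lands in the Parquet file
```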
+     @staticmethod
+     def get_scheduler():
+         """
+         Get the scheduler for the database based on the environment variables.
+         This applies to both local and Spaces.
+         """
+         with SQLiteStorage._scheduler_lock:
+             if SQLiteStorage._current_scheduler is not None:
+                 return SQLiteStorage._current_scheduler
+             hf_token = os.environ.get("HF_TOKEN")
+             dataset_id = os.environ.get("TRACKIO_DATASET_ID")
+             space_repo_name = os.environ.get("SPACE_REPO_NAME")
+             if dataset_id is None or space_repo_name is None:
+                 scheduler = DummyCommitScheduler()
+             else:
+                 scheduler = CommitScheduler(
+                     repo_id=dataset_id,
+                     repo_type="dataset",
+                     folder_path=TRACKIO_DIR,
+                     private=True,
+                     allow_patterns=["*.parquet", "media/**/*"],
+                     squash_history=True,
+                     token=hf_token,
+                     on_before_commit=SQLiteStorage.export_to_parquet,
+                 )
+             SQLiteStorage._current_scheduler = scheduler
+             return scheduler
+
+     @staticmethod
+     def log(project: str, run: str, metrics: dict, step: int | None = None):
+         """
+         Safely log metrics to the database. Before logging, this method will ensure the database exists
+         and is set up with the correct tables. It also uses a cross-process lock to prevent
+         database locking errors when multiple processes access the same database.
+
+         This method is not used in the latest versions of Trackio (replaced by bulk_log) but
+         is kept for backwards compatibility for users who are connecting to a newer version of
+         a Trackio Spaces dashboard with an older version of Trackio installed locally.
+         """
+         db_path = SQLiteStorage.init_db(project)
+
+         with SQLiteStorage._get_process_lock(project):
+             with SQLiteStorage._get_connection(db_path) as conn:
+                 cursor = conn.cursor()
+
+                 cursor.execute(
+                     """
+                     SELECT MAX(step)
+                     FROM metrics
+                     WHERE run_name = ?
+                     """,
+                     (run,),
+                 )
+                 last_step = cursor.fetchone()[0]
+                 if step is None:
+                     current_step = 0 if last_step is None else last_step + 1
+                 else:
+                     current_step = step
+
+                 current_timestamp = datetime.now().isoformat()
+
+                 cursor.execute(
+                     """
+                     INSERT INTO metrics
+                     (timestamp, run_name, step, metrics)
+                     VALUES (?, ?, ?, ?)
+                     """,
+                     (
+                         current_timestamp,
+                         run,
+                         current_step,
+                         orjson.dumps(serialize_values(metrics)),
+                     ),
+                 )
+                 conn.commit()
+
+     @staticmethod
+     def bulk_log(
+         project: str,
+         run: str,
+         metrics_list: list[dict],
+         steps: list[int] | None = None,
+         timestamps: list[str] | None = None,
+         config: dict | None = None,
+     ):
+         """
+         Safely log bulk metrics to the database. Before logging, this method will ensure the database exists
+         and is set up with the correct tables. It also uses a cross-process lock to prevent
+         database locking errors when multiple processes access the same database.
+         """
+         if not metrics_list:
+             return
+
+         if timestamps is None:
+             timestamps = [datetime.now().isoformat()] * len(metrics_list)
+
+         db_path = SQLiteStorage.init_db(project)
+         with SQLiteStorage._get_process_lock(project):
+             with SQLiteStorage._get_connection(db_path) as conn:
+                 cursor = conn.cursor()
+
+                 if steps is None:
+                     steps = list(range(len(metrics_list)))
+                 elif any(s is None for s in steps):
+                     cursor.execute(
+                         "SELECT MAX(step) FROM metrics WHERE run_name = ?", (run,)
+                     )
+                     last_step = cursor.fetchone()[0]
+                     current_step = 0 if last_step is None else last_step + 1
+
+                     processed_steps = []
+                     for step in steps:
+                         if step is None:
+                             processed_steps.append(current_step)
+                             current_step += 1
+                         else:
+                             processed_steps.append(step)
+                     steps = processed_steps
+
+                 if len(metrics_list) != len(steps) or len(metrics_list) != len(
+                     timestamps
+                 ):
+                     raise ValueError(
+                         "metrics_list, steps, and timestamps must have the same length"
+                     )
+
+                 data = []
+                 for i, metrics in enumerate(metrics_list):
+                     data.append(
+                         (
+                             timestamps[i],
+                             run,
+                             steps[i],
+                             orjson.dumps(serialize_values(metrics)),
+                         )
+                     )
+
+                 cursor.executemany(
+                     """
+                     INSERT INTO metrics
+                     (timestamp, run_name, step, metrics)
+                     VALUES (?, ?, ?, ?)
+                     """,
+                     data,
+                 )
+
+                 if config:
+                     current_timestamp = datetime.now().isoformat()
+                     cursor.execute(
+                         """
+                         INSERT OR REPLACE INTO configs
+                         (run_name, config, created_at)
+                         VALUES (?, ?, ?)
+                         """,
+                         (
+                             run,
+                             orjson.dumps(serialize_values(config)),
+                             current_timestamp,
+                         ),
+                     )
+
+                 conn.commit()
+
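The step handling in `bulk_log` covers three cases: no steps given (numbered from 0), some `None` entries (filled in sequence starting after the run's current maximum step), or fully specified. A hedged usage sketch, with illustrative values:

```python
SQLiteStorage.bulk_log(
    project="demo",
    run="run-1",
    metrics_list=[{"loss": 0.5}, {"loss": 0.4}, {"loss": 0.3}],
    steps=[10, None, None],  # Nones are filled from the run's MAX(step) + 1 (or 0 for a new run)
)
```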
+     @staticmethod
+     def get_logs(project: str, run: str) -> list[dict]:
+         """Retrieve logs for a specific run. Logs include the step count (int) and the timestamp (ISO-format string)."""
+         db_path = SQLiteStorage.get_project_db_path(project)
+         if not db_path.exists():
+             return []
+
+         with SQLiteStorage._get_connection(db_path) as conn:
+             cursor = conn.cursor()
+             cursor.execute(
+                 """
+                 SELECT timestamp, step, metrics
+                 FROM metrics
+                 WHERE run_name = ?
+                 ORDER BY timestamp
+                 """,
+                 (run,),
+             )
+
+             rows = cursor.fetchall()
+             results = []
+             for row in rows:
+                 metrics = orjson.loads(row["metrics"])
+                 metrics = deserialize_values(metrics)
+                 metrics["timestamp"] = row["timestamp"]
+                 metrics["step"] = row["step"]
+                 results.append(metrics)
+             return results
+
+     @staticmethod
+     def load_from_dataset():
+         dataset_id = os.environ.get("TRACKIO_DATASET_ID")
+         space_repo_name = os.environ.get("SPACE_REPO_NAME")
+         if dataset_id is not None and space_repo_name is not None:
+             hfapi = hf.HfApi()
+             updated = False
+             if not TRACKIO_DIR.exists():
+                 TRACKIO_DIR.mkdir(parents=True, exist_ok=True)
+             with SQLiteStorage.get_scheduler().lock:
+                 try:
+                     files = hfapi.list_repo_files(dataset_id, repo_type="dataset")
+                     for file in files:
+                         # Download parquet and media assets
+                         if not (file.endswith(".parquet") or file.startswith("media/")):
+                             continue
+                         if (TRACKIO_DIR / file).exists():
+                             continue
+                         hf.hf_hub_download(
+                             dataset_id, file, repo_type="dataset", local_dir=TRACKIO_DIR
+                         )
+                         updated = True
+                 except hf.errors.EntryNotFoundError:
+                     pass
+                 except hf.errors.RepositoryNotFoundError:
+                     pass
+             if updated:
+                 SQLiteStorage.import_from_parquet()
+         SQLiteStorage._dataset_import_attempted = True
+
+     @staticmethod
+     def get_projects() -> list[str]:
+         """
+         Get list of all projects by scanning the database files in the trackio directory.
+         """
+         if not SQLiteStorage._dataset_import_attempted:
+             SQLiteStorage.load_from_dataset()
+
+         projects: set[str] = set()
+         if not TRACKIO_DIR.exists():
+             return []
+
+         for db_file in TRACKIO_DIR.glob("*.db"):
+             project_name = db_file.stem
+             projects.add(project_name)
+         return sorted(projects)
+
+     @staticmethod
+     def get_runs(project: str) -> list[str]:
+         """Get list of all runs for a project."""
+         db_path = SQLiteStorage.get_project_db_path(project)
+         if not db_path.exists():
+             return []
+
+         with SQLiteStorage._get_connection(db_path) as conn:
+             cursor = conn.cursor()
+             cursor.execute(
+                 "SELECT DISTINCT run_name FROM metrics",
+             )
+             return [row[0] for row in cursor.fetchall()]
+
+     @staticmethod
+     def get_max_steps_for_runs(project: str) -> dict[str, int]:
+         """Get the maximum step for each run in a project."""
+         db_path = SQLiteStorage.get_project_db_path(project)
+         if not db_path.exists():
+             return {}
+
+         with SQLiteStorage._get_connection(db_path) as conn:
+             cursor = conn.cursor()
+             cursor.execute(
+                 """
+                 SELECT run_name, MAX(step) as max_step
+                 FROM metrics
+                 GROUP BY run_name
+                 """
+             )
+
+             results = {}
+             for row in cursor.fetchall():
+                 results[row["run_name"]] = row["max_step"]
+
+             return results
+
+     @staticmethod
+     def store_config(project: str, run: str, config: dict) -> None:
+         """Store configuration for a run."""
+         db_path = SQLiteStorage.init_db(project)
+
+         with SQLiteStorage._get_process_lock(project):
+             with SQLiteStorage._get_connection(db_path) as conn:
+                 cursor = conn.cursor()
+                 current_timestamp = datetime.now().isoformat()
+
+                 cursor.execute(
+                     """
+                     INSERT OR REPLACE INTO configs
+                     (run_name, config, created_at)
+                     VALUES (?, ?, ?)
+                     """,
+                     (run, orjson.dumps(serialize_values(config)), current_timestamp),
+                 )
+                 conn.commit()
+
+     @staticmethod
+     def get_run_config(project: str, run: str) -> dict | None:
+         """Get configuration for a specific run."""
+         db_path = SQLiteStorage.get_project_db_path(project)
+         if not db_path.exists():
+             return None
+
+         with SQLiteStorage._get_connection(db_path) as conn:
+             cursor = conn.cursor()
+             try:
+                 cursor.execute(
+                     """
+                     SELECT config FROM configs WHERE run_name = ?
+                     """,
+                     (run,),
+                 )
+
+                 row = cursor.fetchone()
+                 if row:
+                     config = orjson.loads(row["config"])
+                     return deserialize_values(config)
+                 return None
+             except sqlite3.OperationalError as e:
+                 if "no such table: configs" in str(e):
+                     return None
+                 raise
+
+     @staticmethod
+     def delete_run(project: str, run: str) -> bool:
+         """Delete a run from the database (both metrics and config)."""
+         db_path = SQLiteStorage.get_project_db_path(project)
+         if not db_path.exists():
+             return False
+
+         with SQLiteStorage._get_process_lock(project):
+             with SQLiteStorage._get_connection(db_path) as conn:
+                 cursor = conn.cursor()
+                 try:
+                     cursor.execute("DELETE FROM metrics WHERE run_name = ?", (run,))
+                     cursor.execute("DELETE FROM configs WHERE run_name = ?", (run,))
+                     conn.commit()
+                     return True
+                 except sqlite3.Error:
+                     return False
+
+     @staticmethod
+     def get_all_run_configs(project: str) -> dict[str, dict]:
+         """Get configurations for all runs in a project."""
+         db_path = SQLiteStorage.get_project_db_path(project)
+         if not db_path.exists():
+             return {}
+
+         with SQLiteStorage._get_connection(db_path) as conn:
+             cursor = conn.cursor()
+             try:
+                 cursor.execute(
+                     """
+                     SELECT run_name, config FROM configs
+                     """
+                 )
+
+                 results = {}
+                 for row in cursor.fetchall():
+                     config = orjson.loads(row["config"])
+                     results[row["run_name"]] = deserialize_values(config)
+                 return results
+             except sqlite3.OperationalError as e:
+                 if "no such table: configs" in str(e):
+                     return {}
+                 raise
+
+     def finish(self):
+         """Cleanup when run is finished."""
+         pass
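On the read side, `get_logs` merges `timestamp` and `step` back into each metrics dict, and configs round-trip through the same orjson serialization. A short sketch (the project, run, and metric names are illustrative):

```python
SQLiteStorage.store_config("demo", "run-1", {"lr": 1e-3})
print(SQLiteStorage.get_run_config("demo", "run-1"))  # {'lr': 0.001}

for entry in SQLiteStorage.get_logs("demo", "run-1"):
    # each entry is the logged metrics dict plus "timestamp" and "step" keys
    print(entry["step"], entry.get("loss"), entry["timestamp"])
```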
table.py ADDED
@@ -0,0 +1,53 @@
+ from typing import Any, Literal
+
+ from pandas import DataFrame
+
+
+ class Table:
+     """
+     Initializes a Table object.
+
+     Args:
+         columns (`list[str]`, *optional*):
+             Names of the columns in the table. Optional if `data` is provided. Not
+             expected if `dataframe` is provided. Currently ignored.
+         data (`list[list[Any]]`, *optional*):
+             2D row-oriented array of values.
+         dataframe (`pandas.DataFrame`, *optional*):
+             DataFrame object used to create the table. When set, `data` and `columns`
+             arguments are ignored.
+         rows (`list[list[Any]]`, *optional*):
+             Currently ignored.
+         optional (`bool` or `list[bool]`, *optional*, defaults to `True`):
+             Currently ignored.
+         log_mode (`Literal["IMMUTABLE", "MUTABLE", "INCREMENTAL"]` or `None`, *optional*, defaults to `"IMMUTABLE"`):
+             Currently ignored.
+         allow_mixed_types (`bool`, *optional*, defaults to `False`):
+             Currently ignored.
+     """
+
+     TYPE = "trackio.table"
+
+     def __init__(
+         self,
+         columns: list[str] | None = None,
+         data: list[list[Any]] | None = None,
+         dataframe: DataFrame | None = None,
+         rows: list[list[Any]] | None = None,
+         optional: bool | list[bool] = True,
+         allow_mixed_types: bool = False,
+         log_mode: Literal["IMMUTABLE", "MUTABLE", "INCREMENTAL"] | None = "IMMUTABLE",
+     ):
+         # TODO: implement support for columns, dtype, optional, allow_mixed_types, and log_mode.
+         # For now (like `rows`) they are included for API compat but don't do anything.
+
+         if dataframe is None:
+             self.data = data
+         else:
+             self.data = dataframe.to_dict(orient="records")
+
+     def _to_dict(self):
+         return {
+             "_type": self.TYPE,
+             "_value": self.data,
+         }
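Because `dataframe` takes precedence over `data`, what actually gets serialized is the records-oriented dump of the DataFrame. A quick sketch with illustrative values:

```python
import pandas as pd

df = pd.DataFrame({"epoch": [1, 2], "acc": [0.81, 0.87]})
table = Table(dataframe=df)
print(table._to_dict())
# {'_type': 'trackio.table', '_value': [{'epoch': 1, 'acc': 0.81}, {'epoch': 2, 'acc': 0.87}]}
```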
typehints.py ADDED
@@ -0,0 +1,18 @@
+ from typing import Any, TypedDict
+
+ from gradio import FileData
+
+
+ class LogEntry(TypedDict):
+     project: str
+     run: str
+     metrics: dict[str, Any]
+     step: int | None
+     config: dict[str, Any] | None
+
+
+ class UploadEntry(TypedDict):
+     project: str
+     run: str
+     step: int | None
+     uploaded_file: FileData
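These TypedDicts describe the payloads that `Run` queues for the dashboard's `/bulk_log` and `/bulk_upload_media` endpoints. A conforming `LogEntry`, with illustrative values:

```python
entry: LogEntry = {
    "project": "demo",
    "run": "run-1",
    "metrics": {"loss": 0.42},
    "step": 0,
    "config": None,  # only the first entry of a run carries the config
}
```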