# my_test / requirements.txt
# (Hugging Face page header captured with the file — kept as comments so pip can parse this file)
# AlexHung29629: Update requirements.txt
# commit c834bca (verified)
# This file was autogenerated by uv via the following command:
# uv pip compile pyproject.toml
accelerate>=1.8.1
# via mistral-7b (pyproject.toml)
aiofiles>=24.1.0
# via gradio
annotated-types>=0.7.0
# via pydantic
anyio>=4.9.0
# via
# gradio
# httpx
# starlette
bitsandbytes>=0.46.1
# via mistral-7b (pyproject.toml)
certifi>=2025.7.9
# via
# httpcore
# httpx
# requests
charset-normalizer>=3.4.2
# via requests
click>=8.2.1
# via
# typer
# uvicorn
fastapi>=0.116.0
# via gradio
ffmpy>=0.6.0
# via gradio
filelock>=3.18.0
# via
# huggingface-hub
# torch
# transformers
# triton
fsspec>=2025.5.1
# via
# gradio-client
# huggingface-hub
# torch
gradio>=5.35.0
# via
# mistral-7b (pyproject.toml)
# spaces
gradio-client>=1.10.4
# via gradio
groovy>=0.1.2
# via gradio
h11>=0.16.0
# via
# httpcore
# uvicorn
hf-transfer>=0.1.9
# via mistral-7b (pyproject.toml)
hf-xet>=1.1.5
# via huggingface-hub
httpcore>=1.0.9
# via httpx
httpx>=0.28.1
# via
# gradio
# gradio-client
# safehttpx
# spaces
huggingface-hub>=0.33.2
# via
# accelerate
# gradio
# gradio-client
# tokenizers
# transformers
idna>=3.10
# via
# anyio
# httpx
# requests
jinja2>=3.1.6
# via
# gradio
# torch
markdown-it-py>=3.0.0
# via rich
markupsafe>=3.0.2
# via
# gradio
# jinja2
mdurl>=0.1.2
# via markdown-it-py
mpmath>=1.3.0
# via sympy
networkx
# via torch
numpy
# via
# accelerate
# bitsandbytes
# gradio
# pandas
# transformers
nvidia-cublas-cu12>=12.1.3.1
# via
# nvidia-cudnn-cu12
# nvidia-cusolver-cu12
# torch
nvidia-cuda-cupti-cu12>=12.1.105
# via torch
nvidia-cuda-nvrtc-cu12>=12.1.105
# via torch
nvidia-cuda-runtime-cu12>=12.1.105
# via torch
nvidia-cudnn-cu12>=9.1.0.70
# via torch
nvidia-cufft-cu12>=11.0.2.54
# via torch
nvidia-curand-cu12>=10.3.2.106
# via torch
nvidia-cusolver-cu12>=11.4.5.107
# via torch
nvidia-cusparse-cu12>=12.1.0.106
# via
# nvidia-cusolver-cu12
# torch
nvidia-nccl-cu12>=2.20.5
# via torch
nvidia-nvjitlink-cu12>=12.9.86
# via
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvtx-cu12>=12.1.105
# via torch
orjson>=3.10.18
# via gradio
packaging>=25.0
# via
# accelerate
# gradio
# gradio-client
# huggingface-hub
# spaces
# transformers
pandas>=2.3.1
# via gradio
pillow>=11.3.0
# via gradio
protobuf>=6.31.1
# via mistral-7b (pyproject.toml)
psutil>=5.9.8
# via
# accelerate
# spaces
pydantic>=2.11.7
# via
# fastapi
# gradio
# spaces
pydantic-core>=2.33.2
# via pydantic
pydub>=0.25.1
# via gradio
pygments>=2.19.2
# via rich
python-dateutil>=2.9.0.post0
# via pandas
python-multipart>=0.0.20
# via gradio
pytz>=2025.2
# via pandas
pyyaml>=6.0.2
# via
# accelerate
# gradio
# huggingface-hub
# transformers
regex>=2024.11.6
# via transformers
requests>=2.32.4
# via
# huggingface-hub
# spaces
# transformers
rich>=14.0.0
# via typer
ruff>=0.12.2
# via gradio
safehttpx>=0.1.6
# via gradio
safetensors>=0.5.3
# via
# accelerate
# transformers
semantic-version>=2.10.0
# via gradio
sentencepiece>=0.2.0
# via mistral-7b (pyproject.toml)
setuptools>=80.9.0
# via torch
shellingham>=1.5.4
# via typer
six>=1.17.0
# via python-dateutil
sniffio>=1.3.1
# via anyio
spaces>=0.37.1
# via mistral-7b (pyproject.toml)
starlette>=0.46.2
# via
# fastapi
# gradio
sympy>=1.14.0
# via torch
tokenizers>=0.21.2
# via transformers
tomlkit>=0.13.3
# via gradio
torch>=2.4.0
# via
# mistral-7b (pyproject.toml)
# accelerate
# bitsandbytes
tqdm>=4.67.1
# via
# huggingface-hub
# transformers
transformers>=4.53.1
# via mistral-7b (pyproject.toml)
triton>=3.0.0
# via torch
typer>=0.16.0
# via gradio
typing-extensions>=4.14.1
# via
# anyio
# fastapi
# gradio
# gradio-client
# huggingface-hub
# pydantic
# pydantic-core
# spaces
# torch
# typer
# typing-inspection
typing-inspection>=0.4.1
# via pydantic
tzdata>=2025.2
# via pandas
urllib3>=2.5.0
# via requests
uvicorn>=0.35.0
# via gradio
websockets>=15.0.1
# via gradio-client