#
# This file is autogenerated by pip-compile with Python 3.12
# by the following command:
#
#    python3.12 -m piptools compile requirements-test.in -o requirements-test.txt
#
absl-py==2.1.0
    # via rouge-score
accelerate==1.0.1
    # via
    #   lm-eval
    #   peft
aiohappyeyeballs==2.4.3
    # via aiohttp
aiohttp==3.10.10
    # via
    #   datasets
    #   fsspec
    #   lm-eval
aiosignal==1.3.1
    # via
    #   aiohttp
    #   ray
annotated-types==0.7.0
    # via pydantic
anyio==4.6.2.post1
    # via httpx
argcomplete==3.5.1
    # via datamodel-code-generator
attrs==24.2.0
    # via
    #   aiohttp
    #   jsonlines
    #   jsonschema
    #   referencing
audioread==3.0.1
    # via librosa
awscli==1.35.23
    # via -r requirements-test.in
bitsandbytes>=0.45.0
    # via -r requirements-test.in
black==24.10.0
    # via datamodel-code-generator
boto3==1.35.57
    # via tensorizer
botocore==1.35.57
    # via
    #   awscli
    #   boto3
    #   s3transfer
bounded-pool-executor==0.0.3
    # via pqdm
buildkite-test-collector==0.1.9
    # via -r requirements-test.in
certifi==2024.8.30
    # via
    #   httpcore
    #   httpx
    #   requests
cffi==1.17.1
    # via soundfile
chardet==5.2.0
    # via mbstrdecoder
charset-normalizer==3.4.0
    # via requests
click==8.1.7
    # via
    #   black
    #   nltk
    #   ray
colorama==0.4.6
    # via
    #   awscli
    #   sacrebleu
    #   tqdm-multiprocess
contourpy==1.3.0
    # via matplotlib
cupy-cuda12x==13.3.0
    # via ray
cycler==0.12.1
    # via matplotlib
datamodel-code-generator==0.26.3
    # via -r requirements-test.in
dataproperty==1.0.1
    # via
    #   pytablewriter
    #   tabledata
datasets==3.0.2
    # via
    #   evaluate
    #   lm-eval
decorator==5.1.1
    # via librosa
decord==0.6.0
    # via -r requirements-test.in
dill==0.3.8
    # via
    #   datasets
    #   evaluate
    #   lm-eval
    #   multiprocess
dnspython==2.7.0
    # via email-validator
docutils==0.16
    # via awscli
einops==0.8.0
    # via -r requirements-test.in
email-validator==2.2.0
    # via pydantic
evaluate==0.4.3
    # via lm-eval
fastrlock==0.8.2
    # via cupy-cuda12x
filelock==3.16.1
    # via
    #   datasets
    #   huggingface-hub
    #   ray
    #   torch
    #   transformers
    #   triton
fonttools==4.54.1
    # via matplotlib
frozenlist==1.5.0
    # via
    #   aiohttp
    #   aiosignal
    #   ray
fsspec[http]==2024.9.0
    # via
    #   datasets
    #   evaluate
    #   huggingface-hub
    #   torch
genson==1.3.0
    # via datamodel-code-generator
h11==0.14.0
    # via httpcore
hiredis==3.0.0
    # via tensorizer
httpcore==1.0.6
    # via httpx
httpx==0.27.2
    # via -r requirements-test.in
huggingface-hub==0.26.2
    # via
    #   accelerate
    #   datasets
    #   evaluate
    #   peft
    #   sentence-transformers
    #   timm
    #   tokenizers
    #   transformers
idna==3.10
    # via
    #   anyio
    #   email-validator
    #   httpx
    #   requests
    #   yarl
inflect==5.6.2
    # via datamodel-code-generator
iniconfig==2.0.0
    # via pytest
isort==5.13.2
    # via datamodel-code-generator
jinja2==3.1.4
    # via
    #   datamodel-code-generator
    #   torch
jmespath==1.0.1
    # via
    #   boto3
    #   botocore
joblib==1.4.2
    # via
    #   librosa
    #   nltk
    #   scikit-learn
jsonlines==4.0.0
    # via lm-eval
jsonschema==4.23.0
    # via
    #   mistral-common
    #   ray
jsonschema-specifications==2024.10.1
    # via jsonschema
kiwisolver==1.4.7
    # via matplotlib
lazy-loader==0.4
    # via librosa
libnacl==2.1.0
    # via tensorizer
librosa==0.10.2.post1
    # via -r requirements-test.in
llvmlite==0.43.0
    # via numba
lm-eval[api]==0.4.4
    # via -r requirements-test.in
lxml==5.3.0
    # via sacrebleu
markupsafe==3.0.2
    # via jinja2
matplotlib==3.9.2
    # via -r requirements-test.in
mbstrdecoder==1.1.3
    # via
    #   dataproperty
    #   pytablewriter
    #   typepy
mistral-common[opencv]==1.5.1
    # via
    #   -r requirements-test.in
    #   mistral-common
more-itertools==10.5.0
    # via lm-eval
mpmath==1.3.0
    # via sympy
msgpack==1.1.0
    # via
    #   librosa
    #   ray
multidict==6.1.0
    # via
    #   aiohttp
    #   yarl
multiprocess==0.70.16
    # via
    #   datasets
    #   evaluate
mypy-extensions==1.0.0
    # via black
networkx==3.2.1
    # via torch
nltk==3.9.1
    # via rouge-score
numba==0.60.0
    # via librosa
numexpr==2.10.1
    # via lm-eval
numpy==1.26.4
    # via
    #   -r requirements-test.in
    #   accelerate
    #   bitsandbytes
    #   contourpy
    #   cupy-cuda12x
    #   datasets
    #   decord
    #   evaluate
    #   librosa
    #   matplotlib
    #   mistral-common
    #   numba
    #   numexpr
    #   opencv-python-headless
    #   pandas
    #   peft
    #   rouge-score
    #   sacrebleu
    #   scikit-learn
    #   scipy
    #   soxr
    #   tensorizer
    #   torchvision
    #   transformers
nvidia-cublas-cu12==12.4.5.8
    # via
    #   nvidia-cudnn-cu12
    #   nvidia-cusolver-cu12
    #   torch
nvidia-cuda-cupti-cu12==12.4.127
    # via torch
nvidia-cuda-nvrtc-cu12==12.4.127
    # via torch
nvidia-cuda-runtime-cu12==12.4.127
    # via torch
nvidia-cudnn-cu12==9.1.0.70
    # via torch
nvidia-cufft-cu12==11.2.1.3
    # via torch
nvidia-curand-cu12==10.3.5.147
    # via torch
nvidia-cusolver-cu12==11.6.1.9
    # via torch
nvidia-cusparse-cu12==12.3.1.170
    # via
    #   nvidia-cusolver-cu12
    #   torch
nvidia-nccl-cu12==2.21.5
    # via torch
nvidia-nvjitlink-cu12==12.4.127
    # via
    #   nvidia-cusolver-cu12
    #   nvidia-cusparse-cu12
    #   torch
nvidia-nvtx-cu12==12.4.127
    # via torch
opencv-python-headless==4.10.0.84
    # via mistral-common
packaging==24.1
    # via
    #   accelerate
    #   black
    #   datamodel-code-generator
    #   datasets
    #   evaluate
    #   huggingface-hub
    #   lazy-loader
    #   matplotlib
    #   peft
    #   pooch
    #   pytest
    #   pytest-rerunfailures
    #   ray
    #   transformers
    #   typepy
pandas==2.2.3
    # via
    #   datasets
    #   evaluate
pathspec==0.12.1
    # via black
pathvalidate==3.2.1
    # via pytablewriter
peft==0.13.2
    # via
    #   -r requirements-test.in
    #   lm-eval
pillow==10.4.0
    # via
    #   matplotlib
    #   mistral-common
    #   sentence-transformers
    #   torchvision
platformdirs==4.3.6
    # via
    #   black
    #   pooch
pluggy==1.5.0
    # via pytest
pooch==1.8.2
    # via librosa
portalocker==2.10.1
    # via sacrebleu
pqdm==0.2.0
    # via -r requirements-test.in
propcache==0.2.0
    # via yarl
protobuf==5.28.3
    # via
    #   ray
    #   tensorizer
psutil==6.1.0
    # via
    #   accelerate
    #   peft
    #   tensorizer
py==1.11.0
    # via pytest-forked
pyarrow==18.0.0
    # via datasets
pyasn1==0.6.1
    # via rsa
pybind11==2.13.6
    # via lm-eval
pycparser==2.22
    # via cffi
pydantic[email]==2.9.2
    # via
    #   datamodel-code-generator
    #   mistral-common
pydantic-core==2.23.4
    # via pydantic
pyparsing==3.2.0
    # via matplotlib
pytablewriter==1.2.0
    # via lm-eval
pytest==8.3.3
    # via
    #   -r requirements-test.in
    #   buildkite-test-collector
    #   pytest-asyncio
    #   pytest-forked
    #   pytest-rerunfailures
    #   pytest-shard
pytest-asyncio==0.24.0
    # via -r requirements-test.in
pytest-forked==1.6.0
    # via -r requirements-test.in
pytest-rerunfailures==14.0
    # via -r requirements-test.in
pytest-shard==0.1.2
    # via -r requirements-test.in
python-dateutil==2.9.0.post0
    # via
    #   botocore
    #   matplotlib
    #   pandas
    #   typepy
pytz==2024.2
    # via
    #   pandas
    #   typepy
pyyaml==6.0.2
    # via
    #   accelerate
    #   awscli
    #   datamodel-code-generator
    #   datasets
    #   huggingface-hub
    #   peft
    #   ray
    #   timm
    #   transformers
ray[adag]==2.40.0
    # via -r requirements-test.in
redis==5.2.0
    # via tensorizer
referencing==0.35.1
    # via
    #   jsonschema
    #   jsonschema-specifications
regex==2024.9.11
    # via
    #   nltk
    #   sacrebleu
    #   tiktoken
    #   transformers
requests==2.32.3
    # via
    #   buildkite-test-collector
    #   datasets
    #   evaluate
    #   huggingface-hub
    #   lm-eval
    #   mistral-common
    #   pooch
    #   ray
    #   tiktoken
    #   transformers
rouge-score==0.1.2
    # via lm-eval
rpds-py==0.20.1
    # via
    #   jsonschema
    #   referencing
rsa==4.7.2
    # via awscli
s3transfer==0.10.3
    # via
    #   awscli
    #   boto3
sacrebleu==2.4.3
    # via lm-eval
safetensors==0.4.5
    # via
    #   accelerate
    #   peft
    #   timm
    #   transformers
scikit-learn==1.5.2
    # via
    #   librosa
    #   lm-eval
    #   sentence-transformers
scipy==1.13.1
    # via
    #   librosa
    #   scikit-learn
    #   sentence-transformers
sentence-transformers==3.2.1
    # via -r requirements-test.in
sentencepiece==0.2.0
    # via mistral-common
six==1.16.0
    # via
    #   python-dateutil
    #   rouge-score
sniffio==1.3.1
    # via
    #   anyio
    #   httpx
soundfile==0.12.1
    # via
    #   -r requirements-test.in
    #   librosa
soxr==0.5.0.post1
    # via librosa
sqlitedict==2.1.0
    # via lm-eval
sympy==1.13.1
    # via torch
tabledata==1.3.3
    # via pytablewriter
tabulate==0.9.0
    # via sacrebleu
tcolorpy==0.1.6
    # via pytablewriter
tenacity==9.0.0
    # via lm-eval
tensorizer==2.9.0
    # via -r requirements-test.in
threadpoolctl==3.5.0
    # via scikit-learn
tiktoken==0.7.0
    # via
    #   lm-eval
    #   mistral-common
timm==1.0.11
    # via -r requirements-test.in
tokenizers==0.21.0
    # via transformers
torch==2.5.1
    # via
    #   -r requirements-test.in
    #   accelerate
    #   bitsandbytes
    #   lm-eval
    #   peft
    #   sentence-transformers
    #   tensorizer
    #   timm
    #   torchvision
torchvision==0.20.1
    # via timm
tqdm==4.66.6
    # via
    #   datasets
    #   evaluate
    #   huggingface-hub
    #   lm-eval
    #   nltk
    #   peft
    #   sentence-transformers
    #   tqdm-multiprocess
    #   transformers
tqdm-multiprocess==0.0.11
    # via lm-eval
transformers==4.47.0
    # via
    #   lm-eval
    #   peft
    #   sentence-transformers
    #   transformers-stream-generator
transformers-stream-generator==0.0.5
    # via -r requirements-test.in
triton==3.1.0
    # via torch
typepy[datetime]==1.3.2
    # via
    #   dataproperty
    #   pytablewriter
    #   tabledata
typing-extensions==4.12.2
    # via
    #   huggingface-hub
    #   librosa
    #   mistral-common
    #   pydantic
    #   pydantic-core
    #   torch
tzdata==2024.2
    # via pandas
urllib3==1.26.20
    # via
    #   botocore
    #   requests
word2number==1.1
    # via lm-eval
xxhash==3.5.0
    # via
    #   datasets
    #   evaluate
yarl==1.17.1
    # via aiohttp
zstandard==0.23.0
    # via lm-eval

# The following packages are considered to be unsafe in a requirements file:
# setuptools