We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
2 parents b7c6d4f + f103c14 commit 407dbd7 — Copy full SHA for 407dbd7
1 file changed
Dockerfile
@@ -5,9 +5,9 @@ RUN apt-get update -y \
5
6
RUN ldconfig /usr/local/cuda-12.9/compat/
7
8
-# Install vLLM with FlashInfer - use CUDA 12.8 PyTorch wheels (compatible with vLLM 0.15.0)
+# Install vLLM with FlashInfer - use CUDA 12.8 PyTorch wheels (compatible with vLLM 0.15.1)
9
RUN python3 -m pip install --upgrade pip && \
10
- python3 -m pip install "vllm[flashinfer]==0.15.0" --extra-index-url https://download.pytorch.org/whl/cu129
+ python3 -m pip install "vllm[flashinfer]==0.15.1" --extra-index-url https://download.pytorch.org/whl/cu129
11
12
13
0 commit comments