Adding SGLang backend [with dependency fix] #491
base: main
Changes from all commits: 5a27d33, b295064, 62ddc97, d554c58, 70037a0, dbfc078
Dockerfile:

```diff
@@ -33,7 +33,13 @@ COPY --link . .
 # Build the Go binary (static build)
 RUN --mount=type=cache,target=/go/pkg/mod \
     --mount=type=cache,target=/root/.cache/go-build \
-    CGO_ENABLED=1 GOOS=linux go build -ldflags="-s -w" -o model-runner ./main.go
+    CGO_ENABLED=1 GOOS=linux go build -ldflags="-s -w" -o model-runner .
+
+# Build the Go binary for SGLang (without vLLM)
+FROM builder AS builder-sglang
+RUN --mount=type=cache,target=/go/pkg/mod \
+    --mount=type=cache,target=/root/.cache/go-build \
+    CGO_ENABLED=1 GOOS=linux go build -tags=novllm -ldflags="-s -w" -o model-runner .

 # --- Get llama.cpp binary ---
 FROM docker/docker-model-backend-llamacpp:${LLAMA_SERVER_VERSION}-${LLAMA_SERVER_VARIANT} AS llama-server
```
```diff
@@ -97,21 +103,58 @@ USER modelrunner

 # Install uv and vLLM as modelrunner user
 RUN curl -LsSf https://astral.sh/uv/install.sh | sh \
     && ~/.local/bin/uv venv --python /usr/bin/python3 /opt/vllm-env \
     && if [ "$TARGETARCH" = "amd64" ]; then \
         WHEEL_ARCH="manylinux_2_31_x86_64"; \
         WHEEL_URL="https://github.com/vllm-project/vllm/releases/download/v${VLLM_VERSION}/vllm-${VLLM_VERSION}%2B${VLLM_CUDA_VERSION}-${VLLM_PYTHON_TAG}-${WHEEL_ARCH}.whl"; \
         ~/.local/bin/uv pip install --python /opt/vllm-env/bin/python "$WHEEL_URL"; \
     else \
         ~/.local/bin/uv pip install --python /opt/vllm-env/bin/python "vllm==${VLLM_VERSION}"; \
     fi

 RUN /opt/vllm-env/bin/python -c "import vllm; print(vllm.__version__)" > /opt/vllm-env/version

+# --- SGLang variant ---
+FROM llamacpp AS sglang
+
+ARG SGLANG_VERSION=0.5.6
+
+USER root
+
+# Install CUDA toolkit 13 for nvcc (needed for flashinfer JIT compilation)
+RUN apt update && apt install -y \
+    python3 python3-venv python3-dev \
+    curl ca-certificates build-essential \
+    libnuma1 libnuma-dev numactl ninja-build \
+    wget gnupg \
+    && wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb \
+    && dpkg -i cuda-keyring_1.1-1_all.deb \
+    && apt update && apt install -y cuda-toolkit-13-0 \
```
Comment on lines +130 to +132 — Contributor:

The Dockerfile is configured to use an …
Comment on lines +124 to +132 — Contributor:

suggestion (bug_risk): Installing CUDA toolkit 13.0 on top of a CUDA 12.9 runtime image may introduce version skew and significantly increase image size. This stage installs …

Suggested implementation: If SGLang/flashinfer explicitly requires CUDA 13.x features, a more robust solution would be: …
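The reviewer's suggested implementation is cut off above. Purely as an illustrative sketch (not necessarily what the reviewer had in mind, and assuming NVIDIA's ubuntu2204 repository provides a `cuda-toolkit-12-9` package), one way to avoid the skew would be to install the toolkit release that matches the CUDA runtime assumed to ship with the base image (12.9, per the comment) instead of `cuda-toolkit-13-0`:

```dockerfile
# Illustrative only: match nvcc to the CUDA 12.9 runtime assumed to be present
# in the base image, rather than layering CUDA 13.0 on top of it.
RUN apt update && apt install -y --no-install-recommends cuda-toolkit-12-9 \
    && rm -rf /var/lib/apt/lists/*

# Point PATH/LD_LIBRARY_PATH at the matching toolkit location.
ENV PATH=/usr/local/cuda-12.9/bin:$PATH
ENV LD_LIBRARY_PATH=/usr/local/cuda-12.9/lib64:$LD_LIBRARY_PATH
```

Whether this would satisfy flashinfer's JIT-compilation requirements depends on whether it genuinely needs CUDA 13.x, which is the question the comment raises.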
```diff
+    && rm cuda-keyring_1.1-1_all.deb \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN mkdir -p /opt/sglang-env && chown -R modelrunner:modelrunner /opt/sglang-env
+
+USER modelrunner
+
+# Set CUDA paths for nvcc (needed during flashinfer compilation)
+ENV PATH=/usr/local/cuda-13.0/bin:$PATH
+ENV LD_LIBRARY_PATH=/usr/local/cuda-13.0/lib64:$LD_LIBRARY_PATH
+
+# Install uv and SGLang as modelrunner user
+RUN curl -LsSf https://astral.sh/uv/install.sh | sh \
+    && ~/.local/bin/uv venv --python /usr/bin/python3 /opt/sglang-env \
+    && ~/.local/bin/uv pip install --python /opt/sglang-env/bin/python "sglang==${SGLANG_VERSION}"
+
+RUN /opt/sglang-env/bin/python -c "import sglang; print(sglang.__version__)" > /opt/sglang-env/version
+
 FROM llamacpp AS final-llamacpp
 # Copy the built binary from builder
 COPY --from=builder /app/model-runner /app/model-runner

 FROM vllm AS final-vllm
 # Copy the built binary from builder
 COPY --from=builder /app/model-runner /app/model-runner
+
+FROM sglang AS final-sglang
+# Copy the built binary from builder-sglang (without vLLM)
+COPY --from=builder-sglang /app/model-runner /app/model-runner
```
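With these stages in place, an SGLang-only image can presumably be produced by selecting the new target, e.g. `docker build --target final-sglang .`, while the existing `final-llamacpp` and `final-vllm` targets keep their current behaviour (both still copy the binary from the default `builder` stage).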
New Go source file added by this PR — wires up the vLLM backend and is compiled by default (i.e. without the `novllm` build tag):

```go
//go:build !novllm

package main

import (
	"github.com/docker/model-runner/pkg/inference"
	"github.com/docker/model-runner/pkg/inference/backends/vllm"
	"github.com/docker/model-runner/pkg/inference/models"
	"github.com/sirupsen/logrus"
)

func initVLLMBackend(log *logrus.Logger, modelManager *models.Manager) (inference.Backend, error) {
	return vllm.New(
		log,
		modelManager,
		log.WithFields(logrus.Fields{"component": vllm.Name}),
		nil,
	)
}

func registerVLLMBackend(backends map[string]inference.Backend, backend inference.Backend) {
	backends[vllm.Name] = backend
}
```
New Go source file added by this PR — stubs out the vLLM backend when built with `-tags=novllm`:

```go
//go:build novllm

package main

import (
	"github.com/docker/model-runner/pkg/inference"
	"github.com/docker/model-runner/pkg/inference/models"
	"github.com/sirupsen/logrus"
)

func initVLLMBackend(log *logrus.Logger, modelManager *models.Manager) (inference.Backend, error) {
	return nil, nil
}

func registerVLLMBackend(backends map[string]inference.Backend, backend inference.Backend) {
	// No-op when vLLM is disabled
}
```
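Taken together, the two files give the main package a single pair of functions whose behaviour is selected by the build tag. A hypothetical sketch of the call site (not code from this PR; `buildBackends` and its parameters are illustrative, and the imports are the same as in the files above):

```go
// Hypothetical illustration, not part of this PR: how the main package could
// consume the two build-tag variants. Under the default build,
// initVLLMBackend returns a real vLLM backend; under -tags=novllm it returns
// (nil, nil) and registerVLLMBackend is a no-op, so the same code compiles
// and behaves sensibly in both variants.
func buildBackends(log *logrus.Logger, modelManager *models.Manager) (map[string]inference.Backend, error) {
	backends := make(map[string]inference.Backend)

	vllmBackend, err := initVLLMBackend(log, modelManager)
	if err != nil {
		return nil, err
	}
	if vllmBackend != nil {
		registerVLLMBackend(backends, vllmBackend)
	}
	return backends, nil
}
```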
To keep the Docker image size minimal, it's good practice to use the `--no-install-recommends` flag with `apt install`. This prevents the installation of optional packages that are not strictly necessary.
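Applied to the `apt` line in the new `sglang` stage, that suggestion would look roughly like this (a sketch only; the package list is taken from the diff above, and the CUDA keyring/toolkit steps are omitted for brevity):

```dockerfile
# Sketch: same system packages as the PR's sglang stage, with
# --no-install-recommends so apt skips optional recommended packages,
# and the apt lists removed in the same layer to keep the image small.
RUN apt update && apt install -y --no-install-recommends \
    python3 python3-venv python3-dev \
    curl ca-certificates build-essential \
    libnuma1 libnuma-dev numactl ninja-build \
    wget gnupg \
    && rm -rf /var/lib/apt/lists/*
```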