# llamacpp-liquid / Dockerfile
FROM debian:bookworm-slim
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y --no-install-recommends \
        git build-essential cmake curl ca-certificates pkg-config libcurl4-openssl-dev \
    && rm -rf /var/lib/apt/lists/*
# build llama.cpp (HTTP server in tools/server)
WORKDIR /app
RUN git clone --depth 1 https://github.com/ggml-org/llama.cpp.git \
    && cd llama.cpp && mkdir -p build && cd build \
    && cmake -DCMAKE_BUILD_TYPE=Release \
        -DGGML_NATIVE=ON \
        -DLLAMA_BUILD_EXAMPLES=ON \
        -DLLAMA_BUILD_SERVER=ON \
        -DLLAMA_BUILD_TESTS=OFF \
        .. \
    && cmake --build . --target llama-server -j
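# Note: -DGGML_NATIVE=ON tunes the binary for the CPU of the *build* machine.
# If this image may run on different hardware, -DGGML_NATIVE=OFF is the more
# portable (if somewhat slower) choice.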
# fetch LiquidAI LFM2-350M GGUF
RUN mkdir -p /models && \
    curl -fL --retry 5 --retry-delay 2 -o /models/model.gguf \
        "https://huggingface.co/LiquidAI/LFM2-350M-GGUF/resolve/main/LFM2-350M-Q4_K_M.gguf?download=true"
EXPOSE 7860
CMD ["/app/llama.cpp/build/bin/llama-server","-m","/models/model.gguf","-c","2048","-ngl","0","-t","4","--host","0.0.0.0","--port","7860"]