Spaces:
Running
Running
Add Dockerfile and HF Spaces deployment config
Browse files
- Multi-stage Docker build (Node frontend + Python backend)
- Backend serves built frontend as static files in production
- HF Spaces metadata in README frontmatter
- CONCENTRATE_API_KEY read from env (set as HF Space secret)
Made-with: Cursor
- Dockerfile +26 -0
- README.md +9 -0
- backend/main.py +20 -2
Dockerfile
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Stage 1: Build the React frontend
|
| 2 |
+
FROM node:20-slim AS frontend-build
|
| 3 |
+
WORKDIR /app/frontend
|
| 4 |
+
COPY frontend/package.json frontend/package-lock.json ./
|
| 5 |
+
RUN npm ci
|
| 6 |
+
COPY frontend/ ./
|
| 7 |
+
RUN npm run build
|
| 8 |
+
|
| 9 |
+
# Stage 2: Python backend + built frontend
|
| 10 |
+
FROM python:3.11-slim
|
| 11 |
+
WORKDIR /app
|
| 12 |
+
|
| 13 |
+
RUN useradd -m -u 1000 appuser
|
| 14 |
+
|
| 15 |
+
COPY backend/requirements.txt .
|
| 16 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 17 |
+
|
| 18 |
+
COPY backend/main.py .
|
| 19 |
+
COPY --from=frontend-build /app/frontend/dist ./static/
|
| 20 |
+
|
| 21 |
+
RUN chown -R appuser:appuser /app
|
| 22 |
+
USER appuser
|
| 23 |
+
|
| 24 |
+
EXPOSE 7860
|
| 25 |
+
|
| 26 |
+
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
README.md
CHANGED
|
@@ -1,3 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
# LLM Arena
|
| 2 |
|
| 3 |
Side-by-side LLM comparison app powered by [Concentrate AI](https://concentrate.ai). Pick any two models from 70+ providers, send the same prompt, and watch both responses stream in real-time with latency, token, and cost stats.
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: LLM Arena
|
| 3 |
+
emoji: ⚔️
|
| 4 |
+
colorFrom: indigo
|
| 5 |
+
colorTo: purple
|
| 6 |
+
sdk: docker
|
| 7 |
+
app_port: 7860
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
# LLM Arena
|
| 11 |
|
| 12 |
Side-by-side LLM comparison app powered by [Concentrate AI](https://concentrate.ai). Pick any two models from 70+ providers, send the same prompt, and watch both responses stream in real-time with latency, token, and cost stats.
|
backend/main.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
| 1 |
import io
|
| 2 |
import json
|
| 3 |
import os
|
|
|
|
| 4 |
import time
|
| 5 |
from typing import Optional
|
| 6 |
|
|
@@ -10,7 +11,8 @@ from docx import Document as DocxDocument
|
|
| 10 |
from dotenv import load_dotenv
|
| 11 |
from fastapi import FastAPI, HTTPException, UploadFile
|
| 12 |
from fastapi.middleware.cors import CORSMiddleware
|
| 13 |
-
from fastapi.responses import StreamingResponse
|
|
|
|
| 14 |
from pydantic import BaseModel, Field
|
| 15 |
|
| 16 |
load_dotenv()
|
|
@@ -22,7 +24,7 @@ app = FastAPI(title="LLM Arena Backend")
|
|
| 22 |
|
| 23 |
app.add_middleware(
|
| 24 |
CORSMiddleware,
|
| 25 |
-
allow_origins=["
|
| 26 |
allow_credentials=True,
|
| 27 |
allow_methods=["*"],
|
| 28 |
allow_headers=["*"],
|
|
@@ -256,3 +258,19 @@ async def stream_response(req: StreamRequest):
|
|
| 256 |
@app.get("/api/health")
|
| 257 |
async def health():
|
| 258 |
return {"status": "ok"}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import io
|
| 2 |
import json
|
| 3 |
import os
|
| 4 |
+
import pathlib
|
| 5 |
import time
|
| 6 |
from typing import Optional
|
| 7 |
|
|
|
|
| 11 |
from dotenv import load_dotenv
|
| 12 |
from fastapi import FastAPI, HTTPException, UploadFile
|
| 13 |
from fastapi.middleware.cors import CORSMiddleware
|
| 14 |
+
from fastapi.responses import FileResponse, StreamingResponse
|
| 15 |
+
from fastapi.staticfiles import StaticFiles
|
| 16 |
from pydantic import BaseModel, Field
|
| 17 |
|
| 18 |
load_dotenv()
|
|
|
|
| 24 |
|
| 25 |
app.add_middleware(
|
| 26 |
CORSMiddleware,
|
| 27 |
+
allow_origins=["*"],
|
| 28 |
allow_credentials=True,
|
| 29 |
allow_methods=["*"],
|
| 30 |
allow_headers=["*"],
|
|
|
|
| 258 |
@app.get("/api/health")
|
| 259 |
async def health():
|
| 260 |
return {"status": "ok"}
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
# ── Static file serving (production / HF Spaces) ─────────────────────────────
|
| 264 |
+
|
| 265 |
+
STATIC_DIR = pathlib.Path(__file__).resolve().parent / "static"
|
| 266 |
+
|
| 267 |
+
if STATIC_DIR.is_dir():
|
| 268 |
+
app.mount("/assets", StaticFiles(directory=STATIC_DIR / "assets"), name="assets")
|
| 269 |
+
|
| 270 |
+
@app.get("/{full_path:path}")
|
| 271 |
+
async def serve_spa(full_path: str):
|
| 272 |
+
"""Serve the React SPA for any non-API route."""
|
| 273 |
+
file_path = STATIC_DIR / full_path
|
| 274 |
+
if file_path.is_file():
|
| 275 |
+
return FileResponse(file_path)
|
| 276 |
+
return FileResponse(STATIC_DIR / "index.html")
|