# Optional: copy to services/chandra/upstream/local.env (see upstream README).
# Or export before running ./run-chandra.sh

# MODEL_CHECKPOINT=datalab-to/chandra-ocr-2
# MAX_OUTPUT_TOKENS=12384

# vLLM (default inference path for lightweight pip install)
# VLLM_API_BASE=http://localhost:8000/v1
# VLLM_MODEL_NAME=chandra
# VLLM_GPUS=0