From 92985b879664bec993b574d949c5a4f865d86b16 Mon Sep 17 00:00:00 2001 From: Ankur-singh Date: Mon, 13 Apr 2026 11:46:36 -0700 Subject: [PATCH 1/6] Update Qwen3.5 FP8 B200 SGLang: tp8 conc=4, tp4 conc=4-256, new server args Update search-space to tp8 conc=4 and tp4 conc=4-256. Update benchmark script with new server launch: enable-symm-mem, prefill/chunked 16384, mem-fraction-static 0.8, stream-interval 50, tokenizer-path. --- .github/configs/nvidia-master.yaml | 8 ++-- benchmarks/single_node/qwen3.5_fp8_b200.sh | 50 ++++++++-------------- 2 files changed, 21 insertions(+), 37 deletions(-) diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml index 15dc69195..c64c4f3c0 100644 --- a/.github/configs/nvidia-master.yaml +++ b/.github/configs/nvidia-master.yaml @@ -1781,13 +1781,13 @@ qwen3.5-fp8-b200-sglang: - isl: 1024 osl: 1024 search-space: - - { tp: 8, ep: 1, conc-start: 4, conc-end: 16 } - - { tp: 4, ep: 4, conc-start: 16, conc-end: 128 } + - { tp: 8, conc-start: 4, conc-end: 4 } + - { tp: 4, conc-start: 4, conc-end: 256 } - isl: 8192 osl: 1024 search-space: - - { tp: 8, ep: 1, conc-start: 4, conc-end: 16 } - - { tp: 4, ep: 4, conc-start: 16, conc-end: 128 } + - { tp: 8, conc-start: 4, conc-end: 4 } + - { tp: 4, conc-start: 4, conc-end: 256 } qwen3.5-fp4-b200-sglang: image: lmsysorg/sglang:nightly-dev-20260402-d7256eb6 diff --git a/benchmarks/single_node/qwen3.5_fp8_b200.sh b/benchmarks/single_node/qwen3.5_fp8_b200.sh index 36e5d579d..b374237a6 100755 --- a/benchmarks/single_node/qwen3.5_fp8_b200.sh +++ b/benchmarks/single_node/qwen3.5_fp8_b200.sh @@ -20,55 +20,39 @@ nvidia-smi hf download "$MODEL" -export NCCL_NVLS_ENABLE=1 -export SGL_ENABLE_JIT_DEEPGEMM=false -export SGLANG_ENABLE_FLASHINFER_GEMM=true -export PYTHONUNBUFFERED=1 - SERVER_LOG=/workspace/server.log PORT=${PORT:-8888} -# Default: recv every ~10 requests; if CONC ≥ 16, relax to ~30 requests between scheduler recv polls. 
-if [[ $CONC -ge 16 ]]; then - SCHEDULER_RECV_INTERVAL=30 -else - SCHEDULER_RECV_INTERVAL=10 -fi - -MEM_FRAC_STATIC=0.82 -CHUNKED_PREFILL_SIZE=32768 -MAX_PREFILL_TOKENS=32768 -CUDA_GRAPH_MAX_BATCH_SIZE=$CONC -MAX_RUNNING_REQUESTS=128 CONTEXT_LENGTH=$((ISL + OSL + 20)) if [ "${EVAL_ONLY}" = "true" ]; then setup_eval_context CONTEXT_LENGTH="$EVAL_MAX_MODEL_LEN" fi -if [[ $TP -eq 8 ]]; then - EXTRA_ARGS="--enable-flashinfer-allreduce-fusion" -else - EXTRA_ARGS="" -fi - -echo "SCHEDULER_RECV_INTERVAL: $SCHEDULER_RECV_INTERVAL, CONC: $CONC, ISL: $ISL, OSL: $OSL" - # Start GPU monitoring (power, temperature, clocks every second) start_gpu_monitor set -x PYTHONNOUSERSITE=1 python3 -m sglang.launch_server --model-path=$MODEL --host=0.0.0.0 --port=$PORT \ --trust-remote-code \ ---tensor-parallel-size=$TP --data-parallel-size=1 --ep-size $EP_SIZE \ ---quantization fp8 --kv-cache-dtype fp8_e4m3 \ +--tensor-parallel-size=$TP --data-parallel-size=1 --expert-parallel-size=$EP_SIZE \ +--enable-symm-mem \ +--disable-radix-cache \ +--quantization fp8 \ +--kv-cache-dtype fp8_e4m3 \ --mamba-ssm-dtype bfloat16 \ ---cuda-graph-max-bs $CUDA_GRAPH_MAX_BATCH_SIZE --max-running-requests $MAX_RUNNING_REQUESTS \ ---mem-fraction-static $MEM_FRAC_STATIC --chunked-prefill-size $CHUNKED_PREFILL_SIZE --max-prefill-tokens $MAX_PREFILL_TOKENS \ ---context-length $CONTEXT_LENGTH --disable-radix-cache \ ---attention-backend trtllm_mha --moe-runner-backend flashinfer_trtllm \ -$EXTRA_ARGS --scheduler-recv-interval $SCHEDULER_RECV_INTERVAL \ ---tokenizer-worker-num 6 --stream-interval 30 > $SERVER_LOG 2>&1 & +--attention-backend trtllm_mha \ +--moe-runner-backend flashinfer_trtllm \ +--cuda-graph-max-bs $CONC \ +--max-running-requests $CONC \ +--max-prefill-tokens 16384 \ +--chunked-prefill-size 16384 \ +--mem-fraction-static 0.8 \ +--stream-interval 50 \ +--scheduler-recv-interval 10 \ +--tokenizer-worker-num 6 \ +--tokenizer-path $MODEL \ +--context-length $CONTEXT_LENGTH > $SERVER_LOG 2>&1 & 
SERVER_PID=$! From 2b15d3d2213ec5bcad36ed61d2125192678760fe Mon Sep 17 00:00:00 2001 From: "claude[bot]" <41898282+claude[bot]@users.noreply.github.com> Date: Mon, 13 Apr 2026 18:53:54 +0000 Subject: [PATCH 2/6] Add perf-changelog entry for Qwen3.5 FP8 B200 SGLang config update Co-authored-by: Ankur Singh --- perf-changelog.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/perf-changelog.yaml b/perf-changelog.yaml index 19e7988e6..dbae55d0f 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -1,3 +1,13 @@ +- config-keys: + - qwen3.5-fp8-b200-sglang + description: + - "Update search-space: tp8 conc=4, tp4 conc=4-256 (both seq-lens); remove explicit ep from search-space" + - "Update server launch args: add --enable-symm-mem, --disable-radix-cache, --tokenizer-path" + - "Reduce chunked-prefill-size/max-prefill-tokens from 32768 to 16384, mem-fraction-static from 0.82 to 0.8" + - "Set max-running-requests to $CONC, stream-interval to 50, fixed scheduler-recv-interval to 10" + - "Remove env vars (NCCL_NVLS_ENABLE, SGL_ENABLE_JIT_DEEPGEMM, SGLANG_ENABLE_FLASHINFER_GEMM) and conditional flashinfer-allreduce-fusion" + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1027 + - config-keys: - kimik2.5-int4-mi300x-vllm description: From 82d3275515668697d2cc2f4108f3d2b2d0050d73 Mon Sep 17 00:00:00 2001 From: Ankur Singh Date: Mon, 13 Apr 2026 11:56:34 -0700 Subject: [PATCH 3/6] Update perf-changelog.yaml --- perf-changelog.yaml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/perf-changelog.yaml b/perf-changelog.yaml index dbae55d0f..3188be116 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -1,13 +1,3 @@ -- config-keys: - - qwen3.5-fp8-b200-sglang - description: - - "Update search-space: tp8 conc=4, tp4 conc=4-256 (both seq-lens); remove explicit ep from search-space" - - "Update server launch args: add --enable-symm-mem, --disable-radix-cache, --tokenizer-path" - - "Reduce 
chunked-prefill-size/max-prefill-tokens from 32768 to 16384, mem-fraction-static from 0.82 to 0.8" - - "Set max-running-requests to $CONC, stream-interval to 50, fixed scheduler-recv-interval to 10" - - "Remove env vars (NCCL_NVLS_ENABLE, SGL_ENABLE_JIT_DEEPGEMM, SGLANG_ENABLE_FLASHINFER_GEMM) and conditional flashinfer-allreduce-fusion" - pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1027 - - config-keys: - kimik2.5-int4-mi300x-vllm description: @@ -1332,3 +1322,13 @@ description: - "Qwen3.5 fp4 support on SGL" pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1006 + +- config-keys: + - qwen3.5-fp8-b200-sglang + description: + - "Update search-space: tp8 conc=4, tp4 conc=4-256 (both seq-lens); remove explicit ep from search-space" + - "Update server launch args: add --enable-symm-mem, --disable-radix-cache, --tokenizer-path" + - "Reduce chunked-prefill-size/max-prefill-tokens from 32768 to 16384, mem-fraction-static from 0.82 to 0.8" + - "Set max-running-requests to $CONC, stream-interval to 50, fixed scheduler-recv-interval to 10" + - "Remove env vars (NCCL_NVLS_ENABLE, SGL_ENABLE_JIT_DEEPGEMM, SGLANG_ENABLE_FLASHINFER_GEMM) and conditional flashinfer-allreduce-fusion" + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1027 From 5d6a8e40aed4cc3d4b38611f593b484dbd8a5eca Mon Sep 17 00:00:00 2001 From: Ankur-singh Date: Mon, 13 Apr 2026 14:51:14 -0700 Subject: [PATCH 4/6] Update Qwen3.5 FP8 B200 SGLang image to v0.5.10.post1-cu130 --- .github/configs/nvidia-master.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml index c64c4f3c0..c06e4eefa 100644 --- a/.github/configs/nvidia-master.yaml +++ b/.github/configs/nvidia-master.yaml @@ -1770,7 +1770,7 @@ qwen3.5-bf16-b200-sglang: - { tp: 8, ep: 1, conc-start: 4, conc-end: 64 } qwen3.5-fp8-b200-sglang: - image: lmsysorg/sglang:v0.5.9-cu130-amd64 + image: lmsysorg/sglang:v0.5.10.post1-cu130 model: 
Qwen/Qwen3.5-397B-A17B-FP8 model-prefix: qwen3.5 runner: b200 From bf116b2f5ec95de3fa8bf2fa45c0c49921135088 Mon Sep 17 00:00:00 2001 From: Ankur-singh Date: Tue, 14 Apr 2026 12:00:21 -0700 Subject: [PATCH 5/6] Update chunked_prefill_size and max_prefill_tokens to 81920 --- benchmarks/single_node/qwen3.5_fp8_b200.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmarks/single_node/qwen3.5_fp8_b200.sh b/benchmarks/single_node/qwen3.5_fp8_b200.sh index b374237a6..08cbca4ad 100755 --- a/benchmarks/single_node/qwen3.5_fp8_b200.sh +++ b/benchmarks/single_node/qwen3.5_fp8_b200.sh @@ -45,8 +45,8 @@ PYTHONNOUSERSITE=1 python3 -m sglang.launch_server --model-path=$MODEL --host=0. --moe-runner-backend flashinfer_trtllm \ --cuda-graph-max-bs $CONC \ --max-running-requests $CONC \ ---max-prefill-tokens 16384 \ ---chunked-prefill-size 16384 \ +--max-prefill-tokens 81920 \ +--chunked-prefill-size 81920 \ --mem-fraction-static 0.8 \ --stream-interval 50 \ --scheduler-recv-interval 10 \ From 8c7991506c9948ca7938d50a390e15bbdfdb867f Mon Sep 17 00:00:00 2001 From: Ankur-singh Date: Wed, 15 Apr 2026 12:09:59 -0700 Subject: [PATCH 6/6] update config to include ep4 update config to include ep4 --- .github/configs/nvidia-master.yaml | 4 ++-- perf-changelog.yaml | 17 ++++++----------- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml index 0ac58aba4..f348dc146 100644 --- a/.github/configs/nvidia-master.yaml +++ b/.github/configs/nvidia-master.yaml @@ -1782,12 +1782,12 @@ qwen3.5-fp8-b200-sglang: osl: 1024 search-space: - { tp: 8, conc-start: 4, conc-end: 4 } - - { tp: 4, conc-start: 4, conc-end: 256 } + - { tp: 4, ep: 4, conc-start: 4, conc-end: 256 } - isl: 8192 osl: 1024 search-space: - { tp: 8, conc-start: 4, conc-end: 4 } - - { tp: 4, conc-start: 4, conc-end: 256 } + - { tp: 4, ep: 4, conc-start: 4, conc-end: 256 } qwen3.5-fp4-b200-sglang: image: 
lmsysorg/sglang:nightly-dev-20260402-d7256eb6 diff --git a/perf-changelog.yaml b/perf-changelog.yaml index 3bec1b901..dfabb9edf 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -1347,17 +1347,6 @@ - "Bump GLM-5 FP8 B200 SGLang concurrency from 128 to 256" pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1012 - -- config-keys: - - qwen3.5-fp8-b200-sglang - description: - - "Update search-space: tp8 conc=4, tp4 conc=4-256 (both seq-lens); remove explicit ep from search-space" - - "Update server launch args: add --enable-symm-mem, --disable-radix-cache, --tokenizer-path" - - "Reduce chunked-prefill-size/max-prefill-tokens from 32768 to 16384, mem-fraction-static from 0.82 to 0.8" - - "Set max-running-requests to $CONC, stream-interval to 50, fixed scheduler-recv-interval to 10" - - "Remove env vars (NCCL_NVLS_ENABLE, SGL_ENABLE_JIT_DEEPGEMM, SGLANG_ENABLE_FLASHINFER_GEMM) and conditional flashinfer-allreduce-fusion" - pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1027 - - config-keys: - qwen3.5-fp4-mi355x-sglang description: @@ -1369,3 +1358,9 @@ description: - "Enable SGLANG_ENABLE_SPEC_V2=1 for Qwen3.5 FP8 H200 SGLang MTP" pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1017 + +- config-keys: + - qwen3.5-fp8-b200-sglang + description: + - updated container image to `v0.5.10.post1-cu130`; re-added ep: 4 to the tp4 search-space entries + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1027