# Update to the vllm-ascend v0.10.2rc1 image and start the container
export IMAGE=quay.io/ascend/vllm-ascend:v0.10.2rc1-openeuler
docker run --rm \
--name vllm-ascend \
--device /dev/davinci0 \
--device /dev/davinci1 \
--device /dev/davinci2 \
--device /dev/davinci3 \
--device /dev/davinci_manager \
--device /dev/devmm_svm \
--device /dev/hisi_hdc \
-v /usr/local/dcmi:/usr/local/dcmi \
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
-v /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/ \
-v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info \
-v /etc/ascend_install.info:/etc/ascend_install.info \
-v /root/.cache:/root/.cache \
-p 8000:8000 \
-it $IMAGE bash
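
Before going further, it can help to confirm that the NPUs mapped by the docker run command are visible inside the container. A minimal sketch, assuming the image ships PyTorch together with the torch_npu Ascend adapter:

import torch
import torch_npu  # Ascend NPU plugin for PyTorch (assumed to be preinstalled in the image)

# Expect True and 4, matching the four davinci devices passed to docker run above
print(torch.npu.is_available())
print(torch.npu.device_count())
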
# Install Bisheng compiler
wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/Ascend-BiSheng-toolkit_aarch64.run
chmod a+x Ascend-BiSheng-toolkit_aarch64.run
./Ascend-BiSheng-toolkit_aarch64.run --install
source /usr/local/Ascend/8.3.RC1/bisheng_toolkit/set_env.sh
# Install Triton Ascend
wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/triton_ascend-3.2.0.dev20250914-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl
pip install triton_ascend-3.2.0.dev20250914-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl
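
A quick import check confirms the wheel installed cleanly. This is a sketch that assumes the triton_ascend wheel exposes the standard triton module name and version attribute:

import triton

# Should print the Triton Ascend version matching the wheel installed above
print(triton.__version__)
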
# Use VLLM_USE_MODELSCOPE to speed up model downloads
export VLLM_USE_MODELSCOPE=true
# Qwen3-Next currently has two released 80B models:
# Qwen3-Next-80B-A3B-Thinking
# Qwen3-Next-80B-A3B-Instruct
# Start the online inference service
vllm serve Qwen/Qwen3-Next-80B-A3B-Instruct --tensor-parallel-size 4 --enforce-eager
# Query the inference service with curl
curl http://localhost:8000/v1/completions \
-H "Content-Type: application/json" \
-d '{"model": "Qwen/Qwen3-Next-80B-A3B-Instruct ", "prompt": "The future of AI is", "max_tokens": 5, "temperature": 0}' | python3 -m json.tool

The model can also be run offline. The following script (saved as example.py) performs a batch generation and then releases the distributed state and NPU memory:

import gc

import torch
from vllm import LLM, SamplingParams
from vllm.distributed.parallel_state import (destroy_distributed_environment,
                                             destroy_model_parallel)


def clean_up():
    # Tear down the distributed state and release NPU memory after inference
    destroy_model_parallel()
    destroy_distributed_environment()
    gc.collect()
    torch.npu.empty_cache()


if __name__ == '__main__':
    prompts = [
        "Who are you?",
    ]
    sampling_params = SamplingParams(temperature=0.6, top_p=0.95, top_k=40, max_tokens=32)
    llm = LLM(model="Qwen/Qwen3-Next-80B-A3B-Instruct",
              tensor_parallel_size=4,
              enforce_eager=True,
              distributed_executor_backend="mp",
              max_model_len=4096)

    outputs = llm.generate(prompts, sampling_params)
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")

    del llm
    clean_up()
# Use VLLM_USE_MODELSCOPE to speed up model downloads
export VLLM_USE_MODELSCOPE=true
python example.py
Project repository: https://github.com/vllm-project/vllm-ascend
Documentation: https://vllm-ascend.readthedocs.io
Issue tracker: https://github.com/vllm-project/vllm-ascend/issues
Tutorials: https://vllm-ascend.readthedocs.io/en/latest/tutorials
GitCode AI community: https://ai.gitcode.com/vLLM_Ascend/Qwen3-Next-80B-A3B-Instruct
Modelers community: https://modelers.cn/models/vLLM_Ascend/Qwen3-Next-80B-A3B-Instruct