{"resultsPerPage":1,"startIndex":0,"totalResults":1,"format":"NVD_CVE","version":"2.0","timestamp":"2026-05-04T20:46:33.414","vulnerabilities":[{"cve":{"id":"CVE-2025-62426","sourceIdentifier":"security-advisories@github.com","published":"2025-11-21T02:15:43.570","lastModified":"2025-12-04T17:42:10.913","vulnStatus":"Analyzed","cveTags":[],"descriptions":[{"lang":"en","value":"vLLM is an inference and serving engine for large language models (LLMs). From version 0.5.5 to before 0.11.1, the /v1/chat/completions and /tokenize endpoints allow a chat_template_kwargs request parameter that is used in the code before it is properly validated against the chat template. With the right chat_template_kwargs parameters, it is possible to block processing of the API server for long periods of time, delaying all other requests. This issue has been patched in version 0.11.1."}],"metrics":{"cvssMetricV31":[{"source":"security-advisories@github.com","type":"Secondary","cvssData":{"version":"3.1","vectorString":"CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H","baseScore":6.5,"baseSeverity":"MEDIUM","attackVector":"NETWORK","attackComplexity":"LOW","privilegesRequired":"LOW","userInteraction":"NONE","scope":"UNCHANGED","confidentialityImpact":"NONE","integrityImpact":"NONE","availabilityImpact":"HIGH"},"exploitabilityScore":2.8,"impactScore":3.6}]},"weaknesses":[{"source":"security-advisories@github.com","type":"Secondary","description":[{"lang":"en","value":"CWE-770"}]}],"configurations":[{"nodes":[{"operator":"OR","negate":false,"cpeMatch":[{"vulnerable":true,"criteria":"cpe:2.3:a:vllm:vllm:*:*:*:*:*:*:*:*","versionStartIncluding":"0.5.5","versionEndExcluding":"0.11.1","matchCriteriaId":"BA1047E1-ED1E-4685-B699-8CE5B2058D87"},{"vulnerable":true,"criteria":"cpe:2.3:a:vllm:vllm:0.11.1:rc0:*:*:*:*:*:*","matchCriteriaId":"FEE054E1-1F84-4ACC-894C-D7E3652EF1B1"},{"vulnerable":true,"criteria":"cpe:2.3:a:vllm:vllm:0.11.1:rc1:*:*:*:*:*:*","matchCriteriaId":"B05850DF-38FE-439F-9F7A-AA96DA9038CC"}]}]}],"references":[{"url":"https://github.com/vllm-project/vllm/blob/2a6dc67eb520ddb9c4138d8b35ed6fe6226997fb/vllm/entrypoints/chat_utils.py#L1602-L1610","source":"security-advisories@github.com","tags":["Product"]},{"url":"https://github.com/vllm-project/vllm/blob/2a6dc67eb520ddb9c4138d8b35ed6fe6226997fb/vllm/entrypoints/openai/serving_engine.py#L809-L814","source":"security-advisories@github.com","tags":["Product"]},{"url":"https://github.com/vllm-project/vllm/commit/3ada34f9cb4d1af763fdfa3b481862a93eb6bd2b","source":"security-advisories@github.com","tags":["Patch"]},{"url":"https://github.com/vllm-project/vllm/pull/27205","source":"security-advisories@github.com","tags":["Issue Tracking"]},{"url":"https://github.com/vllm-project/vllm/security/advisories/GHSA-69j4-grxj-j64p","source":"security-advisories@github.com","tags":["Vendor Advisory"]}]}}]}