{"resultsPerPage":1,"startIndex":0,"totalResults":1,"format":"NVD_CVE","version":"2.0","timestamp":"2026-04-22T16:14:40.830","vulnerabilities":[{"cve":{"id":"CVE-2026-22807","sourceIdentifier":"security-advisories@github.com","published":"2026-01-21T22:15:49.077","lastModified":"2026-01-30T14:43:22.290","vulnStatus":"Analyzed","cveTags":[],"descriptions":[{"lang":"en","value":"vLLM is an inference and serving engine for large language models (LLMs). Starting in version 0.10.1 and prior to version 0.14.0, vLLM loads Hugging Face `auto_map` dynamic modules during model resolution without gating on `trust_remote_code`, allowing attacker-controlled Python code in a model repo/path to execute at server startup. An attacker who can influence the model repo/path (local directory or remote Hugging Face repo) can achieve arbitrary code execution on the vLLM host during model load. This happens before any request handling and does not require API access. Version 0.14.0 fixes the issue."},{"lang":"es","value":"vLLM es un motor de inferencia y servicio para modelos de lenguaje grandes (LLM). A partir de la versión 0.10.1 y antes de la versión 0.14.0, vLLM carga módulos dinámicos 'auto_map' de Hugging Face durante la resolución del modelo sin depender de 'trust_remote_code', permitiendo que código Python controlado por el atacante en un repositorio/ruta de modelo se ejecute al inicio del servidor. Un atacante que pueda influir en el repositorio/ruta del modelo (directorio local o repositorio remoto de Hugging Face) puede lograr la ejecución de código arbitrario en el host de vLLM durante la carga del modelo. Esto ocurre antes de cualquier manejo de solicitudes y no requiere acceso a la API. La versión 0.14.0 corrige el problema."}],"metrics":{"cvssMetricV31":[{"source":"security-advisories@github.com","type":"Secondary","cvssData":{"version":"3.1","vectorString":"CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:U/C:H/I:H/A:H","baseScore":8.8,"baseSeverity":"HIGH","attackVector":"NETWORK","attackComplexity":"LOW","privilegesRequired":"NONE","userInteraction":"REQUIRED","scope":"UNCHANGED","confidentialityImpact":"HIGH","integrityImpact":"HIGH","availabilityImpact":"HIGH"},"exploitabilityScore":2.8,"impactScore":5.9},{"source":"nvd@nist.gov","type":"Primary","cvssData":{"version":"3.1","vectorString":"CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H","baseScore":9.8,"baseSeverity":"CRITICAL","attackVector":"NETWORK","attackComplexity":"LOW","privilegesRequired":"NONE","userInteraction":"NONE","scope":"UNCHANGED","confidentialityImpact":"HIGH","integrityImpact":"HIGH","availabilityImpact":"HIGH"},"exploitabilityScore":3.9,"impactScore":5.9}]},"weaknesses":[{"source":"security-advisories@github.com","type":"Primary","description":[{"lang":"en","value":"CWE-94"}]}],"configurations":[{"nodes":[{"operator":"OR","negate":false,"cpeMatch":[{"vulnerable":true,"criteria":"cpe:2.3:a:vllm:vllm:*:*:*:*:*:*:*:*","versionStartIncluding":"0.10.1","versionEndExcluding":"0.14.0","matchCriteriaId":"F2E87BA6-DDF8-4FF6-A286-B44780082C69"}]}]}],"references":[{"url":"https://github.com/vllm-project/vllm/commit/78d13ea9de4b1ce5e4d8a5af9738fea71fb024e5","source":"security-advisories@github.com","tags":["Patch"]},{"url":"https://github.com/vllm-project/vllm/pull/32194","source":"security-advisories@github.com","tags":["Issue Tracking","Patch"]},{"url":"https://github.com/vllm-project/vllm/releases/tag/v0.14.0","source":"security-advisories@github.com","tags":["Product","Release Notes"]},{"url":"https://github.com/vllm-project/vllm/security/advisories/GHSA-2pc9-4j83-qjmr","source":"security-advisories@github.com","tags":["Patch","Vendor Advisory"]}]}}]}