{"resultsPerPage":1,"startIndex":0,"totalResults":1,"format":"NVD_CVE","version":"2.0","timestamp":"2026-04-18T15:37:09.228","vulnerabilities":[{"cve":{"id":"CVE-2026-27893","sourceIdentifier":"security-advisories@github.com","published":"2026-03-27T00:16:22.333","lastModified":"2026-03-30T18:56:21.730","vulnStatus":"Analyzed","cveTags":[],"descriptions":[{"lang":"en","value":"vLLM is an inference and serving engine for large language models (LLMs). Starting in version 0.10.1 and prior to version 0.18.0, two model implementation files hardcode `trust_remote_code=True` when loading sub-components, bypassing the user's explicit `--trust-remote-code=False` security opt-out. This enables remote code execution via malicious model repositories even when the user has explicitly disabled remote code trust. Version 0.18.0 patches the issue."},{"lang":"es","value":"vLLM es un motor de inferencia y servicio para modelos de lenguaje grandes (LLM). A partir de la versión 0.10.1 y antes de la versión 0.18.0, dos archivos de implementación de modelos codifican de forma rígida 'trust_remote_code=True' al cargar subcomponentes, eludiendo la exclusión voluntaria de seguridad explícita del usuario '--trust-remote-code=False'. Esto permite la ejecución remota de código a través de repositorios de modelos maliciosos incluso cuando el usuario ha deshabilitado explícitamente la confianza en el código remoto. La versión 0.18.0 corrige el problema."}],"metrics":{"cvssMetricV31":[{"source":"security-advisories@github.com","type":"Secondary","cvssData":{"version":"3.1","vectorString":"CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:U/C:H/I:H/A:H","baseScore":8.8,"baseSeverity":"HIGH","attackVector":"NETWORK","attackComplexity":"LOW","privilegesRequired":"NONE","userInteraction":"REQUIRED","scope":"UNCHANGED","confidentialityImpact":"HIGH","integrityImpact":"HIGH","availabilityImpact":"HIGH"},"exploitabilityScore":2.8,"impactScore":5.9}]},"weaknesses":[{"source":"security-advisories@github.com","type":"Primary","description":[{"lang":"en","value":"CWE-693"}]}],"configurations":[{"nodes":[{"operator":"OR","negate":false,"cpeMatch":[{"vulnerable":true,"criteria":"cpe:2.3:a:vllm:vllm:*:*:*:*:*:*:*:*","versionStartIncluding":"0.10.1","versionEndExcluding":"0.18.0","matchCriteriaId":"2130385B-68E6-4854-AC42-0CBA1F30B487"}]}]}],"references":[{"url":"https://github.com/vllm-project/vllm/commit/00bd08edeee5dd4d4c13277c0114a464011acf72","source":"security-advisories@github.com","tags":["Patch"]},{"url":"https://github.com/vllm-project/vllm/pull/36192","source":"security-advisories@github.com","tags":["Issue Tracking"]},{"url":"https://github.com/vllm-project/vllm/security/advisories/GHSA-7972-pg2x-xr59","source":"security-advisories@github.com","tags":["Vendor Advisory"]}]}}]}