{"resultsPerPage":1,"startIndex":0,"totalResults":1,"format":"NVD_CVE","version":"2.0","timestamp":"2026-04-19T21:22:25.605","vulnerabilities":[{"cve":{"id":"CVE-2026-34070","sourceIdentifier":"security-advisories@github.com","published":"2026-03-31T03:15:58.947","lastModified":"2026-04-02T17:04:43.713","vulnStatus":"Analyzed","cveTags":[],"descriptions":[{"lang":"en","value":"LangChain is a framework for building agents and LLM-powered applications. Prior to version 1.2.22, multiple functions in langchain_core.prompts.loading read files from paths embedded in deserialized config dicts without validating against directory traversal or absolute path injection. When an application passes user-influenced prompt configurations to load_prompt() or load_prompt_from_config(), an attacker can read arbitrary files on the host filesystem, constrained only by file-extension checks (.txt for templates, .json/.yaml for examples). This issue has been patched in version 1.2.22."},{"lang":"es","value":"LangChain es un framework para construir agentes y aplicaciones impulsadas por LLM. Antes de la versión 1.2.22, múltiples funciones en langchain_core.prompts.loading leían archivos de rutas incrustadas en diccionarios de configuración deserializados sin validar contra salto de directorio o inyección de ruta absoluta. Cuando una aplicación pasa configuraciones de prompt influenciadas por el usuario a load_prompt() o load_prompt_from_config(), un atacante puede leer archivos arbitrarios en el sistema de archivos del host, restringido solo por verificaciones de extensión de archivo (.txt para plantillas, .json/.yaml para ejemplos). Este problema ha sido parcheado en la versión 1.2.22."}],"metrics":{"cvssMetricV31":[{"source":"security-advisories@github.com","type":"Secondary","cvssData":{"version":"3.1","vectorString":"CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N","baseScore":7.5,"baseSeverity":"HIGH","attackVector":"NETWORK","attackComplexity":"LOW","privilegesRequired":"NONE","userInteraction":"NONE","scope":"UNCHANGED","confidentialityImpact":"HIGH","integrityImpact":"NONE","availabilityImpact":"NONE"},"exploitabilityScore":3.9,"impactScore":3.6}]},"weaknesses":[{"source":"security-advisories@github.com","type":"Secondary","description":[{"lang":"en","value":"CWE-22"}]}],"configurations":[{"nodes":[{"operator":"OR","negate":false,"cpeMatch":[{"vulnerable":true,"criteria":"cpe:2.3:a:langchain:langchain:*:*:*:*:*:*:*:*","versionEndExcluding":"1.2.22","matchCriteriaId":"B1D0654C-A99F-4835-9A30-865AEED16BC6"}]}]}],"references":[{"url":"https://github.com/langchain-ai/langchain/commit/27add913474e01e33bededf4096151130ba0d47c","source":"security-advisories@github.com","tags":["Patch"]},{"url":"https://github.com/langchain-ai/langchain/releases/tag/langchain-core==1.2.22","source":"security-advisories@github.com","tags":["Release Notes"]},{"url":"https://github.com/langchain-ai/langchain/security/advisories/GHSA-qh6h-p6c9-ff54","source":"security-advisories@github.com","tags":["Exploit","Vendor Advisory"]},{"url":"https://github.com/langchain-ai/langchain/security/advisories/GHSA-qh6h-p6c9-ff54","source":"134c704f-9b21-4f2e-91b3-4a467353bcc0","tags":["Exploit","Vendor Advisory"]}]}}]}