# Source code for langchain_community.document_loaders.news

"""Loader that fetches and parses online news articles from URLs."""
import logging
from typing import Any, Iterator, List

from langchain_core.documents import Document

from langchain_community.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class NewsURLLoader(BaseLoader):
    """Load news articles from URLs using `newspaper3k`.

    Args:
        urls: URLs to load. Each URL is loaded into its own document.
        text_mode: If True, extract text from the URL and use it as page
            content. Otherwise, extract raw HTML.
        nlp: If True, perform natural-language processing (NLP) on the
            extracted content, such as providing a summary and extracting
            keywords.
        continue_on_failure: If True, continue loading documents even when
            loading fails for a particular URL.
        show_progress_bar: If True, use tqdm to show a loading progress bar.
            Requires tqdm to be installed, ``pip install tqdm``.
        **newspaper_kwargs: Any additional named arguments to pass to
            ``newspaper.Article()``.

    Example:
        .. code-block:: python

            from langchain_community.document_loaders import NewsURLLoader

            loader = NewsURLLoader(
                urls=["<url-1>", "<url-2>"],
            )
            docs = loader.load()

    Newspaper reference:
        https://newspaper.readthedocs.io/en/latest/
    """

    def __init__(
        self,
        urls: List[str],
        text_mode: bool = True,
        nlp: bool = False,
        continue_on_failure: bool = True,
        show_progress_bar: bool = False,
        **newspaper_kwargs: Any,
    ) -> None:
        """Initialize with file path.

        Raises:
            ImportError: If the ``newspaper`` package is not installed.
        """
        try:
            import newspaper

            # Record the installed newspaper version (private, for debugging).
            self.__version = newspaper.__version__
        except ImportError as e:
            # Chain the original error so the real import failure is visible,
            # consistent with the other import guards in this class.
            raise ImportError(
                "newspaper package not found, please install it with "
                "`pip install newspaper3k`"
            ) from e

        self.urls = urls
        self.text_mode = text_mode
        self.nlp = nlp
        self.continue_on_failure = continue_on_failure
        self.newspaper_kwargs = newspaper_kwargs
        self.show_progress_bar = show_progress_bar

    def load(self) -> List[Document]:
        """Eagerly load all URLs, optionally showing a tqdm progress bar.

        Returns:
            A list with one Document per successfully loaded URL.

        Raises:
            ImportError: If ``show_progress_bar`` is True but tqdm is missing.
        """
        # Avoid shadowing the builtin `iter`.
        docs_iter: Iterator[Document] = self.lazy_load()
        if self.show_progress_bar:
            try:
                from tqdm import tqdm
            except ImportError as e:
                raise ImportError(
                    "Package tqdm must be installed if show_progress_bar=True. "
                    "Please install with 'pip install tqdm' or set "
                    "show_progress_bar=False."
                ) from e

            docs_iter = tqdm(docs_iter)
        return list(docs_iter)

    def lazy_load(self) -> Iterator[Document]:
        """Fetch, parse, and yield one Document per URL, lazily.

        Yields:
            Documents whose page_content is the article text (or raw HTML
            when ``text_mode`` is False) and whose metadata holds the
            article's title, link, authors, language, description, and
            publish date (plus keywords/summary when ``nlp`` is True).

        Raises:
            ImportError: If the ``newspaper`` package is not installed.
            Exception: Re-raised from the fetch/parse step when
                ``continue_on_failure`` is False.
        """
        try:
            from newspaper import Article
        except ImportError as e:
            raise ImportError(
                "Cannot import newspaper, please install with `pip install newspaper3k`"
            ) from e

        for url in self.urls:
            try:
                article = Article(url, **self.newspaper_kwargs)
                article.download()
                article.parse()

                if self.nlp:
                    article.nlp()
            except Exception as e:
                if self.continue_on_failure:
                    logger.error(f"Error fetching or processing {url}, exception: {e}")
                    continue
                else:
                    # Bare raise keeps the original traceback intact
                    # (raise e would rebind it at this frame).
                    raise

            # getattr guards tolerate attributes missing on a parsed article.
            metadata = {
                "title": getattr(article, "title", ""),
                "link": getattr(article, "url", getattr(article, "canonical_link", "")),
                "authors": getattr(article, "authors", []),
                "language": getattr(article, "meta_lang", ""),
                "description": getattr(article, "meta_description", ""),
                "publish_date": getattr(article, "publish_date", ""),
            }

            if self.text_mode:
                content = article.text
            else:
                content = article.html

            if self.nlp:
                metadata["keywords"] = getattr(article, "keywords", [])
                metadata["summary"] = getattr(article, "summary", "")

            yield Document(page_content=content, metadata=metadata)