Source code for langchain_community.document_loaders.gitbook

from typing import Any, Iterator, List, Optional
from urllib.parse import urljoin, urlparse

from langchain_core.documents import Document

from langchain_community.document_loaders.web_base import WebBaseLoader


class GitbookLoader(WebBaseLoader):
    """Load `GitBook` data.

    1. load from either a single page, or
    2. load all (relative) paths in the navbar.
    """
    def __init__(
        self,
        web_page: str,
        load_all_paths: bool = False,
        base_url: Optional[str] = None,
        content_selector: str = "main",
        continue_on_failure: bool = False,
    ):
        """Initialize with web page and whether to load all paths.

        Args:
            web_page: The web page to load, or the starting point from which
                relative paths are discovered.
            load_all_paths: If set to True, all relative paths in the navbar
                are loaded instead of just `web_page`.
            base_url: If `load_all_paths` is True, the relative paths are
                appended to this base url. Defaults to `web_page`.
            content_selector: The CSS selector for the content to load.
                Defaults to "main".
            continue_on_failure: Whether to continue loading the sitemap if an
                error occurs loading a url, emitting a warning instead of
                raising an exception. Setting this to True makes the loader
                more robust, but may also result in missing data.
                Default: False.
        """
        self.base_url = base_url or web_page
        if self.base_url.endswith("/"):
            self.base_url = self.base_url[:-1]
        if load_all_paths:
            # set web_path to the sitemap if we want to crawl all paths
            web_page = f"{self.base_url}/sitemap.xml"
        super().__init__(
            web_paths=(web_page,), continue_on_failure=continue_on_failure
        )
        self.load_all_paths = load_all_paths
        self.content_selector = content_selector
    def lazy_load(self) -> Iterator[Document]:
        """Fetch text from one single GitBook page, or from every page listed
        in the sitemap when `load_all_paths` is True."""
        if self.load_all_paths:
            soup_info = self.scrape()
            relative_paths = self._get_paths(soup_info)
            urls = [urljoin(self.base_url, path) for path in relative_paths]
            soup_infos = self.scrape_all(urls)
            for soup_info, url in zip(soup_infos, urls):
                doc = self._get_document(soup_info, url)
                if doc:
                    yield doc
        else:
            soup_info = self.scrape()
            doc = self._get_document(soup_info, self.web_path)
            if doc:
                yield doc
    def _get_document(
        self, soup: Any, custom_url: Optional[str] = None
    ) -> Optional[Document]:
        """Fetch content from the page and return a Document."""
        page_content_raw = soup.find(self.content_selector)
        if not page_content_raw:
            return None
        content = page_content_raw.get_text(separator="\n").strip()
        title_if_exists = page_content_raw.find("h1")
        title = title_if_exists.text if title_if_exists else ""
        metadata = {"source": custom_url or self.web_path, "title": title}
        return Document(page_content=content, metadata=metadata)

    def _get_paths(self, soup: Any) -> List[str]:
        """Fetch all relative paths in the navbar (via the sitemap's <loc> tags)."""
        return [urlparse(loc.text).path for loc in soup.find_all("loc")]
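
For reference, a minimal usage sketch of the two modes; the URL below is a placeholder, substitute any GitBook site:

from langchain_community.document_loaders import GitbookLoader

# Single-page mode: loads only the "main" content of this one page.
loader = GitbookLoader("https://docs.gitbook.com")
docs = list(loader.lazy_load())

# All-paths mode: fetches {base_url}/sitemap.xml, then every path it lists.
all_pages_loader = GitbookLoader(
    "https://docs.gitbook.com",
    load_all_paths=True,
    continue_on_failure=True,  # warn instead of raising on pages that fail
)
for doc in all_pages_loader.lazy_load():
    print(doc.metadata["source"], doc.metadata["title"])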
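
And a self-contained sketch of the sitemap step behind _get_paths, using a made-up two-entry sitemap. It assumes BeautifulSoup's default "html.parser", which lowercases tag names, so find_all("loc") matches the <loc> entries:

from urllib.parse import urlparse

from bs4 import BeautifulSoup

# Hypothetical sitemap payload; real GitBook sitemaps list one <loc> per page.
sitemap_xml = """
<urlset>
  <url><loc>https://example.gitbook.io/intro</loc></url>
  <url><loc>https://example.gitbook.io/setup/install</loc></url>
</urlset>
"""

soup = BeautifulSoup(sitemap_xml, "html.parser")
paths = [urlparse(loc.text).path for loc in soup.find_all("loc")]
print(paths)  # ['/intro', '/setup/install']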