from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Sequence, Tuple, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
if TYPE_CHECKING:
from bs4 import NavigableString
from bs4.element import Comment, Tag
class ReadTheDocsLoader(BaseLoader):
    """Load `ReadTheDocs` documentation directory."""

    def __init__(
        self,
        path: Union[str, Path],
        encoding: Optional[str] = None,
        errors: Optional[str] = None,
        custom_html_tag: Optional[Tuple[str, dict]] = None,
        patterns: Sequence[str] = ("*.htm", "*.html"),
        exclude_links_ratio: float = 1.0,
        **kwargs: Optional[Any],
    ):
        """Initialize ReadTheDocsLoader.

        The loader loops over all files under `path` and extracts the actual
        content of the files by retrieving main html tags. Default main html
        tags include `<main id="main-content">` and `<div role="main">`. You
        can also define your own html tags by passing custom_html_tag, e.g.
        `("div", "class=main")`. The loader iterates html tags with the order
        of custom html tags (if exists) and default html tags. If any of the
        tags is not empty, the loop will break and retrieve the content out
        of that tag.

        Args:
            path: The location of pulled readthedocs folder.
            encoding: The encoding with which to open the documents.
            errors: Specify how encoding and decoding errors are to be
                handled---this cannot be used in binary mode.
            custom_html_tag: Optional custom html tag to retrieve the content
                from files.
            patterns: The file patterns to load, passed to `glob.rglob`.
            exclude_links_ratio: The ratio of links:content to exclude pages
                from. This is to reduce the frequency at which index pages
                make their way into retrieved results. Recommended: 0.5
            kwargs: named arguments passed to `bs4.BeautifulSoup`.
        """
        try:
            from bs4 import BeautifulSoup
        except ImportError:
            raise ImportError(
                "Could not import python packages. "
                "Please install it with `pip install beautifulsoup4`. "
            )

        try:
            # Validate the BeautifulSoup kwargs up front so a bad option
            # fails at construction time instead of on the first document.
            _ = BeautifulSoup(
                "<html><body>Parser builder library test.</body></html>",
                "html.parser",
                **kwargs,
            )
        except Exception as e:
            raise ValueError("Parsing kwargs do not appear valid") from e

        self.file_path = Path(path)
        self.encoding = encoding
        self.errors = errors
        self.custom_html_tag = custom_html_tag
        self.patterns = patterns
        self.bs_kwargs = kwargs
        self.exclude_links_ratio = exclude_links_ratio

    def lazy_load(self) -> Iterator[Document]:
        """A lazy loader for Documents."""
        for file_pattern in self.patterns:
            for p in self.file_path.rglob(file_pattern):
                if p.is_dir():
                    continue
                with open(p, encoding=self.encoding, errors=self.errors) as f:
                    text = self._clean_data(f.read())
                yield Document(page_content=text, metadata={"source": str(p)})

    def _clean_data(self, data: str) -> str:
        """Extract the main-content text of one HTML document.

        Searches the custom tag (if any) first, then the default main tags,
        and returns "" for link-heavy pages per `exclude_links_ratio`.
        """
        from bs4 import BeautifulSoup

        soup = BeautifulSoup(data, "html.parser", **self.bs_kwargs)

        # default tags
        html_tags = [
            ("div", {"role": "main"}),
            ("main", {"id": "main-content"}),
        ]

        if self.custom_html_tag is not None:
            html_tags.append(self.custom_html_tag)

        element = None

        # reversed order. check the custom one first
        for tag, attrs in html_tags[::-1]:
            element = soup.find(tag, attrs)
            # if found, break
            if element is not None:
                break

        if element is not None and _get_link_ratio(element) <= self.exclude_links_ratio:
            text = _get_clean_text(element)
        else:
            text = ""

        # trim empty lines
        return "\n".join([t for t in text.split("\n") if t])
def _get_clean_text(element: Tag) -> str:
    """Return cleaned text for `element`, keeping line breaks and dropping
    irrelevant (script/media/form/etc.) elements."""
    # Tags whose content is never useful prose: code, metadata, media,
    # embeds, frames, forms, and styling.
    skip_tags = [
        "script", "noscript", "canvas", "meta", "svg", "map", "area",
        "audio", "source", "track", "video", "embed", "object", "param",
        "picture", "iframe", "frame", "frameset", "noframes", "applet",
        "form", "button", "select", "base", "style", "img",
    ]
    # Block-level tags that should be followed by a newline in the output.
    break_tags = [
        "p", "div", "ul", "ol", "li",
        "h1", "h2", "h3", "h4", "h5", "h6",
        "pre", "table", "tr",
    ]
    return _process_element(element, skip_tags, break_tags).strip()
def _get_link_ratio(section: Tag) -> float:
links = section.find_all("a")
total_text = "".join(str(s) for s in section.stripped_strings)
if len(total_text) == 0:
return 0
link_text = "".join(
str(string.string.strip())
for link in links
for string in link.strings
if string
)
return len(link_text) / len(total_text)
def _process_element(
    element: Union[Tag, NavigableString, Comment],
    elements_to_skip: List[str],
    newline_elements: List[str],
) -> str:
    """Traverse the HTML tree recursively, preserving newlines and skipping
    unwanted (code/binary) elements.
    """
    from bs4 import NavigableString
    from bs4.element import Comment, Tag

    name = getattr(element, "name", None)

    # Comments and blacklisted tags contribute nothing.
    if isinstance(element, Comment) or name in elements_to_skip:
        return ""
    # Plain text nodes are emitted as-is.
    if isinstance(element, NavigableString):
        return element
    # Explicit line break.
    if name == "br":
        return "\n"

    # Otherwise recurse into the children and concatenate their text.
    joined = "".join(
        _process_element(child, elements_to_skip, newline_elements)
        for child in element.children
        if isinstance(child, (Tag, NavigableString, Comment))
    )
    # Block-level elements get a trailing newline.
    return joined + "\n" if name in newline_elements else joined