Source code for langchain_text_splitters.nltk

from __future__ import annotations

from typing import Any, List

from langchain_text_splitters.base import TextSplitter


class NLTKTextSplitter(TextSplitter):
    """Splitting text using NLTK package."""

    def __init__(
        self, separator: str = "\n\n", language: str = "english", **kwargs: Any
    ) -> None:
        """Initialize the NLTK splitter.

        Args:
            separator: String placed between sentence splits when they are
                merged back into chunks. Defaults to a blank line.
            language: Language name forwarded to NLTK's ``sent_tokenize``.
            **kwargs: Additional arguments passed through to ``TextSplitter``.

        Raises:
            ImportError: If the ``nltk`` package is not installed.
        """
        super().__init__(**kwargs)
        try:
            from nltk.tokenize import sent_tokenize

            self._tokenizer = sent_tokenize
        except ImportError as e:
            # Chain the original ImportError so the real failure (e.g. a
            # broken nltk install rather than a missing one) stays visible.
            raise ImportError(
                "NLTK is not installed, please install it with `pip install nltk`."
            ) from e
        self._separator = separator
        self._language = language

    def split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks."""
        # First we naively split the large input into a bunch of smaller ones,
        # then _merge_splits (from the base class) recombines them up to the
        # configured chunk size, joined by the separator.
        splits = self._tokenizer(text, language=self._language)
        return self._merge_splits(splits, self._separator)