跳至内容

vllm.sampling_params

文本生成的采样参数。

_MAX_TEMP module-attribute

_MAX_TEMP = 0.01

_SAMPLING_EPS module-attribute

_SAMPLING_EPS = 1e-05

logger module-attribute

logger = init_logger(__name__)

BeamSearchParams

基类: Struct

用于文本生成的束搜索参数。

Source code in vllm/sampling_params.py
class BeamSearchParams(
        msgspec.Struct,
        omit_defaults=True,  # type: ignore[call-arg]
        # required for @cached_property.
        dict=True):  # type: ignore[call-arg]
    """Beam search parameters for text generation."""
    # NOTE: for a msgspec.Struct, field order defines the positional
    # constructor interface — do not reorder these fields.
    beam_width: int  # number of beams kept at each decoding step
    max_tokens: int  # maximum number of tokens to generate
    ignore_eos: bool = False  # keep generating past the EOS token
    temperature: float = 0.0  # presumably 0.0 means deterministic scoring — confirm with engine
    length_penalty: float = 1.0  # exponent applied when ranking beams by length — TODO confirm
    include_stop_str_in_output: bool = False  # whether stop strings appear in the output text

beam_width instance-attribute

beam_width: int

ignore_eos class-attribute instance-attribute

ignore_eos: bool = False

include_stop_str_in_output class-attribute instance-attribute

include_stop_str_in_output: bool = False

length_penalty class-attribute instance-attribute

length_penalty: float = 1.0

max_tokens instance-attribute

max_tokens: int

temperature class-attribute instance-attribute

temperature: float = 0.0

GuidedDecodingParams dataclass

这些字段中的一个将被用来构建一个逻辑处理器。

Source code in vllm/sampling_params.py
@dataclass
class GuidedDecodingParams:
    """One of these fields will be used to build a logit processor."""
    # NOTE: field order defines the positional __init__ interface of this
    # dataclass — do not reorder.
    json: Optional[Union[str, dict]] = None
    regex: Optional[str] = None
    choice: Optional[list[str]] = None
    grammar: Optional[str] = None
    json_object: Optional[bool] = None
    """These are other options that can be set"""
    backend: Optional[str] = None
    backend_was_auto: bool = False
    disable_fallback: bool = False
    disable_any_whitespace: bool = False
    disable_additional_properties: bool = False
    whitespace_pattern: Optional[str] = None
    structural_tag: Optional[str] = None

    @staticmethod
    def from_optional(
        json: Optional[Union[dict, BaseModel, str]] = None,
        regex: Optional[str] = None,
        choice: Optional[list[str]] = None,
        grammar: Optional[str] = None,
        json_object: Optional[bool] = None,
        backend: Optional[str] = None,
        whitespace_pattern: Optional[str] = None,
        structural_tag: Optional[str] = None,
    ) -> Optional["GuidedDecodingParams"]:
        """Build a GuidedDecodingParams, or return None when no guide
        source (json/regex/choice/grammar/json_object/structural_tag)
        is provided."""
        guide_sources = (json, regex, choice, grammar, json_object,
                         structural_tag)
        if not any(source is not None for source in guide_sources):
            return None
        # Pydantic models (classes or instances) carry their own JSON schema;
        # extract it so downstream code only ever sees str/dict.
        if isinstance(json, (BaseModel, type(BaseModel))):
            json = json.model_json_schema()
        return GuidedDecodingParams(
            json=json,
            regex=regex,
            choice=choice,
            grammar=grammar,
            json_object=json_object,
            backend=backend,
            whitespace_pattern=whitespace_pattern,
            structural_tag=structural_tag,
        )

    def __post_init__(self):
        """Validate that some fields are mutually exclusive."""
        guides = (self.json, self.regex, self.choice, self.grammar,
                  self.json_object)
        if sum(guide is not None for guide in guides) > 1:
            raise ValueError(
                "You can only use one kind of guided decoding but multiple are "
                f"specified: {self.__dict__}")

backend class-attribute instance-attribute

backend: Optional[str] = None

backend_was_auto class-attribute instance-attribute

backend_was_auto: bool = False

choice class-attribute instance-attribute

choice: Optional[list[str]] = None

disable_additional_properties class-attribute instance-attribute

disable_additional_properties: bool = False

disable_any_whitespace class-attribute instance-attribute

disable_any_whitespace: bool = False

disable_fallback class-attribute instance-attribute

disable_fallback: bool = False

grammar class-attribute instance-attribute

grammar: Optional[str] = None

json class-attribute instance-attribute

json: Optional[Union[str, dict]] = None

json_object class-attribute instance-attribute

json_object: Optional[bool] = None

以下是可设置的其他选项

regex class-attribute instance-attribute

regex: Optional[str] = None

structural_tag class-attribute instance-attribute

structural_tag: Optional[str] = None

whitespace_pattern class-attribute instance-attribute

whitespace_pattern: Optional[str] = None

__init__

__init__(
    json: Optional[Union[str, dict]] = None,
    regex: Optional[str] = None,
    choice: Optional[list[str]] = None,
    grammar: Optional[str] = None,
    json_object: Optional[bool] = None,
    backend: Optional[str] = None,
    backend_was_auto: bool = False,
    disable_fallback: bool = False,
    disable_any_whitespace: bool = False,
    disable_additional_properties: bool = False,
    whitespace_pattern: Optional[str] = None,
    structural_tag: Optional[str] = None,
) -> None

__post_init__

__post_init__()

验证某些字段是互斥的。

Source code in vllm/sampling_params.py
def __post_init__(self):
    """Validate that some fields are mutually exclusive."""
    configured = (self.json, self.regex, self.choice, self.grammar,
                  self.json_object)
    if sum(value is not None for value in configured) > 1:
        raise ValueError(
            "You can only use one kind of guided decoding but multiple are "
            f"specified: {self.__dict__}")

from_optional staticmethod

from_optional(
    json: Optional[Union[dict, BaseModel, str]] = None,
    regex: Optional[str] = None,
    choice: Optional[list[str]] = None,
    grammar: Optional[str] = None,
    json_object: Optional[bool] = None,
    backend: Optional[str] = None,
    whitespace_pattern: Optional[str] = None,
    structural_tag: Optional[str] = None,
) -> Optional[GuidedDecodingParams]
Source code in vllm/sampling_params.py
@staticmethod
def from_optional(
    json: Optional[Union[dict, BaseModel, str]] = None,
    regex: Optional[str] = None,
    choice: Optional[list[str]] = None,
    grammar: Optional[str] = None,
    json_object: Optional[bool] = None,
    backend: Optional[str] = None,
    whitespace_pattern: Optional[str] = None,
    structural_tag: Optional[str] = None,
) -> Optional["GuidedDecodingParams"]:
    """Build a GuidedDecodingParams, or return None when no guide source
    is provided."""
    guide_sources = (json, regex, choice, grammar, json_object,
                     structural_tag)
    if not any(source is not None for source in guide_sources):
        return None
    # Pydantic models carry their own JSON schema; normalize to str/dict.
    if isinstance(json, (BaseModel, type(BaseModel))):
        json = json.model_json_schema()
    return GuidedDecodingParams(
        json=json,
        regex=regex,
        choice=choice,
        grammar=grammar,
        json_object=json_object,
        backend=backend,
        whitespace_pattern=whitespace_pattern,
        structural_tag=structural_tag,
    )

RequestOutputKind

基类: Enum

Source code in vllm/sampling_params.py
class RequestOutputKind(Enum):
    """How results are surfaced across the stream of RequestOutputs."""
    # Return entire output so far in every RequestOutput
    CUMULATIVE = 0
    # Return only deltas in each RequestOutput
    DELTA = 1
    # Do not return intermediate RequestOutput
    FINAL_ONLY = 2

CUMULATIVE class-attribute instance-attribute

CUMULATIVE = 0

DELTA class-attribute instance-attribute

DELTA = 1

FINAL_ONLY class-attribute instance-attribute

FINAL_ONLY = 2

SamplingParams

基类: Struct

文本生成的采样参数。

总体而言,我们遵循OpenAI文本补全API(https://platform.openai.com/docs/api-reference/completions/create)中的采样参数。此外,我们还支持OpenAI不支持的束搜索(beam search)功能。

参数:

名称 类型 描述 默认值
n

为给定提示返回的输出序列数量。

required
best_of

从提示词生成的输出序列数量。从这些best_of序列中,返回前n个序列。best_of必须大于或等于n。默认情况下,best_of设置为n。警告:此功能仅在V0版本中支持。

required
presence_penalty

根据新词元是否已在生成文本中出现来施加惩罚的浮点数值。数值大于0会鼓励模型使用新词元,而数值小于0则会鼓励模型重复词元。

required
frequency_penalty

根据生成文本中已出现频率对新词元施加惩罚的浮点数值。数值大于0鼓励模型使用新词元,数值小于0则鼓励模型重复词元。

required
repetition_penalty

该浮点值用于根据新词元是否出现在提示词和已生成文本中进行惩罚。值大于1鼓励模型使用新词元,而值小于1则鼓励模型重复词元。

required
temperature

控制采样随机性的浮点数值。数值越低,模型输出越确定;数值越高,模型输出越随机。零值表示贪婪采样。

required
top_p

控制要考虑的顶部词元的累积概率的浮点数。必须在 (0, 1] 范围内。设置为 1 表示考虑所有词元。

required
top_k

控制要考虑的顶部令牌数量的整数。设置为0(或-1)表示考虑所有令牌。

required
min_p

表示一个token被考虑的最低概率,相对于最可能token的概率。必须介于[0, 1]之间。设置为0可禁用此功能。

required
seed

用于生成的随机种子。

required
stop

生成过程中遇到这些字符串时会停止生成。返回的输出将不包含这些停止字符串。

required
stop_token_ids

生成这些标记时会停止生成的标记列表。返回的输出将包含停止标记,除非停止标记是特殊标记。

required
bad_words

禁止生成的词汇列表。更准确地说,只有当下一个生成的标记可以完成对应标记序列时,该序列的最后一个标记才被禁止。

required
include_stop_str_in_output

是否在输出文本中包含停止字符串。默认为False。

required
ignore_eos

是否忽略EOS标记并在生成EOS标记后继续生成标记。

required
max_tokens

每个输出序列生成的最大token数量。

required
min_tokens

在生成EOS或stop_token_ids之前,每个输出序列需生成的最小token数量

required
logprobs

每个输出令牌返回的对数概率数量。当设置为None时,不返回概率。如果设置为非None值,结果将包含指定数量最可能令牌的对数概率,以及所选令牌。请注意,该实现遵循OpenAI API:API将始终返回采样令牌的对数概率,因此响应中最多可能有logprobs+1个元素。

required
prompt_logprobs

每个提示词(token)返回的对数概率数量。

required
detokenize

是否对输出进行解码。默认为True。

required
skip_special_tokens

是否在输出中跳过特殊标记。

required
spaces_between_special_tokens

是否在输出的特殊标记之间添加空格。默认为True。

required
logits_processors

基于先前生成的令牌修改logits的函数列表,并可选择将提示令牌作为第一个参数。

required
truncate_prompt_tokens

如果设置为-1,将使用模型支持的截断大小。如果设置为整数k,则仅使用提示中的最后k个标记(即左截断)。默认为None(即不截断)。

required
guided_decoding

如果提供这些参数,引擎将根据它们构建一个引导式解码对数概率处理器。默认为None。

required
logit_bias

如果提供此参数,引擎将构建一个应用这些对数偏置的logits处理器。默认为None。

required
allowed_token_ids

如果提供此参数,引擎将构建一个仅保留给定令牌ID分数的logits处理器。默认为None。

required
extra_args

任意附加参数,可供自定义采样实现、插件等使用。内置采样实现不会使用这些参数。

required
Source code in vllm/sampling_params.py
(vllm/sampling_params.py 第 96–566 行;行号栏已省略)
class SamplingParams(
        msgspec.Struct,
        omit_defaults=True,  # type: ignore[call-arg]
        # required for @cached_property.
        dict=True):  # type: ignore[call-arg]
    """Sampling parameters for text generation.

    Overall, we follow the sampling parameters from the OpenAI text completion
    API (https://platform.openai.com/docs/api-reference/completions/create).
    In addition, we support beam search, which is not supported by OpenAI.

    Args:
        n: Number of output sequences to return for the given prompt.
        best_of: Number of output sequences that are generated from the prompt.
            From these `best_of` sequences, the top `n` sequences are returned.
            `best_of` must be greater than or equal to `n`. By default,
            `best_of` is set to `n`. Warning, this is only supported in V0.
        presence_penalty: Float that penalizes new tokens based on whether they
            appear in the generated text so far. Values > 0 encourage the model
            to use new tokens, while values < 0 encourage the model to repeat
            tokens.
        frequency_penalty: Float that penalizes new tokens based on their
            frequency in the generated text so far. Values > 0 encourage the
            model to use new tokens, while values < 0 encourage the model to
            repeat tokens.
        repetition_penalty: Float that penalizes new tokens based on whether
            they appear in the prompt and the generated text so far. Values > 1
            encourage the model to use new tokens, while values < 1 encourage
            the model to repeat tokens.
        temperature: Float that controls the randomness of the sampling. Lower
            values make the model more deterministic, while higher values make
            the model more random. Zero means greedy sampling.
        top_p: Float that controls the cumulative probability of the top tokens
            to consider. Must be in (0, 1]. Set to 1 to consider all tokens.
        top_k: Integer that controls the number of top tokens to consider. Set
            to 0 (or -1) to consider all tokens.
        min_p: Float that represents the minimum probability for a token to be
            considered, relative to the probability of the most likely token.
            Must be in [0, 1]. Set to 0 to disable this.
        seed: Random seed to use for the generation.
        stop: list of strings that stop the generation when they are generated.
            The returned output will not contain the stop strings.
        stop_token_ids: list of tokens that stop the generation when they are
            generated. The returned output will contain the stop tokens unless
            the stop tokens are special tokens.
        bad_words: list of words that are not allowed to be generated.
            More precisely, only the last token of a corresponding
            token sequence is not allowed when the next generated token
            can complete the sequence.
        include_stop_str_in_output: Whether to include the stop strings in
            output text. Defaults to False.
        ignore_eos: Whether to ignore the EOS token and continue generating
            tokens after the EOS token is generated.
        max_tokens: Maximum number of tokens to generate per output sequence.
        min_tokens: Minimum number of tokens to generate per output sequence
            before EOS or stop_token_ids can be generated
        logprobs: Number of log probabilities to return per output token.
            When set to None, no probability is returned. If set to a non-None
            value, the result includes the log probabilities of the specified
            number of most likely tokens, as well as the chosen tokens.
            Note that the implementation follows the OpenAI API: The API will
            always return the log probability of the sampled token, so there
            may be up to `logprobs+1` elements in the response.
        prompt_logprobs: Number of log probabilities to return per prompt token.
        detokenize: Whether to detokenize the output. Defaults to True.
        skip_special_tokens: Whether to skip special tokens in the output.
        spaces_between_special_tokens: Whether to add spaces between special
            tokens in the output.  Defaults to True.
        logits_processors: list of functions that modify logits based on
            previously generated tokens, and optionally prompt tokens as
            a first argument.
        truncate_prompt_tokens: If set to -1, will use the truncation size
            supported by the model. If set to an integer k, will use only
            the last k tokens from the prompt (i.e., left truncation).
            Defaults to None (i.e., no truncation).
        guided_decoding: If provided, the engine will construct a guided
            decoding logits processor from these parameters. Defaults to None.
        logit_bias: If provided, the engine will construct a logits processor
            that applies these logit biases. Defaults to None.
        allowed_token_ids: If provided, the engine will construct a logits
            processor which only retains scores for the given token ids.
            Defaults to None.
        extra_args: Arbitrary additional args, that can be used by custom
            sampling implementations, plugins, etc. Not used by any in-tree
            sampling implementations.
    """

    n: int = 1
    best_of: Optional[int] = None
    _real_n: Optional[int] = None
    presence_penalty: float = 0.0
    frequency_penalty: float = 0.0
    repetition_penalty: float = 1.0
    temperature: float = 1.0
    top_p: float = 1.0
    top_k: int = 0
    min_p: float = 0.0
    seed: Optional[int] = None
    stop: Optional[Union[str, list[str]]] = None
    stop_token_ids: Optional[list[int]] = None
    ignore_eos: bool = False
    max_tokens: Optional[int] = 16
    min_tokens: int = 0
    logprobs: Optional[int] = None
    prompt_logprobs: Optional[int] = None
    # NOTE: This parameter is only exposed at the engine level for now.
    # It is not exposed in the OpenAI API server, as the OpenAI API does
    # not support returning only a list of token IDs.
    detokenize: bool = True
    skip_special_tokens: bool = True
    spaces_between_special_tokens: bool = True
    # Optional[list[LogitsProcessor]] type. We use Any here because
    # Optional[list[LogitsProcessor]] type is not supported by msgspec.
    logits_processors: Optional[Any] = None
    include_stop_str_in_output: bool = False
    truncate_prompt_tokens: Optional[Annotated[int, msgspec.Meta(ge=1)]] = None
    output_kind: RequestOutputKind = RequestOutputKind.CUMULATIVE

    # The below fields are not supposed to be used as an input.
    # They are set in post_init.
    output_text_buffer_length: int = 0
    _all_stop_token_ids: set[int] = msgspec.field(default_factory=set)

    # Fields used to construct logits processors
    guided_decoding: Optional[GuidedDecodingParams] = None
    logit_bias: Optional[dict[int, float]] = None
    allowed_token_ids: Optional[list[int]] = None
    extra_args: Optional[dict[str, Any]] = None

    # Fields used for bad words
    bad_words: Optional[list[str]] = None
    _bad_words_token_ids: Optional[list[list[int]]] = None

    @staticmethod
    def from_optional(
        n: Optional[int] = 1,
        best_of: Optional[int] = None,
        presence_penalty: Optional[float] = 0.0,
        frequency_penalty: Optional[float] = 0.0,
        repetition_penalty: Optional[float] = 1.0,
        temperature: Optional[float] = 1.0,
        top_p: Optional[float] = 1.0,
        top_k: int = 0,
        min_p: float = 0.0,
        seed: Optional[int] = None,
        stop: Optional[Union[str, list[str]]] = None,
        stop_token_ids: Optional[list[int]] = None,
        bad_words: Optional[list[str]] = None,
        include_stop_str_in_output: bool = False,
        ignore_eos: bool = False,
        max_tokens: Optional[int] = 16,
        min_tokens: int = 0,
        logprobs: Optional[int] = None,
        prompt_logprobs: Optional[int] = None,
        detokenize: bool = True,
        skip_special_tokens: bool = True,
        spaces_between_special_tokens: bool = True,
        logits_processors: Optional[list[LogitsProcessor]] = None,
        truncate_prompt_tokens: Optional[Annotated[int,
                                                   msgspec.Meta(ge=1)]] = None,
        output_kind: RequestOutputKind = RequestOutputKind.CUMULATIVE,
        guided_decoding: Optional[GuidedDecodingParams] = None,
        logit_bias: Optional[Union[dict[int, float], dict[str, float]]] = None,
        allowed_token_ids: Optional[list[int]] = None,
        extra_args: Optional[dict[str, Any]] = None,
    ) -> "SamplingParams":
        if logit_bias is not None:
            # Convert token_id to integer
            # Clamp the bias between -100 and 100 per OpenAI API spec
            logit_bias = {
                int(token): min(100.0, max(-100.0, bias))
                for token, bias in logit_bias.items()
            }

        return SamplingParams(
            n=1 if n is None else n,
            best_of=best_of,
            presence_penalty=0.0
            if presence_penalty is None else presence_penalty,
            frequency_penalty=0.0
            if frequency_penalty is None else frequency_penalty,
            repetition_penalty=1.0
            if repetition_penalty is None else repetition_penalty,
            temperature=1.0 if temperature is None else temperature,
            top_p=1.0 if top_p is None else top_p,
            top_k=top_k,
            min_p=min_p,
            seed=seed,
            stop=stop,
            stop_token_ids=stop_token_ids,
            bad_words=bad_words,
            include_stop_str_in_output=include_stop_str_in_output,
            ignore_eos=ignore_eos,
            max_tokens=max_tokens,
            min_tokens=min_tokens,
            logprobs=logprobs,
            prompt_logprobs=prompt_logprobs,
            detokenize=detokenize,
            skip_special_tokens=skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
            logits_processors=logits_processors,
            truncate_prompt_tokens=truncate_prompt_tokens,
            output_kind=output_kind,
            guided_decoding=guided_decoding,
            logit_bias=logit_bias,
            allowed_token_ids=allowed_token_ids,
            extra_args=extra_args,
        )

    def __post_init__(self) -> None:
        # how we deal with `best_of``:
        # if `best_of`` is not set, we default to `n`;
        # if `best_of`` is set, we set `n`` to `best_of`,
        # and set `_real_n`` to the original `n`.
        # when we return the result, we will check
        # if we need to return `n` or `_real_n` results
        if self.best_of:
            if self.best_of < self.n:
                raise ValueError(
                    f"best_of must be greater than or equal to n, "
                    f"got n={self.n} and best_of={self.best_of}.")
            if not self._real_n:
                self._real_n = self.n
                self.n = self.best_of

        if 0 < self.temperature < _MAX_TEMP:
            logger.warning(
                "temperature %s is less than %s, which may cause numerical "
                "errors nan or inf in tensors. We have maxed it out to %s.",
                self.temperature, _MAX_TEMP, _MAX_TEMP)
            self.temperature = max(self.temperature, _MAX_TEMP)

        if self.seed == -1:
            self.seed = None

        if self.stop is None:
            self.stop = []
        elif isinstance(self.stop, str):
            self.stop = [self.stop]

        if self.stop_token_ids is None:
            self.stop_token_ids = []

        if self.bad_words is None:
            self.bad_words = []

        if self.logprobs is True:
            self.logprobs = 1

        if self.prompt_logprobs is True:
            self.prompt_logprobs = 1

        # Number of characters to hold back for stop string evaluation
        # until sequence is finished.
        if self.stop and not self.include_stop_str_in_output:
            self.output_text_buffer_length = max(len(s) for s in self.stop) - 1

        self._verify_args()

        if self.temperature < _SAMPLING_EPS:
            # Zero temperature means greedy sampling.
            self.top_p = 1.0
            self.top_k = 0
            self.min_p = 0.0
            self._verify_greedy_sampling()

        # eos_token_id is added to this by the engine
        self._all_stop_token_ids.update(self.stop_token_ids)

    def _verify_args(self) -> None:
        if not isinstance(self.n, int):
            raise ValueError(f"n must be an int, but is of "
                             f"type {type(self.n)}")
        if self.n < 1:
            raise ValueError(f"n must be at least 1, got {self.n}.")
        if self.best_of is not None:
            if not isinstance(self.best_of, int):
                raise ValueError(
                    f"best_of must be an integer, got {type(self.best_of)}")
            if self.best_of < 1:
                raise ValueError(
                    f"best_of must be at least 1, got {self.best_of}")
            if self.best_of < self.n:
                raise ValueError(
                    f"best_of must be greater than or equal to n, "
                    f"got n={self.n} and best_of={self.best_of}.")
        if not -2.0 <= self.presence_penalty <= 2.0:
            raise ValueError("presence_penalty must be in [-2, 2], got "
                             f"{self.presence_penalty}.")
        if not -2.0 <= self.frequency_penalty <= 2.0:
            raise ValueError("frequency_penalty must be in [-2, 2], got "
                             f"{self.frequency_penalty}.")
        if self.repetition_penalty <= 0.0:
            raise ValueError(
                "repetition_penalty must be greater than zero, got "
                f"{self.repetition_penalty}.")
        if self.temperature < 0.0:
            raise ValueError(
                f"temperature must be non-negative, got {self.temperature}.")
        if not 0.0 < self.top_p <= 1.0:
            raise ValueError(f"top_p must be in (0, 1], got {self.top_p}.")
        # quietly accept -1 as disabled, but prefer 0
        if self.top_k < -1:
            raise ValueError(f"top_k must be 0 (disable), or at least 1, "
                             f"got {self.top_k}.")
        if not isinstance(self.top_k, int):
            raise TypeError(
                f"top_k must be an integer, got {type(self.top_k).__name__}")
        if not 0.0 <= self.min_p <= 1.0:
            raise ValueError("min_p must be in [0, 1], got "
                             f"{self.min_p}.")
        if self.max_tokens is not None and self.max_tokens < 1:
            raise ValueError(
                f"max_tokens must be at least 1, got {self.max_tokens}.")
        if self.min_tokens < 0:
            raise ValueError(f"min_tokens must be greater than or equal to 0, "
                             f"got {self.min_tokens}.")
        if self.max_tokens is not None and self.min_tokens > self.max_tokens:
            raise ValueError(
                f"min_tokens must be less than or equal to "
                f"max_tokens={self.max_tokens}, got {self.min_tokens}.")
        if self.logprobs is not None and self.logprobs < 0:
            raise ValueError(
                f"logprobs must be non-negative, got {self.logprobs}.")
        if self.prompt_logprobs is not None and self.prompt_logprobs < 0:
            raise ValueError(f"prompt_logprobs must be non-negative, got "
                             f"{self.prompt_logprobs}.")
        if (self.truncate_prompt_tokens is not None
                and self.truncate_prompt_tokens < 1):
            raise ValueError(f"truncate_prompt_tokens must be >= 1, "
                             f"got {self.truncate_prompt_tokens}")
        assert isinstance(self.stop_token_ids, list)
        if not all(isinstance(st_id, int) for st_id in self.stop_token_ids):
            raise ValueError(f"stop_token_ids must contain only integers, "
                             f"got {self.stop_token_ids}.")
        assert isinstance(self.stop, list)
        if any(not stop_str for stop_str in self.stop):
            raise ValueError("stop cannot contain an empty string.")
        if self.stop and not self.detokenize:
            raise ValueError(
                "stop strings are only supported when detokenize is True. "
                "Set detokenize=True to use stop.")
        if self.best_of != self._real_n and self.output_kind == (
                RequestOutputKind.DELTA):
            raise ValueError("best_of must equal n to use output_kind=DELTA")

    def _verify_greedy_sampling(self) -> None:
        if self.n > 1:
            raise ValueError("n must be 1 when using greedy sampling, "
                             f"got {self.n}.")

    def update_from_generation_config(
            self,
            generation_config: dict[str, Any],
            model_eos_token_id: Optional[int] = None) -> None:
        """Update if there are non-default values from generation_config"""

        if model_eos_token_id is not None:
            # Add the eos token id into the sampling_params to support
            # min_tokens processing.
            self._all_stop_token_ids.add(model_eos_token_id)

        # Update eos_token_id for generation
        if (eos_ids := generation_config.get("eos_token_id")) is not None:
            # it can be either int or list of int
            eos_ids = {eos_ids} if isinstance(eos_ids, int) else set(eos_ids)
            if model_eos_token_id is not None:
                # We don't need to include the primary eos_token_id in
                # stop_token_ids since it's handled separately for stopping
                # purposes.
                eos_ids.discard(model_eos_token_id)
            if eos_ids:
                self._all_stop_token_ids.update(eos_ids)
                if not self.ignore_eos:
                    eos_ids.update(self.stop_token_ids)
                    self.stop_token_ids = list(eos_ids)

    def update_from_tokenizer(self, tokenizer: AnyTokenizer) -> None:
        if not self.bad_words:
            return
        self._bad_words_token_ids = []
        for bad_word in self.bad_words:
            # To prohibit words both at the beginning
            # and in the middle of text
            # (related to add_prefix_space tokenizer parameter)
            for add_prefix_space in [False, True]:
                prefix = " " if add_prefix_space else ""
                prompt = prefix + bad_word.lstrip()
                prompt_token_ids = tokenizer.encode(text=prompt,
                                                    add_special_tokens=False)

                # If no space at the beginning
                # or if prefix space produces a new word token
                if (not add_prefix_space) or (
                        add_prefix_space and prompt_token_ids[0]
                        != self._bad_words_token_ids[-1][0]
                        and len(prompt_token_ids) == len(
                            self._bad_words_token_ids[-1])):
                    self._bad_words_token_ids.append(prompt_token_ids)

        invalid_token_ids = [
            token_id for bad_words_token_ids in self._bad_words_token_ids
            for token_id in bad_words_token_ids
            if token_id < 0 or token_id > tokenizer.max_token_id
        ]
        if len(invalid_token_ids) > 0:
            raise ValueError(
                f"The model vocabulary size is {tokenizer.max_token_id+1},"
                f" but the following tokens"
                f" were specified as bad: {invalid_token_ids}."
                f" All token id values should be integers satisfying:"
                f" 0 <= token_id <= {tokenizer.max_token_id}.")

    @cached_property
    def sampling_type(self) -> SamplingType:
        if self.temperature < _SAMPLING_EPS:
            return SamplingType.GREEDY
        if self.seed is not None:
            return SamplingType.RANDOM_SEED
        return SamplingType.RANDOM

    @property
    def all_stop_token_ids(self) -> set[int]:
        return self._all_stop_token_ids

    @property
    def bad_words_token_ids(self) -> Optional[list[list[int]]]:
        # For internal use only. Backward compatibility not guaranteed
        return self._bad_words_token_ids

    def clone(self) -> "SamplingParams":
        """Return a deep copy, possibly sharing the LogitsProcessor objects.

        LogitsProcessor objects may hold an arbitrary, nontrivial amount of
        state that is expensive to copy, so instead of deep-copying them we
        pre-seed deepcopy's memo table: each processor is mapped to its
        ``.clone()`` result when it provides one, otherwise to itself (in
        which case it must support parallel decoding of multiple sequences).
        See https://github.com/vllm-project/vllm/issues/3087
        """
        memo = None
        if self.logits_processors is not None:
            memo = {}
            for proc in self.logits_processors:
                memo[id(proc)] = (proc.clone()
                                  if hasattr(proc, 'clone') else proc)
        return copy.deepcopy(self, memo=memo)

    def __repr__(self) -> str:
        """Render all public sampling fields in constructor-like form."""
        field_parts = (
            f"n={self.n}",
            f"presence_penalty={self.presence_penalty}",
            f"frequency_penalty={self.frequency_penalty}",
            f"repetition_penalty={self.repetition_penalty}",
            f"temperature={self.temperature}",
            f"top_p={self.top_p}",
            f"top_k={self.top_k}",
            f"min_p={self.min_p}",
            f"seed={self.seed}",
            f"stop={self.stop}",
            f"stop_token_ids={self.stop_token_ids}",
            f"bad_words={self.bad_words}",
            f"include_stop_str_in_output={self.include_stop_str_in_output}",
            f"ignore_eos={self.ignore_eos}",
            f"max_tokens={self.max_tokens}",
            f"min_tokens={self.min_tokens}",
            f"logprobs={self.logprobs}",
            f"prompt_logprobs={self.prompt_logprobs}",
            f"skip_special_tokens={self.skip_special_tokens}",
            "spaces_between_special_tokens="
            f"{self.spaces_between_special_tokens}",
            f"truncate_prompt_tokens={self.truncate_prompt_tokens}",
            f"guided_decoding={self.guided_decoding}",
            f"extra_args={self.extra_args}",
        )
        return f"SamplingParams({', '.join(field_parts)})"

_all_stop_token_ids class-attribute instance-attribute

_all_stop_token_ids: set[int] = field(default_factory=set)

_bad_words_token_ids class-attribute instance-attribute

_bad_words_token_ids: Optional[list[list[int]]] = None

_real_n class-attribute instance-attribute

_real_n: Optional[int] = None

all_stop_token_ids property

all_stop_token_ids: set[int]

allowed_token_ids class-attribute instance-attribute

allowed_token_ids: Optional[list[int]] = None

bad_words class-attribute instance-attribute

bad_words: Optional[list[str]] = None

bad_words_token_ids property

bad_words_token_ids: Optional[list[list[int]]]

best_of class-attribute instance-attribute

best_of: Optional[int] = None

detokenize class-attribute instance-attribute

detokenize: bool = True

extra_args class-attribute instance-attribute

extra_args: Optional[dict[str, Any]] = None

frequency_penalty class-attribute instance-attribute

frequency_penalty: float = 0.0

guided_decoding class-attribute instance-attribute

guided_decoding: Optional[GuidedDecodingParams] = None

ignore_eos class-attribute instance-attribute

ignore_eos: bool = False

include_stop_str_in_output class-attribute instance-attribute

include_stop_str_in_output: bool = False

logit_bias class-attribute instance-attribute

logit_bias: Optional[dict[int, float]] = None

logits_processors class-attribute instance-attribute

logits_processors: Optional[Any] = None

logprobs class-attribute instance-attribute

logprobs: Optional[int] = None

max_tokens class-attribute instance-attribute

max_tokens: Optional[int] = 16

min_p class-attribute instance-attribute

min_p: float = 0.0

min_tokens class-attribute instance-attribute

min_tokens: int = 0

n class-attribute instance-attribute

n: int = 1

output_kind class-attribute instance-attribute

output_text_buffer_length class-attribute instance-attribute

output_text_buffer_length: int = 0

presence_penalty class-attribute instance-attribute

presence_penalty: float = 0.0

prompt_logprobs class-attribute instance-attribute

prompt_logprobs: Optional[int] = None

repetition_penalty class-attribute instance-attribute

repetition_penalty: float = 1.0

sampling_type cached property

sampling_type: SamplingType

seed class-attribute instance-attribute

seed: Optional[int] = None

skip_special_tokens class-attribute instance-attribute

skip_special_tokens: bool = True

spaces_between_special_tokens class-attribute instance-attribute

spaces_between_special_tokens: bool = True

stop class-attribute instance-attribute

stop: Optional[Union[str, list[str]]] = None

stop_token_ids class-attribute instance-attribute

stop_token_ids: Optional[list[int]] = None

temperature class-attribute instance-attribute

temperature: float = 1.0

top_k class-attribute instance-attribute

top_k: int = 0

top_p class-attribute instance-attribute

top_p: float = 1.0

truncate_prompt_tokens class-attribute instance-attribute

truncate_prompt_tokens: Optional[
    Annotated[int, Meta(ge=1)]
] = None

__post_init__

__post_init__() -> None
Source code in vllm/sampling_params.py
def __post_init__(self) -> None:
    """Normalize raw field values, then validate the resulting parameters.

    `best_of` handling: if `best_of` is unset it defaults to `n`; if set,
    `n` is swapped to `best_of` and the caller's original `n` is preserved
    in `_real_n` so the engine can trim the returned sequences later.
    """
    if self.best_of:
        if self.best_of < self.n:
            raise ValueError(
                f"best_of must be greater than or equal to n, "
                f"got n={self.n} and best_of={self.best_of}.")
        if not self._real_n:
            self._real_n = self.n
            self.n = self.best_of

    # Tiny positive temperatures are numerically unstable (nan/inf in
    # tensors), so clamp them up to _MAX_TEMP.
    if 0 < self.temperature < _MAX_TEMP:
        logger.warning(
            "temperature %s is less than %s, which may cause numerical "
            "errors nan or inf in tensors. We have maxed it out to %s.",
            self.temperature, _MAX_TEMP, _MAX_TEMP)
        self.temperature = _MAX_TEMP

    # seed == -1 is an accepted alias for "no seed".
    if self.seed == -1:
        self.seed = None

    # Coerce `stop` into a list of strings.
    if isinstance(self.stop, str):
        self.stop = [self.stop]
    elif self.stop is None:
        self.stop = []

    if self.stop_token_ids is None:
        self.stop_token_ids = []

    if self.bad_words is None:
        self.bad_words = []

    # Accept bool True as shorthand for "one logprob".
    if self.logprobs is True:
        self.logprobs = 1

    if self.prompt_logprobs is True:
        self.prompt_logprobs = 1

    # Number of characters to hold back for stop string evaluation
    # until the sequence is finished.
    if self.stop and not self.include_stop_str_in_output:
        self.output_text_buffer_length = max(len(s) for s in self.stop) - 1

    self._verify_args()

    if self.temperature < _SAMPLING_EPS:
        # Zero temperature means greedy sampling: neutralize the other
        # sampling knobs before running the greedy-specific checks.
        self.top_p = 1.0
        self.top_k = 0
        self.min_p = 0.0
        self._verify_greedy_sampling()

    # eos_token_id is added to this set by the engine.
    self._all_stop_token_ids.update(self.stop_token_ids)

__repr__

__repr__() -> str
Source code in vllm/sampling_params.py
def __repr__(self) -> str:
    """Render all public sampling fields in constructor-like form."""
    shown = (
        "n", "presence_penalty", "frequency_penalty", "repetition_penalty",
        "temperature", "top_p", "top_k", "min_p", "seed", "stop",
        "stop_token_ids", "bad_words", "include_stop_str_in_output",
        "ignore_eos", "max_tokens", "min_tokens", "logprobs",
        "prompt_logprobs", "skip_special_tokens",
        "spaces_between_special_tokens", "truncate_prompt_tokens",
        "guided_decoding", "extra_args",
    )
    body = ", ".join(f"{name}={getattr(self, name)!s}" for name in shown)
    return f"SamplingParams({body})"

_verify_args

_verify_args() -> None
Source code in vllm/sampling_params.py
def _verify_args(self) -> None:
    """Validate every sampling field, raising on out-of-range values.

    Called from `__post_init__` after normalization; assumes `stop`,
    `stop_token_ids` and `bad_words` have already been coerced to lists.

    Raises:
        ValueError: for out-of-range or inconsistent parameter values.
        TypeError: if `top_k` is not an integer.
    """
    if not isinstance(self.n, int):
        raise ValueError(f"n must be an int, but is of "
                         f"type {type(self.n)}")
    if self.n < 1:
        raise ValueError(f"n must be at least 1, got {self.n}.")
    if self.best_of is not None:
        if not isinstance(self.best_of, int):
            raise ValueError(
                f"best_of must be an integer, got {type(self.best_of)}")
        if self.best_of < 1:
            raise ValueError(
                f"best_of must be at least 1, got {self.best_of}")
        if self.best_of < self.n:
            raise ValueError(
                f"best_of must be greater than or equal to n, "
                f"got n={self.n} and best_of={self.best_of}.")
    if not -2.0 <= self.presence_penalty <= 2.0:
        raise ValueError("presence_penalty must be in [-2, 2], got "
                         f"{self.presence_penalty}.")
    if not -2.0 <= self.frequency_penalty <= 2.0:
        raise ValueError("frequency_penalty must be in [-2, 2], got "
                         f"{self.frequency_penalty}.")
    if self.repetition_penalty <= 0.0:
        raise ValueError(
            "repetition_penalty must be greater than zero, got "
            f"{self.repetition_penalty}.")
    if self.temperature < 0.0:
        raise ValueError(
            f"temperature must be non-negative, got {self.temperature}.")
    if not 0.0 < self.top_p <= 1.0:
        raise ValueError(f"top_p must be in (0, 1], got {self.top_p}.")
    # Check the type BEFORE the range comparison: a non-int top_k (e.g. a
    # string) would otherwise raise a confusing TypeError from `<` instead
    # of the intended message below.
    if not isinstance(self.top_k, int):
        raise TypeError(
            f"top_k must be an integer, got {type(self.top_k).__name__}")
    # quietly accept -1 as disabled, but prefer 0
    if self.top_k < -1:
        raise ValueError(f"top_k must be 0 (disable), or at least 1, "
                         f"got {self.top_k}.")
    if not 0.0 <= self.min_p <= 1.0:
        raise ValueError("min_p must be in [0, 1], got "
                         f"{self.min_p}.")
    if self.max_tokens is not None and self.max_tokens < 1:
        raise ValueError(
            f"max_tokens must be at least 1, got {self.max_tokens}.")
    if self.min_tokens < 0:
        raise ValueError(f"min_tokens must be greater than or equal to 0, "
                         f"got {self.min_tokens}.")
    if self.max_tokens is not None and self.min_tokens > self.max_tokens:
        raise ValueError(
            f"min_tokens must be less than or equal to "
            f"max_tokens={self.max_tokens}, got {self.min_tokens}.")
    if self.logprobs is not None and self.logprobs < 0:
        raise ValueError(
            f"logprobs must be non-negative, got {self.logprobs}.")
    if self.prompt_logprobs is not None and self.prompt_logprobs < 0:
        raise ValueError(f"prompt_logprobs must be non-negative, got "
                         f"{self.prompt_logprobs}.")
    if (self.truncate_prompt_tokens is not None
            and self.truncate_prompt_tokens < 1):
        raise ValueError(f"truncate_prompt_tokens must be >= 1, "
                         f"got {self.truncate_prompt_tokens}")
    assert isinstance(self.stop_token_ids, list)
    if not all(isinstance(st_id, int) for st_id in self.stop_token_ids):
        raise ValueError(f"stop_token_ids must contain only integers, "
                         f"got {self.stop_token_ids}.")
    assert isinstance(self.stop, list)
    if any(not stop_str for stop_str in self.stop):
        raise ValueError("stop cannot contain an empty string.")
    if self.stop and not self.detokenize:
        raise ValueError(
            "stop strings are only supported when detokenize is True. "
            "Set detokenize=True to use stop.")
    # DELTA streaming returns incremental chunks per sequence, which is
    # only well-defined when every generated sequence is returned.
    if self.best_of != self._real_n and self.output_kind == (
            RequestOutputKind.DELTA):
        raise ValueError("best_of must equal n to use output_kind=DELTA")

_verify_greedy_sampling

_verify_greedy_sampling() -> None
Source code in vllm/sampling_params.py
def _verify_greedy_sampling(self) -> None:
    """Reject requests for multiple sequences under greedy sampling."""
    if self.n > 1:
        raise ValueError(
            f"n must be 1 when using greedy sampling, got {self.n}.")

clone

clone() -> SamplingParams

深拷贝,但可能不包括LogitsProcessor对象。

LogitsProcessor对象可能包含大量任意且复杂的数据,复制这些数据的成本很高。但如果不需要复制,处理器需要支持多序列的并行解码。详情参见https://github.com/vllm-project/vllm/issues/3087

Source code in vllm/sampling_params.py
def clone(self) -> "SamplingParams":
    """Return a deep copy, possibly sharing the LogitsProcessor objects.

    LogitsProcessor objects may hold an arbitrary, nontrivial amount of
    state that is expensive to copy, so instead of deep-copying them we
    pre-seed deepcopy's memo table: each processor maps to its ``.clone()``
    result when it provides one, otherwise to itself (in which case it must
    support parallel decoding of multiple sequences).
    See https://github.com/vllm-project/vllm/issues/3087
    """
    memo = None
    if self.logits_processors is not None:
        memo = {}
        for processor in self.logits_processors:
            memo[id(processor)] = (processor.clone()
                                   if hasattr(processor, 'clone')
                                   else processor)
    return copy.deepcopy(self, memo=memo)

from_optional staticmethod

from_optional(
    n: Optional[int] = 1,
    best_of: Optional[int] = None,
    presence_penalty: Optional[float] = 0.0,
    frequency_penalty: Optional[float] = 0.0,
    repetition_penalty: Optional[float] = 1.0,
    temperature: Optional[float] = 1.0,
    top_p: Optional[float] = 1.0,
    top_k: int = 0,
    min_p: float = 0.0,
    seed: Optional[int] = None,
    stop: Optional[Union[str, list[str]]] = None,
    stop_token_ids: Optional[list[int]] = None,
    bad_words: Optional[list[str]] = None,
    include_stop_str_in_output: bool = False,
    ignore_eos: bool = False,
    max_tokens: Optional[int] = 16,
    min_tokens: int = 0,
    logprobs: Optional[int] = None,
    prompt_logprobs: Optional[int] = None,
    detokenize: bool = True,
    skip_special_tokens: bool = True,
    spaces_between_special_tokens: bool = True,
    logits_processors: Optional[
        list[LogitsProcessor]
    ] = None,
    truncate_prompt_tokens: Optional[
        Annotated[int, Meta(ge=1)]
    ] = None,
    output_kind: RequestOutputKind = CUMULATIVE,
    guided_decoding: Optional[GuidedDecodingParams] = None,
    logit_bias: Optional[
        Union[dict[int, float], dict[str, float]]
    ] = None,
    allowed_token_ids: Optional[list[int]] = None,
    extra_args: Optional[dict[str, Any]] = None,
) -> SamplingParams
Source code in vllm/sampling_params.py
@staticmethod
def from_optional(
    n: Optional[int] = 1,
    best_of: Optional[int] = None,
    presence_penalty: Optional[float] = 0.0,
    frequency_penalty: Optional[float] = 0.0,
    repetition_penalty: Optional[float] = 1.0,
    temperature: Optional[float] = 1.0,
    top_p: Optional[float] = 1.0,
    top_k: int = 0,
    min_p: float = 0.0,
    seed: Optional[int] = None,
    stop: Optional[Union[str, list[str]]] = None,
    stop_token_ids: Optional[list[int]] = None,
    bad_words: Optional[list[str]] = None,
    include_stop_str_in_output: bool = False,
    ignore_eos: bool = False,
    max_tokens: Optional[int] = 16,
    min_tokens: int = 0,
    logprobs: Optional[int] = None,
    prompt_logprobs: Optional[int] = None,
    detokenize: bool = True,
    skip_special_tokens: bool = True,
    spaces_between_special_tokens: bool = True,
    logits_processors: Optional[list[LogitsProcessor]] = None,
    truncate_prompt_tokens: Optional[Annotated[int,
                                               msgspec.Meta(ge=1)]] = None,
    output_kind: RequestOutputKind = RequestOutputKind.CUMULATIVE,
    guided_decoding: Optional[GuidedDecodingParams] = None,
    logit_bias: Optional[Union[dict[int, float], dict[str, float]]] = None,
    allowed_token_ids: Optional[list[int]] = None,
    extra_args: Optional[dict[str, Any]] = None,
) -> "SamplingParams":
    """Build a SamplingParams, mapping each explicit None argument for the
    scalar knobs (n, penalties, temperature, top_p) to its default value."""
    if logit_bias is not None:
        # Convert token ids to int and clamp each bias to the
        # [-100, 100] range required by the OpenAI API spec.
        logit_bias = {
            int(token): max(-100.0, min(100.0, bias))
            for token, bias in logit_bias.items()
        }

    def _or_default(value, default):
        # None means "use the field's default value".
        return default if value is None else value

    return SamplingParams(
        n=_or_default(n, 1),
        best_of=best_of,
        presence_penalty=_or_default(presence_penalty, 0.0),
        frequency_penalty=_or_default(frequency_penalty, 0.0),
        repetition_penalty=_or_default(repetition_penalty, 1.0),
        temperature=_or_default(temperature, 1.0),
        top_p=_or_default(top_p, 1.0),
        top_k=top_k,
        min_p=min_p,
        seed=seed,
        stop=stop,
        stop_token_ids=stop_token_ids,
        bad_words=bad_words,
        include_stop_str_in_output=include_stop_str_in_output,
        ignore_eos=ignore_eos,
        max_tokens=max_tokens,
        min_tokens=min_tokens,
        logprobs=logprobs,
        prompt_logprobs=prompt_logprobs,
        detokenize=detokenize,
        skip_special_tokens=skip_special_tokens,
        spaces_between_special_tokens=spaces_between_special_tokens,
        logits_processors=logits_processors,
        truncate_prompt_tokens=truncate_prompt_tokens,
        output_kind=output_kind,
        guided_decoding=guided_decoding,
        logit_bias=logit_bias,
        allowed_token_ids=allowed_token_ids,
        extra_args=extra_args,
    )

update_from_generation_config

update_from_generation_config(
    generation_config: dict[str, Any],
    model_eos_token_id: Optional[int] = None,
) -> None

如果generation_config中有非默认值则更新

Source code in vllm/sampling_params.py
def update_from_generation_config(
        self,
        generation_config: dict[str, Any],
        model_eos_token_id: Optional[int] = None) -> None:
    """Merge non-default eos settings from a generation_config dict."""
    if model_eos_token_id is not None:
        # Track the primary eos id so min_tokens processing can see it.
        self._all_stop_token_ids.add(model_eos_token_id)

    eos_ids = generation_config.get("eos_token_id")
    if eos_ids is None:
        return
    # generation_config may store a single id or a list of ids.
    extra_eos = {eos_ids} if isinstance(eos_ids, int) else set(eos_ids)
    if model_eos_token_id is not None:
        # The primary eos_token_id is handled separately for stopping
        # purposes, so it need not appear in stop_token_ids.
        extra_eos.discard(model_eos_token_id)
    if not extra_eos:
        return
    self._all_stop_token_ids.update(extra_eos)
    if not self.ignore_eos:
        extra_eos.update(self.stop_token_ids)
        self.stop_token_ids = list(extra_eos)

update_from_tokenizer

update_from_tokenizer(tokenizer: AnyTokenizer) -> None
Source code in vllm/sampling_params.py
def update_from_tokenizer(self, tokenizer: AnyTokenizer) -> None:
    """Tokenize `bad_words` into `_bad_words_token_ids` and validate them.

    Each bad word is encoded both with and without a leading space so it is
    banned at the beginning of the text as well as mid-text (the variants
    differ for tokenizers with the add_prefix_space behavior).

    Raises:
        ValueError: if any produced token id falls outside the range
            [0, tokenizer.max_token_id].
    """
    if not self.bad_words:
        return
    self._bad_words_token_ids = []
    for bad_word in self.bad_words:
        # To prohibit words both at the beginning
        # and in the middle of text
        # (related to add_prefix_space tokenizer parameter)
        for add_prefix_space in [False, True]:
            prefix = " " if add_prefix_space else ""
            prompt = prefix + bad_word.lstrip()
            prompt_token_ids = tokenizer.encode(text=prompt,
                                                add_special_tokens=False)

            # If no space at the beginning
            # or if prefix space produces a new word token
            # (self._bad_words_token_ids[-1] is the no-prefix encoding of
            # the same word, appended on the previous loop iteration)
            if (not add_prefix_space) or (
                    add_prefix_space and prompt_token_ids[0]
                    != self._bad_words_token_ids[-1][0]
                    and len(prompt_token_ids) == len(
                        self._bad_words_token_ids[-1])):
                self._bad_words_token_ids.append(prompt_token_ids)

    # Reject ids outside the tokenizer's vocabulary range.
    invalid_token_ids = [
        token_id for bad_words_token_ids in self._bad_words_token_ids
        for token_id in bad_words_token_ids
        if token_id < 0 or token_id > tokenizer.max_token_id
    ]
    if len(invalid_token_ids) > 0:
        raise ValueError(
            f"The model vocabulary size is {tokenizer.max_token_id+1},"
            f" but the following tokens"
            f" were specified as bad: {invalid_token_ids}."
            f" All token id values should be integers satisfying:"
            f" 0 <= token_id <= {tokenizer.max_token_id}.")

SamplingType

基类: IntEnum

Source code in vllm/sampling_params.py
class SamplingType(IntEnum):
    """How the next token is selected for a request."""
    GREEDY = 0  # temperature ~ 0: deterministic argmax decoding
    RANDOM = 1  # stochastic sampling without a fixed seed
    RANDOM_SEED = 2  # stochastic sampling with a per-request seed

GREEDY class-attribute instance-attribute

GREEDY = 0

RANDOM class-attribute instance-attribute

RANDOM = 1

RANDOM_SEED class-attribute instance-attribute

RANDOM_SEED = 2