LLM Engine Example
Source: vllm-project/vllm.

This example shows how to use the `LLMEngine` class directly: prompts are submitted with `engine.add_request()`, the engine is advanced with `engine.step()`, and each `RequestOutput` is printed once it has finished.
```python
import argparse
from typing import List, Tuple

from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams
from vllm.utils import FlexibleArgumentParser


def create_test_prompts() -> List[Tuple[str, SamplingParams]]:
    """Create a list of test prompts with their sampling parameters."""
    return [
        ("A robot may not injure a human being",
         SamplingParams(temperature=0.0, logprobs=1, prompt_logprobs=1)),
        ("To be or not to be,",
         SamplingParams(temperature=0.8, top_k=5, presence_penalty=0.2)),
        ("What is the meaning of life?",
         SamplingParams(n=2,
                        best_of=5,
                        temperature=0.8,
                        top_p=0.95,
                        frequency_penalty=0.1)),
    ]


def process_requests(engine: LLMEngine,
                     test_prompts: List[Tuple[str, SamplingParams]]):
    """Continuously process a list of prompts and handle the outputs."""
    request_id = 0

    while test_prompts or engine.has_unfinished_requests():
        if test_prompts:
            prompt, sampling_params = test_prompts.pop(0)
            engine.add_request(str(request_id), prompt, sampling_params)
            request_id += 1

        request_outputs: List[RequestOutput] = engine.step()

        for request_output in request_outputs:
            if request_output.finished:
                print(request_output)


def initialize_engine(args: argparse.Namespace) -> LLMEngine:
    """Initialize the LLMEngine from the command line arguments."""
    engine_args = EngineArgs.from_cli_args(args)
    return LLMEngine.from_engine_args(engine_args)


def main(args: argparse.Namespace):
    """Main function that sets up and runs the prompt processing."""
    engine = initialize_engine(args)
    test_prompts = create_test_prompts()
    process_requests(engine, test_prompts)


if __name__ == '__main__':
    parser = FlexibleArgumentParser(
        description='Demo on using the LLMEngine class directly')
    parser = EngineArgs.add_cli_args(parser)
    args = parser.parse_args()
    main(args)
```
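Because `EngineArgs.add_cli_args(parser)` exposes the engine configuration as command-line flags, the script is configured entirely from the shell. Assuming the file is saved as `llm_engine_example.py`, it can be run with, for example, `python llm_engine_example.py --model facebook/opt-125m`; any other `EngineArgs` option can be passed the same way.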
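`process_requests` prints each finished `RequestOutput` object in full, including prompt and log-probability information. If only the generated text is of interest, the final `for` loop could be replaced by a helper like the sketch below; the helper name `print_finished` is hypothetical, and the `request_id`, `outputs`, and `text` fields may differ slightly between vLLM versions.

```python
from typing import List

from vllm import RequestOutput


def print_finished(request_outputs: List[RequestOutput]) -> None:
    """Print only the generated text of requests that have finished."""
    for request_output in request_outputs:
        if not request_output.finished:
            continue
        # A single request carries several completions when n > 1.
        for completion in request_output.outputs:
            print(f"request {request_output.request_id}: {completion.text!r}")
```

Inside `process_requests`, the trailing loop would then become a single call to `print_finished(request_outputs)` on the result of `engine.step()`.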