# API 客户端

源代码: vllm-project/vllm
"""Example Python client for `vllm.entrypoints.api_server`
NOTE: The API server is used only for demonstration and simple performance
benchmarks. It is not intended for production use.
For production use, we recommend `vllm serve` and the OpenAI client API.
"""
6
7import argparse
8import json
9from typing import Iterable, List
10
11import requests
12
13
def clear_line(n: int = 1) -> None:
    """Erase the previous ``n`` lines of terminal output.

    Emits ANSI escape sequences: move the cursor up one line, then clear
    that line, repeated ``n`` times. Used to redraw streaming output in
    place.
    """
    cursor_up = '\033[1A'
    erase_line = '\x1b[2K'
    for _ in range(n):
        # Move up, then wipe the line; flush so the terminal updates now.
        print(cursor_up, end=erase_line, flush=True)
19
20
def post_http_request(prompt: str,
                      api_url: str,
                      n: int = 1,
                      stream: bool = False) -> requests.Response:
    """POST a generation request to the demo API server.

    The payload pins beam search with temperature 0.0 and 16 max tokens —
    this client only demonstrates the `/generate` endpoint, so sampling
    parameters are fixed.

    Args:
        prompt: Text prompt to complete.
        api_url: Full URL of the server's ``/generate`` endpoint.
        n: Number of beam candidates to request.
        stream: Whether to request a streaming (NUL-delimited) response.

    Returns:
        The raw ``requests.Response`` (streaming if ``stream`` is True).
    """
    payload = {
        "prompt": prompt,
        "n": n,
        "use_beam_search": True,
        "temperature": 0.0,
        "max_tokens": 16,
        "stream": stream,
    }
    return requests.post(api_url,
                         headers={"User-Agent": "Test Client"},
                         json=payload,
                         stream=stream)
39
40
def get_streaming_response(response: requests.Response) -> Iterable[List[str]]:
    """Yield beam-candidate lists from a streaming server response.

    The server delimits JSON payloads with NUL bytes; each payload's
    ``"text"`` field holds the current list of candidate completions.
    """
    raw_chunks = response.iter_lines(chunk_size=8192,
                                     decode_unicode=False,
                                     delimiter=b"\0")
    for raw in raw_chunks:
        if not raw:
            # Skip the empty fragment produced by a trailing delimiter.
            continue
        payload = json.loads(raw.decode("utf-8"))
        yield payload["text"]
49
50
def get_response(response: requests.Response) -> List[str]:
    """Parse a non-streaming server response.

    Returns the list of candidate completions stored in the JSON body's
    ``"text"`` field.
    """
    payload = json.loads(response.content)
    return payload["text"]
55
56
if __name__ == "__main__":
    # CLI for exercising the demo API server's /generate endpoint.
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=8000)
    parser.add_argument("--n", type=int, default=4)
    parser.add_argument("--prompt", type=str, default="San Francisco is a")
    parser.add_argument("--stream", action="store_true")
    args = parser.parse_args()

    api_url = f"http://{args.host}:{args.port}/generate"

    print(f"Prompt: {args.prompt!r}\n", flush=True)
    response = post_http_request(args.prompt, api_url, args.n, args.stream)

    if args.stream:
        # Redraw the candidate list in place as new tokens stream in.
        num_printed_lines = 0
        for h in get_streaming_response(response):
            clear_line(num_printed_lines)
            for i, line in enumerate(h):
                print(f"Beam candidate {i}: {line!r}", flush=True)
            num_printed_lines = len(h)
    else:
        for i, line in enumerate(get_response(response)):
            print(f"Beam candidate {i}: {line!r}", flush=True)