Skip to content

vllm.entrypoints.openai.tool_parsers.kimi_k2_tool_parser ¶

logger module-attribute ¶

logger = init_logger(__name__)

KimiK2ToolParser ¶

Bases: ToolParser

Source code in vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
class KimiK2ToolParser(ToolParser):
    """Tool-call parser for Kimi-K2 style model output.

    Recognizes the ``<|tool_calls_section_begin|>`` section markers (plus
    singular ``<|tool_call_section_begin|>`` variants) and the per-call
    ``<|tool_call_begin|>`` / ``<|tool_call_argument_begin|>`` /
    ``<|tool_call_end|>`` markers, extracting tool calls from both complete
    and streamed model output.
    """

    def __init__(self, tokenizer: AnyTokenizer):
        super().__init__(tokenizer)
        self.current_tool_name_sent: bool = False
        self.prev_tool_call_arr: list[dict] = []
        self.current_tool_id: int = -1
        self.streamed_args_for_tool: list[
            str
        ] = []  # map what has been streamed for each tool so far to a list

        # Section-level state management to prevent token leakage
        self.in_tool_section: bool = False
        self.token_buffer: str = ""
        # Buffer size: empirical worst-case for longest marker (~30 chars) * 2
        # + safety margin for unicode + partial overlap. Prevents unbounded growth.
        self.buffer_max_size: int = 1024
        self.section_char_count: int = 0  # Track characters processed in tool section
        self.max_section_chars: int = 8192  # Force exit if section exceeds this
        self._buffer_overflow_logged: bool = False  # Log overflow once per session

        # Support both singular and plural variants
        self.tool_calls_start_token: str = "<|tool_calls_section_begin|>"
        self.tool_calls_end_token: str = "<|tool_calls_section_end|>"
        self.tool_calls_start_token_variants: list[str] = [
            "<|tool_calls_section_begin|>",
            "<|tool_call_section_begin|>",  # singular variant
        ]
        self.tool_calls_end_token_variants: list[str] = [
            "<|tool_calls_section_end|>",
            "<|tool_call_section_end|>",  # singular variant
        ]

        self.tool_call_start_token: str = "<|tool_call_begin|>"
        self.tool_call_end_token: str = "<|tool_call_end|>"

        self.tool_call_regex = re.compile(
            r"<\|tool_call_begin\|>\s*(?P<tool_call_id>[^<]+:\d+)\s*<\|tool_call_argument_begin\|>\s*(?P<function_arguments>(?:(?!<\|tool_call_begin\|>).)*?)\s*<\|tool_call_end\|>",
            re.DOTALL,
        )

        self.stream_tool_call_portion_regex = re.compile(
            r"(?P<tool_call_id>.+:\d+)\s*<\|tool_call_argument_begin\|>\s*(?P<function_arguments>.*)"
        )

        self.stream_tool_call_name_regex = re.compile(r"(?P<tool_call_id>.+:\d+)\s*")

        if not self.model_tokenizer:
            raise ValueError(
                "The model tokenizer must be passed to the ToolParser "
                "constructor during construction."
            )
        self.tool_calls_start_token_id = self.vocab.get(self.tool_calls_start_token)
        self.tool_calls_end_token_id = self.vocab.get(self.tool_calls_end_token)

        # Get token IDs for all variants
        self.tool_calls_start_token_ids: list[int] = [
            tid
            for variant in self.tool_calls_start_token_variants
            if (tid := self.vocab.get(variant)) is not None
        ]
        self.tool_calls_end_token_ids: list[int] = [
            tid
            for variant in self.tool_calls_end_token_variants
            if (tid := self.vocab.get(variant)) is not None
        ]

        self.tool_call_start_token_id = self.vocab.get(self.tool_call_start_token)
        self.tool_call_end_token_id = self.vocab.get(self.tool_call_end_token)

        if (
            self.tool_calls_start_token_id is None
            or self.tool_calls_end_token_id is None
        ):
            raise RuntimeError(
                "Kimi-K2 Tool parser could not locate tool call start/end "
                "tokens in the tokenizer!"
            )

    def _check_and_strip_markers(self, text: str) -> tuple[str, bool, bool]:
        """
        Check for section begin/end markers in text and strip them.
        Returns: (cleaned_text, found_section_begin, found_section_end)
        """
        found_begin = False
        found_end = False
        cleaned = text

        # Check for section begin markers (any variant)
        for variant in self.tool_calls_start_token_variants:
            if variant in cleaned:
                cleaned = cleaned.replace(variant, "")
                found_begin = True

        # Check for section end markers (any variant)
        for variant in self.tool_calls_end_token_variants:
            if variant in cleaned:
                cleaned = cleaned.replace(variant, "")
                found_end = True

        return cleaned, found_begin, found_end

    def _reset_section_state(self) -> None:
        """Reset state when exiting tool section."""
        self.in_tool_section = False
        self.token_buffer = ""
        self.section_char_count = 0

    def reset_streaming_state(self) -> None:
        """
        Reset all streaming state. Call this between requests to prevent
        state leakage when parser instance is reused.
        """
        # Reset section state
        self._reset_section_state()

        # Reset parent class state
        self.current_tool_name_sent = False
        self.prev_tool_call_arr = []
        self.current_tool_id = -1
        self.streamed_args_for_tool = []

        logger.debug("Streaming state reset")

    def extract_tool_calls(
        self,
        model_output: str,
        request: ChatCompletionRequest,
    ) -> ExtractedToolCallInformation:
        """Extract tool calls from a complete (non-streaming) model output.

        Returns an ``ExtractedToolCallInformation`` whose ``content`` is the
        text preceding the tool-call section, or the whole output when no
        section marker is present or parsing fails.
        """
        # sanity check; avoid unnecessary processing
        if self.tool_calls_start_token not in model_output:
            return ExtractedToolCallInformation(
                tools_called=False, tool_calls=[], content=model_output
            )

        else:
            try:
                # there are two possible captures - between tags, or between a
                # tag and end-of-string so the result of
                # findall is an array of tuples where one is a function call and
                # the other is None
                function_call_tuples = self.tool_call_regex.findall(model_output)

                logger.debug("function_call_tuples: %s", function_call_tuples)

                tool_calls = []
                for match in function_call_tuples:
                    function_id, function_args = match
                    # function_id: functions.get_weather:0 or get_weather:0
                    function_name = function_id.split(":")[0].split(".")[-1]
                    tool_calls.append(
                        ToolCall(
                            id=function_id,
                            type="function",
                            function=FunctionCall(
                                name=function_name, arguments=function_args
                            ),
                        )
                    )

                content = model_output[: model_output.find(self.tool_calls_start_token)]
                return ExtractedToolCallInformation(
                    tools_called=True,
                    tool_calls=tool_calls,
                    content=content if content else None,
                )

            except Exception:
                logger.exception("Error in extracting tool call from response.")
                return ExtractedToolCallInformation(
                    tools_called=False, tool_calls=[], content=model_output
                )

    def extract_tool_calls_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
        request: ChatCompletionRequest,
    ) -> DeltaMessage | None:
        """Incrementally extract tool calls from a streamed delta.

        Maintains section-level state across calls so tool-section markers
        (possibly split across deltas) never leak into the content channel.
        Returns a ``DeltaMessage`` carrying content and/or tool-call deltas,
        or ``None`` to skip emitting anything for this chunk.
        """
        logger.debug("delta_text: %s", delta_text)
        logger.debug("delta_token_ids: %s", delta_token_ids)

        # Flag to defer section exit until after tool parsing completes
        deferred_section_exit = False

        # Add delta to buffer for split marker detection
        self.token_buffer += delta_text

        # Enforce buffer size limit to prevent memory issues
        if len(self.token_buffer) > self.buffer_max_size:
            if not self._buffer_overflow_logged:
                logger.warning(
                    "Token buffer exceeded max size (%d bytes), flushing excess. "
                    "This may indicate very long markers or unusual tokenization.",
                    self.buffer_max_size,
                )
                self._buffer_overflow_logged = True
            # Keep only the most recent content that might contain partial markers
            self.token_buffer = self.token_buffer[-self.buffer_max_size // 2 :]

        # Check buffer for section markers (handles split tokens)
        buffered_text, found_section_begin, found_section_end = (
            self._check_and_strip_markers(self.token_buffer)
        )

        # Track section state transitions
        if found_section_begin and not self.in_tool_section:
            logger.debug("Entering tool section")
            self.in_tool_section = True
            self.token_buffer = buffered_text  # Use cleaned buffer
            self.section_char_count = 0  # Reset counter for new section
        if found_section_end and self.in_tool_section:
            logger.debug("Detected section end marker")
            # CRITICAL: Don't exit early if tool_call_end is in this chunk.
            # Tool parser must emit final arguments/close first to avoid dropping
            # the final tool update and leaking tokens into reasoning channel.
            has_tool_end = self.tool_call_end_token_id in delta_token_ids
            if has_tool_end:
                # Defer exit until after tool parsing completes
                deferred_section_exit = True
                logger.debug("Deferring section exit: tool_call_end in same chunk")
                self.token_buffer = buffered_text
            else:
                # No tool call ending, safe to exit immediately
                logger.debug("Exiting tool section")
                remaining = buffered_text
                self._reset_section_state()
                # Return remaining text as reasoning content if non-empty
                if remaining.strip():
                    return DeltaMessage(content=remaining)
                # Return empty delta to maintain function contract
                # (always returns DeltaMessage)
                return DeltaMessage(content="")
        else:
            self.token_buffer = buffered_text

        # Check if any variant of section start token is in current_token_ids
        has_section_token = any(
            tid in current_token_ids for tid in self.tool_calls_start_token_ids
        )

        # Early return: if no section token detected yet, return as reasoning content
        if not has_section_token and not self.in_tool_section:
            logger.debug("No tool call tokens found!")
            # Don't clear buffer - it needs to accumulate partial markers
            # across deltas; overflow is bounded by the flush above.
            return DeltaMessage(content=delta_text)

        # Strip section markers from delta_text for subsequent processing.
        # This preprocessing happens BEFORE the regex-based tool call parsing
        # to ensure markers are removed cleanly before pattern matching. No
        # double-stripping occurs because section markers and tool call
        # markers are distinct.
        delta_text, _, _ = self._check_and_strip_markers(delta_text)

        # Error recovery: If in tool section for too long, force exit
        if self.in_tool_section:
            self.section_char_count += len(delta_text)
            if self.section_char_count > self.max_section_chars:
                logger.warning(
                    "Tool section exceeded max length (%d chars), forcing exit. "
                    "This may indicate malformed model output.",
                    self.max_section_chars,
                )
                self._reset_section_state()
                # Deferred exit already handled by forced exit above
                # Return remaining content as reasoning (or empty delta if no content)
                return DeltaMessage(content=delta_text if delta_text.strip() else "")

        try:
            # figure out where we are in the parsing by counting tool call
            # start & end tags
            prev_tool_start_count = previous_token_ids.count(
                self.tool_call_start_token_id
            )
            prev_tool_end_count = previous_token_ids.count(self.tool_call_end_token_id)
            cur_tool_start_count = current_token_ids.count(
                self.tool_call_start_token_id
            )
            cur_tool_end_count = current_token_ids.count(self.tool_call_end_token_id)
            tool_call_portion = None
            text_portion = None

            # case: if we're generating text, OR rounding out a tool call
            if (
                cur_tool_start_count == cur_tool_end_count
                and prev_tool_end_count == cur_tool_end_count
                and self.tool_call_end_token not in delta_text
            ):
                # Suppress content if in tool section but no tool calls
                # started yet (prevents leaking section interior as content)
                if self.in_tool_section and cur_tool_start_count == 0:
                    logger.debug(
                        "In tool section but no tool calls started yet. "
                        "Suppressing: %s",
                        delta_text,
                    )
                    # Return empty delta to maintain iterator contract
                    return DeltaMessage(content="")
                logger.debug("Generating text content! skipping tool parsing.")
                return DeltaMessage(content=delta_text)

            if self.tool_call_end_token in delta_text:
                logger.debug("tool_call_end_token in delta_text")
                full_text = current_text + delta_text
                tool_call_portion = (
                    full_text.split(self.tool_call_start_token)[-1]
                    .split(self.tool_call_end_token)[0]
                    .rstrip()
                )
                delta_text = delta_text.split(self.tool_call_end_token)[0].rstrip()
                # NOTE(review): delta_text was truncated at the end token on
                # the line above, so this split is a no-op and text_portion
                # equals the truncated delta — presumably intended to capture
                # text AFTER the end token; verify before changing, as later
                # branches key off text_portion being non-None.
                text_portion = delta_text.split(self.tool_call_end_token)[-1].lstrip()

            # case -- we're starting a new tool call
            if (
                cur_tool_start_count > cur_tool_end_count
                and cur_tool_start_count > prev_tool_start_count
            ):
                if len(delta_token_ids) > 1:
                    tool_call_portion = current_text.split(self.tool_call_start_token)[
                        -1
                    ]
                else:
                    tool_call_portion = None
                    delta = None

                text_portion = None

                # set cursors and state appropriately
                self.current_tool_id += 1
                self.current_tool_name_sent = False
                self.streamed_args_for_tool.append("")
                logger.debug("Starting on a new tool %s", self.current_tool_id)

            # case -- we're updating an existing tool call
            elif (
                cur_tool_start_count > cur_tool_end_count
                and cur_tool_start_count == prev_tool_start_count
            ):
                # get the portion of the text that's the tool call
                tool_call_portion = current_text.split(self.tool_call_start_token)[-1]
                text_portion = None

            # case -- the current tool call is being closed.
            elif (
                cur_tool_start_count == cur_tool_end_count
                and cur_tool_end_count >= prev_tool_end_count
            ):
                if self.prev_tool_call_arr is None or len(self.prev_tool_call_arr) == 0:
                    logger.debug("attempting to close tool call, but no tool call")
                    # Handle deferred section exit before returning
                    if deferred_section_exit and self.in_tool_section:
                        self._reset_section_state()
                    return None
                diff = self.prev_tool_call_arr[self.current_tool_id].get("arguments")
                if diff:
                    # BUGFIX: original used `diff is str` (identity comparison
                    # against the type object), which is always False, so the
                    # unicode_escape decode never ran. Result is overwritten
                    # below either way, but the check is now correct.
                    diff = (
                        diff.encode("utf-8").decode("unicode_escape")
                        if isinstance(diff, str)
                        else diff
                    )
                    if '"}' not in delta_text:
                        # Handle deferred section exit before returning
                        if deferred_section_exit and self.in_tool_section:
                            self._reset_section_state()
                        return None
                    end_loc = delta_text.rindex('"}')
                    diff = delta_text[:end_loc] + '"}'
                    logger.debug(
                        "Finishing tool and found diff that had not "
                        "been streamed yet: %s",
                        diff,
                    )
                    self.streamed_args_for_tool[self.current_tool_id] += diff
                    # Handle deferred section exit before returning
                    if deferred_section_exit and self.in_tool_section:
                        logger.debug("Completing deferred section exit")
                        self._reset_section_state()
                    return DeltaMessage(
                        tool_calls=[
                            DeltaToolCall(
                                index=self.current_tool_id,
                                function=DeltaFunctionCall(arguments=diff).model_dump(
                                    exclude_none=True
                                ),
                            )
                        ]
                    )

            # case -- otherwise we're just generating text
            else:
                # Check if we're in tool section - if so, suppress
                if self.in_tool_section:
                    logger.debug("In tool section, suppressing text generation")
                    # Handle deferred section exit before returning
                    if deferred_section_exit:
                        self._reset_section_state()
                    return DeltaMessage(content="")
                text = delta_text.replace(self.tool_call_start_token, "")
                text = text.replace(self.tool_call_end_token, "")
                delta = DeltaMessage(tool_calls=[], content=text)
                # Handle deferred section exit before returning
                if deferred_section_exit and self.in_tool_section:
                    self._reset_section_state()
                return delta

            current_tool_call = dict()
            if tool_call_portion:
                current_tool_call_matches = self.stream_tool_call_portion_regex.match(
                    tool_call_portion
                )
                if current_tool_call_matches:
                    tool_id, tool_args = current_tool_call_matches.groups()
                    tool_name = tool_id.split(":")[0].split(".")[-1]
                    current_tool_call["id"] = tool_id
                    current_tool_call["name"] = tool_name
                    current_tool_call["arguments"] = tool_args
                else:
                    current_tool_call_name_matches = (
                        self.stream_tool_call_name_regex.match(tool_call_portion)
                    )
                    if current_tool_call_name_matches:
                        (tool_id_str,) = current_tool_call_name_matches.groups()
                        tool_name = tool_id_str.split(":")[0].split(".")[-1]
                        current_tool_call["id"] = tool_id_str
                        current_tool_call["name"] = tool_name
                        current_tool_call["arguments"] = ""
                    else:
                        logger.debug("Not enough token")
                        return None

            # case - we haven't sent the tool name yet. If it's available, send
            #   it. otherwise, wait until it's available.
            if not self.current_tool_name_sent:
                # BUGFIX: original tested `current_tool_call is None`, but
                # current_tool_call is always a dict here; an empty dict is
                # the real "nothing parsed yet" state. Behavior-equivalent
                # (empty dict fell through to the `return None` below), but
                # this guard now actually fires.
                if not current_tool_call:
                    return None
                function_name: str | None = current_tool_call.get("name")
                tool_id = current_tool_call.get("id")
                if function_name:
                    self.current_tool_name_sent = True
                    return DeltaMessage(
                        tool_calls=[
                            DeltaToolCall(
                                index=self.current_tool_id,
                                type="function",
                                id=tool_id,
                                function=DeltaFunctionCall(
                                    name=function_name
                                ).model_dump(exclude_none=True),
                            )
                        ]
                    )
                else:
                    return None

            # case -- otherwise, send the tool call delta

            # if the tool call portion is None, send the delta as text
            if tool_call_portion is None:
                # if there's text but not tool calls, send that -
                # otherwise None to skip chunk
                delta = (
                    DeltaMessage(content=delta_text)
                    if text_portion is not None
                    else None
                )
                return delta

            # now, the nitty-gritty of tool calls
            # now we have the portion to parse as tool call.

            logger.debug(
                "Trying to parse current tool call with ID %s", self.current_tool_id
            )

            # if we're starting a new tool call, push an empty object in as
            #   a placeholder for the arguments
            if len(self.prev_tool_call_arr) <= self.current_tool_id:
                self.prev_tool_call_arr.append({})

            # main logic for tool parsing here - compare prev. partially-parsed
            #   JSON to the current partially-parsed JSON
            prev_arguments = self.prev_tool_call_arr[self.current_tool_id].get(
                "arguments"
            )
            cur_arguments = current_tool_call.get("arguments")

            logger.debug("diffing old arguments: %s", prev_arguments)
            logger.debug("against new ones: %s", cur_arguments)

            # case -- no arguments have been created yet. skip sending a delta.
            if not cur_arguments and not prev_arguments:
                logger.debug("Skipping text %s - no arguments", delta_text)
                delta = None

            # case -- prev arguments are defined, but non are now.
            #   probably impossible, but not a fatal error - just keep going
            elif not cur_arguments and prev_arguments:
                logger.error(
                    "should be impossible to have arguments reset "
                    "mid-call. skipping streaming anything."
                )
                delta = None

            # case -- we now have the first info about arguments available from
            #   autocompleting the JSON
            elif cur_arguments and not prev_arguments:
                delta = DeltaMessage(
                    tool_calls=[
                        DeltaToolCall(
                            index=self.current_tool_id,
                            function=DeltaFunctionCall(
                                arguments=cur_arguments
                            ).model_dump(exclude_none=True),
                        )
                    ]
                )
                self.streamed_args_for_tool[self.current_tool_id] = cur_arguments

            # last case -- we have an update to existing arguments.
            elif cur_arguments and prev_arguments:
                if (
                    isinstance(delta_text, str)
                    and cur_arguments != prev_arguments
                    and len(cur_arguments) > len(prev_arguments)
                    and cur_arguments.startswith(prev_arguments)
                ):
                    delta_arguments = cur_arguments[len(prev_arguments) :]
                    logger.debug("got diff %s", delta_text)

                    delta = DeltaMessage(
                        tool_calls=[
                            DeltaToolCall(
                                index=self.current_tool_id,
                                function=DeltaFunctionCall(
                                    arguments=delta_arguments
                                ).model_dump(exclude_none=True),
                            )
                        ]
                    )
                    self.streamed_args_for_tool[self.current_tool_id] = cur_arguments
                else:
                    delta = None

            # handle saving the state for the current tool into
            # the "prev" list for use in diffing for the next iteration
            if self.current_tool_id == len(self.prev_tool_call_arr) - 1:
                self.prev_tool_call_arr[self.current_tool_id] = current_tool_call
            else:
                self.prev_tool_call_arr.append(current_tool_call)

            # Handle deferred section exit after tool parsing completes
            if deferred_section_exit and self.in_tool_section:
                logger.debug("Completing deferred section exit")
                self._reset_section_state()

            return delta

        except Exception:
            logger.exception("Error trying to handle streaming tool call.")
            return None  # do not stream a delta. skip this token ID.

_buffer_overflow_logged instance-attribute ¶

_buffer_overflow_logged: bool = False

buffer_max_size instance-attribute ΒΆ

buffer_max_size: int = 1024

current_tool_id instance-attribute ΒΆ

current_tool_id: int = -1

current_tool_name_sent instance-attribute ΒΆ

current_tool_name_sent: bool = False

in_tool_section instance-attribute ΒΆ

in_tool_section: bool = False

max_section_chars instance-attribute ΒΆ

max_section_chars: int = 8192

prev_tool_call_arr instance-attribute ΒΆ

prev_tool_call_arr: list[dict] = []

section_char_count instance-attribute ΒΆ

section_char_count: int = 0

stream_tool_call_name_regex instance-attribute ΒΆ

stream_tool_call_name_regex = compile(
    "(?P<tool_call_id>.+:\\d+)\\s*"
)

stream_tool_call_portion_regex instance-attribute ΒΆ

stream_tool_call_portion_regex = compile(
    "(?P<tool_call_id>.+:\\d+)\\s*<\\|tool_call_argument_begin\\|>\\s*(?P<function_arguments>.*)"
)

streamed_args_for_tool instance-attribute ΒΆ

streamed_args_for_tool: list[str] = []

token_buffer instance-attribute ΒΆ

token_buffer: str = ''

tool_call_end_token instance-attribute ΒΆ

tool_call_end_token: str = '<|tool_call_end|>'

tool_call_end_token_id instance-attribute ΒΆ

tool_call_end_token_id = get(tool_call_end_token)

tool_call_regex instance-attribute ΒΆ

tool_call_regex = compile(
    "<\\|tool_call_begin\\|>\\s*(?P<tool_call_id>[^<]+:\\d+)\\s*<\\|tool_call_argument_begin\\|>\\s*(?P<function_arguments>(?:(?!<\\|tool_call_begin\\|>).)*?)\\s*<\\|tool_call_end\\|>",
    DOTALL,
)

tool_call_start_token instance-attribute ΒΆ

tool_call_start_token: str = '<|tool_call_begin|>'

tool_call_start_token_id instance-attribute ΒΆ

tool_call_start_token_id = get(tool_call_start_token)

tool_calls_end_token instance-attribute ΒΆ

tool_calls_end_token: str = '<|tool_calls_section_end|>'

tool_calls_end_token_id instance-attribute ΒΆ

tool_calls_end_token_id = get(tool_calls_end_token)

tool_calls_end_token_ids instance-attribute ΒΆ

tool_calls_end_token_ids: list[int] = [
    tid
    for variant in (tool_calls_end_token_variants)
    if (tid := (get(variant))) is not None
]

tool_calls_end_token_variants instance-attribute ΒΆ

tool_calls_end_token_variants: list[str] = [
    "<|tool_calls_section_end|>",
    "<|tool_call_section_end|>",
]

tool_calls_start_token instance-attribute ΒΆ

tool_calls_start_token: str = "<|tool_calls_section_begin|>"

tool_calls_start_token_id instance-attribute ΒΆ

tool_calls_start_token_id = get(tool_calls_start_token)

tool_calls_start_token_ids instance-attribute ΒΆ

tool_calls_start_token_ids: list[int] = [
    tid
    for variant in (tool_calls_start_token_variants)
    if (tid := (get(variant))) is not None
]

tool_calls_start_token_variants instance-attribute ΒΆ

tool_calls_start_token_variants: list[str] = [
    "<|tool_calls_section_begin|>",
    "<|tool_call_section_begin|>",
]

__init__ ¶

__init__(tokenizer: AnyTokenizer)
Source code in vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py
def __init__(self, tokenizer: AnyTokenizer):
    """Initialize the Kimi-K2 tool parser and resolve special-token ids."""
    super().__init__(tokenizer)

    # Per-request streaming cursors.
    self.current_tool_name_sent: bool = False
    self.prev_tool_call_arr: list[dict] = []
    self.current_tool_id: int = -1
    # One accumulated-arguments string per tool streamed so far.
    self.streamed_args_for_tool: list[str] = []

    # Section-level state management to prevent token leakage into the
    # reasoning/content channel.
    self.in_tool_section: bool = False
    self.token_buffer: str = ""
    # Buffer size: empirical worst-case for longest marker (~30 chars) * 2
    # + safety margin for unicode + partial overlap. Prevents unbounded growth.
    self.buffer_max_size: int = 1024
    self.section_char_count: int = 0  # Characters processed in tool section
    self.max_section_chars: int = 8192  # Force exit if section exceeds this
    self._buffer_overflow_logged: bool = False  # Log overflow once per session

    # Canonical (plural) markers plus the singular spellings some
    # checkpoints emit.
    self.tool_calls_start_token: str = "<|tool_calls_section_begin|>"
    self.tool_calls_end_token: str = "<|tool_calls_section_end|>"
    self.tool_calls_start_token_variants: list[str] = [
        "<|tool_calls_section_begin|>",
        "<|tool_call_section_begin|>",  # singular variant
    ]
    self.tool_calls_end_token_variants: list[str] = [
        "<|tool_calls_section_end|>",
        "<|tool_call_section_end|>",  # singular variant
    ]
    self.tool_call_start_token: str = "<|tool_call_begin|>"
    self.tool_call_end_token: str = "<|tool_call_end|>"

    # Full-output matcher: id, then arguments, bounded by call markers.
    self.tool_call_regex = re.compile(
        r"<\|tool_call_begin\|>\s*(?P<tool_call_id>[^<]+:\d+)\s*<\|tool_call_argument_begin\|>\s*(?P<function_arguments>(?:(?!<\|tool_call_begin\|>).)*?)\s*<\|tool_call_end\|>",
        re.DOTALL,
    )
    # Streaming matchers for a partially-emitted call (with/without args).
    self.stream_tool_call_portion_regex = re.compile(
        r"(?P<tool_call_id>.+:\d+)\s*<\|tool_call_argument_begin\|>\s*(?P<function_arguments>.*)"
    )
    self.stream_tool_call_name_regex = re.compile(r"(?P<tool_call_id>.+:\d+)\s*")

    if not self.model_tokenizer:
        raise ValueError(
            "The model tokenizer must be passed to the ToolParser "
            "constructor during construction."
        )

    # Resolve marker token ids from the vocabulary (None when absent).
    lookup = self.vocab.get
    self.tool_calls_start_token_id = lookup(self.tool_calls_start_token)
    self.tool_calls_end_token_id = lookup(self.tool_calls_end_token)

    # Token ids for every known marker variant present in the vocab.
    self.tool_calls_start_token_ids: list[int] = [
        tid
        for variant in self.tool_calls_start_token_variants
        if (tid := lookup(variant)) is not None
    ]
    self.tool_calls_end_token_ids: list[int] = [
        tid
        for variant in self.tool_calls_end_token_variants
        if (tid := lookup(variant)) is not None
    ]

    self.tool_call_start_token_id = lookup(self.tool_call_start_token)
    self.tool_call_end_token_id = lookup(self.tool_call_end_token)

    # The canonical section markers are required for section tracking.
    if (
        self.tool_calls_start_token_id is None
        or self.tool_calls_end_token_id is None
    ):
        raise RuntimeError(
            "Kimi-K2 Tool parser could not locate tool call start/end "
            "tokens in the tokenizer!"
        )

_check_and_strip_markers ΒΆ

_check_and_strip_markers(
    text: str,
) -> tuple[str, bool, bool]

Check for section begin/end markers in text and strip them. Returns: (cleaned_text, found_section_begin, found_section_end)

Source code in vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py
def _check_and_strip_markers(self, text: str) -> tuple[str, bool, bool]:
    """
    Remove every known section begin/end marker variant from *text*.

    Returns:
        (cleaned_text, found_section_begin, found_section_end)
    """
    cleaned = text
    found_begin = False
    found_end = False

    # Strip all begin-marker variants, noting whether any were present.
    for marker in self.tool_calls_start_token_variants:
        if marker in cleaned:
            found_begin = True
            cleaned = cleaned.replace(marker, "")

    # Same treatment for the end-marker variants.
    for marker in self.tool_calls_end_token_variants:
        if marker in cleaned:
            found_end = True
            cleaned = cleaned.replace(marker, "")

    return cleaned, found_begin, found_end

_reset_section_state ΒΆ

_reset_section_state() -> None

Reset state when exiting tool section.

Source code in vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py
def _reset_section_state(self) -> None:
    """Clear all tool-section tracking state (used when exiting a section)."""
    self.section_char_count = 0
    self.token_buffer = ""
    self.in_tool_section = False

extract_tool_calls ΒΆ

extract_tool_calls(
    model_output: str, request: ChatCompletionRequest
) -> ExtractedToolCallInformation
Source code in vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py
def extract_tool_calls(
    self,
    model_output: str,
    request: ChatCompletionRequest,
) -> ExtractedToolCallInformation:
    """
    Extract tool calls from a complete (non-streaming) model response.

    Recognizes both the canonical plural section marker and the singular
    variant, consistent with the variant handling used by the streaming
    path. Everything before the first section marker is returned as
    normal content.

    Returns:
        ExtractedToolCallInformation with tools_called, tool_calls, and
        leftover content (None when there is no leading content).
    """
    # Sanity check; avoid unnecessary processing. Previously only the
    # canonical plural marker was checked, so output using the singular
    # variant was returned verbatim with no tool calls extracted (and
    # `find` on the plural marker would have mis-sliced content).
    start_positions = [
        idx
        for variant in self.tool_calls_start_token_variants
        if (idx := model_output.find(variant)) != -1
    ]
    if not start_positions:
        return ExtractedToolCallInformation(
            tools_called=False, tool_calls=[], content=model_output
        )

    try:
        # Each match yields (tool_call_id, function_arguments); the id
        # has the form "functions.get_weather:0" or "get_weather:0".
        function_call_tuples = self.tool_call_regex.findall(model_output)

        logger.debug("function_call_tuples: %s", function_call_tuples)

        tool_calls = []
        for function_id, function_args in function_call_tuples:
            # Strip the optional "functions." namespace and keep the bare
            # function name (":<index>" suffix is removed by the first split).
            function_name = function_id.split(":")[0].split(".")[-1]
            tool_calls.append(
                ToolCall(
                    id=function_id,
                    type="function",
                    function=FunctionCall(
                        name=function_name, arguments=function_args
                    ),
                )
            )

        # Content is everything before the earliest section marker.
        content = model_output[: min(start_positions)]
        return ExtractedToolCallInformation(
            tools_called=True,
            tool_calls=tool_calls,
            content=content if content else None,
        )

    except Exception:
        logger.exception("Error in extracting tool call from response.")
        return ExtractedToolCallInformation(
            tools_called=False, tool_calls=[], content=model_output
        )

extract_tool_calls_streaming ΒΆ

extract_tool_calls_streaming(
    previous_text: str,
    current_text: str,
    delta_text: str,
    previous_token_ids: Sequence[int],
    current_token_ids: Sequence[int],
    delta_token_ids: Sequence[int],
    request: ChatCompletionRequest,
) -> DeltaMessage | None
Source code in vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
def extract_tool_calls_streaming(
    self,
    previous_text: str,
    current_text: str,
    delta_text: str,
    previous_token_ids: Sequence[int],
    current_token_ids: Sequence[int],
    delta_token_ids: Sequence[int],
    request: ChatCompletionRequest,
) -> DeltaMessage | None:
    """
    Incrementally extract tool calls from one streamed delta.

    Maintains cross-call state (``in_tool_section``, ``token_buffer``,
    ``prev_tool_call_arr``, ``streamed_args_for_tool``) so that section
    markers split across deltas are still detected and tool arguments are
    diffed against what was already streamed.

    Returns:
        A DeltaMessage carrying either plain content or tool-call deltas,
        or None to skip emitting anything for this delta.
    """
    logger.debug("delta_text: %s", delta_text)
    logger.debug("delta_token_ids: %s", delta_token_ids)

    # Flag to defer section exit until after tool parsing completes
    deferred_section_exit = False

    # Add delta to buffer for split marker detection
    self.token_buffer += delta_text

    # Enforce buffer size limit to prevent memory issues
    if len(self.token_buffer) > self.buffer_max_size:
        if not self._buffer_overflow_logged:
            logger.warning(
                "Token buffer exceeded max size (%d bytes), flushing excess. "
                "This may indicate very long markers or unusual tokenization.",
                self.buffer_max_size,
            )
            self._buffer_overflow_logged = True
        # Keep only the most recent content that might contain partial markers
        self.token_buffer = self.token_buffer[-self.buffer_max_size // 2 :]

    # Check buffer for section markers (handles split tokens)
    buffered_text, found_section_begin, found_section_end = (
        self._check_and_strip_markers(self.token_buffer)
    )

    # Track section state transitions
    if found_section_begin and not self.in_tool_section:
        logger.debug("Entering tool section")
        self.in_tool_section = True
        self.token_buffer = buffered_text  # Use cleaned buffer
        self.section_char_count = 0  # Reset counter for new section
    if found_section_end and self.in_tool_section:
        logger.debug("Detected section end marker")
        # CRITICAL: Don't exit early if tool_call_end is in this chunk.
        # Tool parser must emit final arguments/close first to avoid dropping
        # the final tool update and leaking tokens into reasoning channel.
        has_tool_end = self.tool_call_end_token_id in delta_token_ids
        if has_tool_end:
            # Defer exit until after tool parsing completes
            deferred_section_exit = True
            logger.debug("Deferring section exit: tool_call_end in same chunk")
            self.token_buffer = buffered_text
        else:
            # No tool call ending, safe to exit immediately
            logger.debug("Exiting tool section")
            remaining = buffered_text
            self._reset_section_state()
            # Return remaining text as reasoning content if non-empty
            if remaining.strip():
                return DeltaMessage(content=remaining)
            # Return empty delta to maintain function contract
            # (always returns DeltaMessage)
            return DeltaMessage(content="")
    else:
        # No section-end handled this round; persist the cleaned buffer.
        self.token_buffer = buffered_text

    # Check if any variant of section start token is in current_token_ids
    has_section_token = any(
        tid in current_token_ids for tid in self.tool_calls_start_token_ids
    )

    # Early return: if no section token detected yet, return as reasoning content
    if not has_section_token and not self.in_tool_section:
        logger.debug("No tool call tokens found!")
        # Don't clear buffer - it needs to accumulate partial markers across deltas
        # Buffer overflow is already protected by lines 215-224
        return DeltaMessage(content=delta_text)

    # Strip section markers from delta_text for subsequent processing
    # NOTE: This preprocessing happens BEFORE the regex-based tool call
    # parsing (from PR #24847) to ensure markers are removed cleanly
    # before pattern matching. No double-stripping occurs because
    # section markers and tool call markers are distinct.
    delta_text, _, _ = self._check_and_strip_markers(delta_text)

    # Error recovery: If in tool section for too long, force exit
    if self.in_tool_section:
        self.section_char_count += len(delta_text)
        if self.section_char_count > self.max_section_chars:
            logger.warning(
                "Tool section exceeded max length (%d chars), forcing exit. "
                "This may indicate malformed model output.",
                self.max_section_chars,
            )
            self._reset_section_state()
            # Deferred exit already handled by forced exit above
            # Return remaining content as reasoning (or empty delta if no content)
            return DeltaMessage(content=delta_text if delta_text.strip() else "")

    try:
        # figure out where we are in the parsing by counting tool call
        # start & end tags
        prev_tool_start_count = previous_token_ids.count(
            self.tool_call_start_token_id
        )
        prev_tool_end_count = previous_token_ids.count(self.tool_call_end_token_id)
        cur_tool_start_count = current_token_ids.count(
            self.tool_call_start_token_id
        )
        cur_tool_end_count = current_token_ids.count(self.tool_call_end_token_id)
        tool_call_portion = None
        text_portion = None

        # case: if we're generating text, OR rounding out a tool call
        if (
            cur_tool_start_count == cur_tool_end_count
            and prev_tool_end_count == cur_tool_end_count
            and self.tool_call_end_token not in delta_text
        ):
            # CRITICAL FIX: Suppress content if in tool section but
            # no tool calls started
            if self.in_tool_section and cur_tool_start_count == 0:
                logger.debug(
                    "In tool section but no tool calls started yet. "
                    "Suppressing: %s",
                    delta_text,
                )
                # Return empty delta to maintain iterator contract
                return DeltaMessage(content="")
            logger.debug("Generating text content! skipping tool parsing.")
            return DeltaMessage(content=delta_text)

        if self.tool_call_end_token in delta_text:
            logger.debug("tool_call_end_token in delta_text")
            full_text = current_text + delta_text
            tool_call_portion = (
                full_text.split(self.tool_call_start_token)[-1]
                .split(self.tool_call_end_token)[0]
                .rstrip()
            )
            delta_text = delta_text.split(self.tool_call_end_token)[0].rstrip()
            # NOTE(review): text_portion is computed from the ALREADY
            # truncated delta_text (the end token can no longer be found),
            # so the split returns the whole truncated string — verify this
            # ordering is intended rather than using the pre-split value.
            text_portion = delta_text.split(self.tool_call_end_token)[-1].lstrip()

        # case -- we're starting a new tool call
        if (
            cur_tool_start_count > cur_tool_end_count
            and cur_tool_start_count > prev_tool_start_count
        ):
            if len(delta_token_ids) > 1:
                tool_call_portion = current_text.split(self.tool_call_start_token)[
                    -1
                ]
            else:
                tool_call_portion = None
                delta = None

            text_portion = None

            # set cursors and state appropriately
            self.current_tool_id += 1
            self.current_tool_name_sent = False
            self.streamed_args_for_tool.append("")
            logger.debug("Starting on a new tool %s", self.current_tool_id)

        # case -- we're updating an existing tool call
        elif (
            cur_tool_start_count > cur_tool_end_count
            and cur_tool_start_count == prev_tool_start_count
        ):
            # get the portion of the text that's the tool call
            tool_call_portion = current_text.split(self.tool_call_start_token)[-1]
            text_portion = None

        # case -- the current tool call is being closed.
        elif (
            cur_tool_start_count == cur_tool_end_count
            and cur_tool_end_count >= prev_tool_end_count
        ):
            if self.prev_tool_call_arr is None or len(self.prev_tool_call_arr) == 0:
                logger.debug("attempting to close tool call, but no tool call")
                # Handle deferred section exit before returning
                if deferred_section_exit and self.in_tool_section:
                    self._reset_section_state()
                return None
            diff = self.prev_tool_call_arr[self.current_tool_id].get("arguments")
            if diff:
                # NOTE(review): `diff is str` compares against the type
                # object and is always False, so the unicode_escape branch
                # never runs; `diff` is also overwritten below from
                # delta_text. Looks like dead code — confirm whether
                # isinstance() unescaping was intended here.
                diff = (
                    diff.encode("utf-8").decode("unicode_escape")
                    if diff is str
                    else diff
                )
                if '"}' not in delta_text:
                    # Handle deferred section exit before returning
                    if deferred_section_exit and self.in_tool_section:
                        self._reset_section_state()
                    return None
                end_loc = delta_text.rindex('"}')
                diff = delta_text[:end_loc] + '"}'
                logger.debug(
                    "Finishing tool and found diff that had not "
                    "been streamed yet: %s",
                    diff,
                )
                self.streamed_args_for_tool[self.current_tool_id] += diff
                # Handle deferred section exit before returning
                if deferred_section_exit and self.in_tool_section:
                    logger.debug("Completing deferred section exit")
                    self._reset_section_state()
                return DeltaMessage(
                    tool_calls=[
                        DeltaToolCall(
                            index=self.current_tool_id,
                            function=DeltaFunctionCall(arguments=diff).model_dump(
                                exclude_none=True
                            ),
                        )
                    ]
                )

        # case -- otherwise we're just generating text
        else:
            # Check if we're in tool section - if so, suppress
            if self.in_tool_section:
                logger.debug("In tool section, suppressing text generation")
                # Handle deferred section exit before returning
                if deferred_section_exit:
                    self._reset_section_state()
                return DeltaMessage(content="")
            text = delta_text.replace(self.tool_call_start_token, "")
            text = text.replace(self.tool_call_end_token, "")
            delta = DeltaMessage(tool_calls=[], content=text)
            # Handle deferred section exit before returning
            if deferred_section_exit and self.in_tool_section:
                self._reset_section_state()
            return delta

        # Parse the partial tool call: first try to capture both id and
        # arguments, then fall back to just the id (name-only so far).
        current_tool_call = dict()
        if tool_call_portion:
            current_tool_call_matches = self.stream_tool_call_portion_regex.match(
                tool_call_portion
            )
            if current_tool_call_matches:
                tool_id, tool_args = current_tool_call_matches.groups()
                tool_name = tool_id.split(":")[0].split(".")[-1]
                current_tool_call["id"] = tool_id
                current_tool_call["name"] = tool_name
                current_tool_call["arguments"] = tool_args
            else:
                current_tool_call_name_matches = (
                    self.stream_tool_call_name_regex.match(tool_call_portion)
                )
                if current_tool_call_name_matches:
                    (tool_id_str,) = current_tool_call_name_matches.groups()
                    tool_name = tool_id_str.split(":")[0].split(".")[-1]
                    current_tool_call["id"] = tool_id_str
                    current_tool_call["name"] = tool_name
                    current_tool_call["arguments"] = ""
                else:
                    logger.debug("Not enough token")
                    return None

        # case - we haven't sent the tool name yet. If it's available, send
        #   it. otherwise, wait until it's available.
        if not self.current_tool_name_sent:
            if current_tool_call is None:
                return None
            function_name: str | None = current_tool_call.get("name")
            tool_id = current_tool_call.get("id")
            if function_name:
                self.current_tool_name_sent = True
                return DeltaMessage(
                    tool_calls=[
                        DeltaToolCall(
                            index=self.current_tool_id,
                            type="function",
                            id=tool_id,
                            function=DeltaFunctionCall(
                                name=function_name
                            ).model_dump(exclude_none=True),
                        )
                    ]
                )
            else:
                return None

        # case -- otherwise, send the tool call delta

        # if the tool call portion is None, send the delta as text
        if tool_call_portion is None:
            # if there's text but not tool calls, send that -
            # otherwise None to skip chunk
            delta = (
                DeltaMessage(content=delta_text)
                if text_portion is not None
                else None
            )
            return delta

        # now, the nitty-gritty of tool calls
        # now we have the portion to parse as tool call.

        logger.debug(
            "Trying to parse current tool call with ID %s", self.current_tool_id
        )

        # if we're starting a new tool call, push an empty object in as
        #   a placeholder for the arguments
        if len(self.prev_tool_call_arr) <= self.current_tool_id:
            self.prev_tool_call_arr.append({})

        # main logic for tool parsing here - compare prev. partially-parsed
        #   JSON to the current partially-parsed JSON
        prev_arguments = self.prev_tool_call_arr[self.current_tool_id].get(
            "arguments"
        )
        cur_arguments = current_tool_call.get("arguments")

        logger.debug("diffing old arguments: %s", prev_arguments)
        logger.debug("against new ones: %s", cur_arguments)

        # case -- no arguments have been created yet. skip sending a delta.
        if not cur_arguments and not prev_arguments:
            logger.debug("Skipping text %s - no arguments", delta_text)
            delta = None

        # case -- prev arguments are defined, but non are now.
        #   probably impossible, but not a fatal error - just keep going
        elif not cur_arguments and prev_arguments:
            logger.error(
                "should be impossible to have arguments reset "
                "mid-call. skipping streaming anything."
            )
            delta = None

        # case -- we now have the first info about arguments available from
        #   autocompleting the JSON
        elif cur_arguments and not prev_arguments:
            delta = DeltaMessage(
                tool_calls=[
                    DeltaToolCall(
                        index=self.current_tool_id,
                        function=DeltaFunctionCall(
                            arguments=cur_arguments
                        ).model_dump(exclude_none=True),
                    )
                ]
            )
            self.streamed_args_for_tool[self.current_tool_id] = cur_arguments

        # last case -- we have an update to existing arguments.
        elif cur_arguments and prev_arguments:
            # Only stream the suffix when the new arguments strictly extend
            # the previous ones; anything else (retokenization, shrinkage)
            # is skipped rather than risking a corrupt diff.
            if (
                isinstance(delta_text, str)
                and cur_arguments != prev_arguments
                and len(cur_arguments) > len(prev_arguments)
                and cur_arguments.startswith(prev_arguments)
            ):
                delta_arguments = cur_arguments[len(prev_arguments) :]
                logger.debug("got diff %s", delta_text)

                delta = DeltaMessage(
                    tool_calls=[
                        DeltaToolCall(
                            index=self.current_tool_id,
                            function=DeltaFunctionCall(
                                arguments=delta_arguments
                            ).model_dump(exclude_none=True),
                        )
                    ]
                )
                self.streamed_args_for_tool[self.current_tool_id] = cur_arguments
            else:
                delta = None

        # handle saving the state for the current tool into
        # the "prev" list for use in diffing for the next iteration
        if self.current_tool_id == len(self.prev_tool_call_arr) - 1:
            self.prev_tool_call_arr[self.current_tool_id] = current_tool_call
        else:
            self.prev_tool_call_arr.append(current_tool_call)

        # Handle deferred section exit after tool parsing completes
        if deferred_section_exit and self.in_tool_section:
            logger.debug("Completing deferred section exit")
            self._reset_section_state()

        return delta

    except Exception:
        logger.exception("Error trying to handle streaming tool call.")
        return None  # do not stream a delta. skip this token ID.

reset_streaming_state ΒΆ

reset_streaming_state() -> None

Reset all streaming state. Call this between requests to prevent state leakage when parser instance is reused.

Source code in vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py
def reset_streaming_state(self) -> None:
    """
    Reset all streaming state. Call this between requests to prevent
    state leakage when parser instance is reused.
    """
    # Clear section-tracking state first, then the per-tool cursors
    # maintained by the parent class.
    self._reset_section_state()

    self.streamed_args_for_tool = []
    self.current_tool_id = -1
    self.prev_tool_call_arr = []
    self.current_tool_name_sent = False

    logger.debug("Streaming state reset")