Skip to content

Commit 9be9cc2

Browse files
seanzhougooglecopybara-github
authored and committed
feat: Support static instructions
Static instructions: always added to system instructions, enabling context caching. Dynamic instructions: added to system instructions when no static instruction exists (for backward compatibility); otherwise inserted before the last batch of continuous user content. PiperOrigin-RevId: 809170679
1 parent f4e1fd9 commit 9be9cc2

File tree

7 files changed

+893
-23
lines changed

7 files changed

+893
-23
lines changed

‎src/google/adk/agents/llm_agent.py‎

Lines changed: 57 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -134,7 +134,18 @@ class LlmAgent(BaseAgent):
134134
"""The config type for this agent."""
135135

136136
instruction: Union[str, InstructionProvider] = ''
137-
"""Instructions for the LLM model, guiding the agent's behavior."""
137+
"""Dynamic instructions for the LLM model, guiding the agent's behavior.
138+
139+
These instructions can contain placeholders like {variable_name} that will be
140+
resolved at runtime using session state and context.
141+
142+
**Behavior depends on static_instruction:**
143+
- If static_instruction is None: instruction goes to system_instruction
144+
- If static_instruction is set: instruction goes to user content in the request
145+
146+
This allows for context caching optimization where static content (static_instruction)
147+
comes first in the prompt, followed by dynamic content (instruction).
148+
"""
138149

139150
global_instruction: Union[str, InstructionProvider] = ''
140151
"""Instructions for all the agents in the entire agent tree.
@@ -145,6 +156,48 @@ class LlmAgent(BaseAgent):
145156
or personality.
146157
"""
147158

159+
static_instruction: Optional[types.Content] = None
160+
"""Static instruction content sent literally as system instruction at the beginning.
161+
162+
This field is for content that never changes and doesn't contain placeholders.
163+
It's sent directly to the model without any processing or variable substitution.
164+
165+
This field is primarily for context caching optimization. Static instructions
166+
are sent as system instruction at the beginning of the request, allowing
167+
for improved performance when the static portion remains unchanged. Live API
168+
has its own cache mechanism, thus this field doesn't work with Live API.
169+
170+
**Impact on instruction field:**
171+
- When static_instruction is None: instruction → system_instruction
172+
- When static_instruction is set: instruction → user content (after static content)
173+
174+
**Context Caching:**
175+
- **Implicit Cache**: Automatic caching by model providers (no config needed)
176+
- **Explicit Cache**: Cache explicitly created by user for instructions, tools and contents
177+
178+
See below for more information about Implicit Cache and Explicit Cache:
179+
Gemini API: https://ai.google.dev/gemini-api/docs/caching?lang=python
180+
Vertex API: https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-overview
181+
182+
Setting static_instruction alone does NOT enable caching automatically.
183+
For explicit caching control, configure context_cache_config at App level.
184+
185+
**Content Support:**
186+
Can contain text, files, binaries, or any combination as types.Content
187+
supports multiple part types (text, inline_data, file_data, etc.).
188+
189+
**Example:**
190+
```python
191+
static_instruction = types.Content(
192+
role='user',
193+
parts=[
194+
types.Part(text='You are a helpful assistant.'),
195+
types.Part(file_data=types.FileData(...))
196+
]
197+
)
198+
```
199+
"""
200+
148201
tools: list[ToolUnion] = Field(default_factory=list)
149202
"""Tools available to this agent."""
150203

@@ -462,9 +515,7 @@ def __maybe_save_output_to_state(self, event: Event):
462515
):
463516

464517
result = ''.join(
465-
part.text
466-
for part in event.content.parts
467-
if part.text and not part.thought
518+
[part.text if part.text else '' for part in event.content.parts]
468519
)
469520
if self.output_schema:
470521
# If the result from the final chunk is just whitespace or empty,
@@ -600,6 +651,8 @@ def _parse_config(
600651
kwargs['model'] = config.model
601652
if config.instruction:
602653
kwargs['instruction'] = config.instruction
654+
if config.static_instruction:
655+
kwargs['static_instruction'] = config.static_instruction
603656
if config.disallow_transfer_to_parent:
604657
kwargs['disallow_transfer_to_parent'] = config.disallow_transfer_to_parent
605658
if config.disallow_transfer_to_peers:

‎src/google/adk/agents/llm_agent_config.py‎

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,25 @@ class LlmAgentConfig(BaseAgentConfig):
5353
),
5454
)
5555

56-
instruction: str = Field(description='Required. LlmAgent.instruction.')
56+
instruction: str = Field(
57+
description=(
58+
'Required. LlmAgent.instruction. Dynamic instructions with'
59+
' placeholder support. Behavior: if static_instruction is None, goes'
60+
' to system_instruction; if static_instruction is set, goes to user'
61+
' content after static content.'
62+
)
63+
)
64+
65+
static_instruction: Optional[types.Content] = Field(
66+
default=None,
67+
description=(
68+
'Optional. LlmAgent.static_instruction. Static content sent literally'
69+
' at position 0 without placeholder processing. When set, changes'
70+
' instruction behavior to go to user content instead of'
71+
' system_instruction. Supports context caching and rich content'
72+
' (text, files, binaries).'
73+
),
74+
)
5775

5876
disallow_transfer_to_parent: Optional[bool] = Field(
5977
default=None,

‎src/google/adk/flows/llm_flows/contents.py‎

Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,11 @@ async def run_async(
5858
agent.name,
5959
)
6060

61+
# Add dynamic instructions to the last user content if static instructions exist
62+
await _add_dynamic_instructions_to_user_content(
63+
invocation_context, llm_request
64+
)
65+
6166
# Maintain async generator behavior
6267
if False: # Ensures it behaves as a generator
6368
yield # This is a no-op but maintains generator structure
@@ -557,3 +562,49 @@ def _is_live_model_audio_event(event: Event) -> bool:
557562
if part.file_data and part.file_data.mime_type == 'audio/pcm':
558563
return True
559564
return False
565+
566+
567+
async def _add_dynamic_instructions_to_user_content(
    invocation_context: InvocationContext, llm_request: LlmRequest
) -> None:
  """Inserts resolved dynamic instructions as user content.

  Only applies when the agent defines both a static_instruction (which
  occupies the system-instruction slot) and a dynamic instruction. The
  dynamic instruction text is resolved (with optional session-state
  injection) and inserted as a new user Content immediately before the
  trailing run of continuous user contents in the request.
  """
  from ...agents.readonly_context import ReadonlyContext
  from ...utils import instructions_utils

  agent = invocation_context.agent

  resolved_texts: list[str] = []

  # The dynamic instruction is relocated into contents only when a static
  # instruction already occupies the system-instruction slot.
  if agent.static_instruction and agent.instruction:
    readonly_ctx = ReadonlyContext(invocation_context)
    raw_text, bypass_state_injection = await agent.canonical_instruction(
        readonly_ctx
    )
    resolved = (
        raw_text
        if bypass_state_injection
        else await instructions_utils.inject_session_state(
            raw_text, readonly_ctx
        )
    )
    if resolved:  # Skip empty instruction text.
      resolved_texts.append(resolved)

  if not resolved_texts:
    return

  # Find the insertion point: just before the last continuous batch of user
  # content. Scanning from the end, the first non-user content marks the
  # boundary; if everything (or nothing) is user content, insert at index 0.
  insert_index = 0
  for idx in range(len(llm_request.contents) - 1, -1, -1):
    if llm_request.contents[idx].role != 'user':
      insert_index = idx + 1
      break

  llm_request.contents.insert(
      insert_index,
      types.Content(
          role='user',
          parts=[types.Part(text=text) for text in resolved_texts],
      ),
  )

‎src/google/adk/flows/llm_flows/instructions.py‎

Lines changed: 12 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -16,16 +16,13 @@
1616

1717
from __future__ import annotations
1818

19-
import re
2019
from typing import AsyncGenerator
21-
from typing import Generator
2220
from typing import TYPE_CHECKING
2321

2422
from typing_extensions import override
2523

2624
from ...agents.readonly_context import ReadonlyContext
2725
from ...events.event import Event
28-
from ...sessions.state import State
2926
from ...utils import instructions_utils
3027
from ._base_llm_processor import BaseLlmRequestProcessor
3128

@@ -50,10 +47,8 @@ async def run_async(
5047

5148
root_agent: BaseAgent = agent.root_agent
5249

53-
# Appends global instructions if set.
54-
if (
55-
isinstance(root_agent, LlmAgent) and root_agent.global_instruction
56-
): # not empty str
50+
# Handle global instructions
51+
if isinstance(root_agent, LlmAgent) and root_agent.global_instruction:
5752
raw_si, bypass_state_injection = (
5853
await root_agent.canonical_global_instruction(
5954
ReadonlyContext(invocation_context)
@@ -66,8 +61,14 @@ async def run_async(
6661
)
6762
llm_request.append_instructions([si])
6863

69-
# Appends agent instructions if set.
70-
if agent.instruction: # not empty str
64+
# Handle static_instruction - add via append_instructions
65+
if agent.static_instruction:
66+
llm_request.append_instructions(agent.static_instruction)
67+
68+
# Handle instruction based on whether static_instruction exists
69+
if agent.instruction and not agent.static_instruction:
70+
# Only add to system instructions if no static instruction exists
71+
# If static instruction exists, content processor will handle it
7172
raw_si, bypass_state_injection = await agent.canonical_instruction(
7273
ReadonlyContext(invocation_context)
7374
)
@@ -79,8 +80,8 @@ async def run_async(
7980
llm_request.append_instructions([si])
8081

8182
# Maintain async generator behavior
82-
if False: # Ensures it behaves as a generator
83-
yield # This is a no-op but maintains generator structure
83+
return
84+
yield # This line ensures it behaves as a generator but is never reached
8485

8586

8687
request_processor = _InstructionsLlmRequestProcessor()

‎src/google/adk/models/llm_request.py‎

Lines changed: 65 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,9 @@
1414

1515
from __future__ import annotations
1616

17+
import logging
1718
from typing import Optional
19+
from typing import Union
1820

1921
from google.genai import types
2022
from pydantic import BaseModel
@@ -86,17 +88,73 @@ class LlmRequest(BaseModel):
8688
cache_metadata: Optional[CacheMetadata] = None
8789
"""Cache metadata from previous requests, used for cache management."""
8890

89-
def append_instructions(
    self, instructions: Union[list[str], types.Content]
) -> None:
  """Appends instructions to the system instruction.

  Args:
    instructions: The instructions to append. Can be:
      - list[str]: strings concatenated onto the system instruction
      - types.Content: Content whose text parts are extracted and
        concatenated

  Note: Only text content is supported. The model API requires
  system_instruction to be a string, so non-text parts of a Content are
  currently ignored. In every case, text is joined onto any existing string
  system_instruction with '\\n\\n'.

  Raises:
    TypeError: If instructions is neither list[str] nor types.Content.
  """
  # Normalize both accepted input forms into a list of text fragments, then
  # share a single append path (the two branches previously duplicated it).
  if isinstance(instructions, types.Content):
    # TODO: Handle non-text contents in instruction by putting non-text parts
    # into llm_request.contents and adding a reference in the system
    # instruction that references the contents.
    # `parts` may be None on a Content; guard before iterating.
    texts = [part.text for part in instructions.parts or [] if part.text]
  elif isinstance(instructions, list) and all(
      isinstance(inst, str) for inst in instructions
  ):
    texts = instructions
  else:
    raise TypeError('instructions must be list[str] or types.Content')

  if not texts:
    return  # Nothing textual to append.

  new_text = '\n\n'.join(texts)
  if not self.config.system_instruction:
    self.config.system_instruction = new_text
  elif isinstance(self.config.system_instruction, str):
    self.config.system_instruction += '\n\n' + new_text
  else:
    # system_instruction can also be Content/Part forms in the genai config;
    # appending to those is not supported, so warn and leave it untouched.
    logging.warning(
        'Cannot append to system_instruction of unsupported type: %s. '
        'Only string system_instruction is supported.',
        type(self.config.system_instruction),
    )
100158

101159
def append_tools(self, tools: list[BaseTool]) -> None:
102160
"""Appends tools to the request.
@@ -138,4 +196,4 @@ def set_output_schema(self, base_model: type[BaseModel]) -> None:
138196
"""
139197

140198
self.config.response_schema = base_model
141-
self.config.response_mime_type = 'application/json'
199+
self.config.response_mime_type = "application/json"

0 commit comments

Comments
 (0)