'use client';
import * as React from 'react';
import { Plate, usePlateEditor } from 'platejs/react';
import { EditorKit } from '@/components/editor/editor-kit';
import { Editor, EditorContainer } from '@/components/ui/editor';
import { DEMO_VALUES } from './values/demo-values';
/**
 * Demo wrapper: creates a Plate editor preloaded with the demo value
 * matching `id` and renders it inside the standard demo container.
 */
export default function Demo({ id }: { id: string }) {
  // Editor instance configured with the shared EditorKit plugin set;
  // the initial document is looked up by demo id.
  const editor = usePlateEditor({
    plugins: EditorKit,
    value: DEMO_VALUES[id],
  });
  return (
    <Plate editor={editor}>
      <EditorContainer variant="demo">
        <Editor />
      </EditorContainer>
    </Plate>
  );
}
Features
- Context-aware command menu that adapts to cursor, text selection, and block selection workflows.
- Streaming Markdown/MDX insertion with table, column, and code block support powered by `streamInsertChunk`.
- Insert and chat review modes with localized insert previews plus undo-safe batching via `withAIBatch` and `tf.ai.undo()`.
- Block selection aware transforms to replace or append entire sections using `tf.aiChat.replaceSelection` and `tf.aiChat.insertBelow`.
- Direct integration with `@ai-sdk/react` so `api.aiChat.submit` can stream responses from Vercel AI SDK helpers.
- Suggestion and comment utilities that diff AI edits, accept/reject changes, and map AI feedback back to document ranges.
Kit Usage
Installation
The fastest way to add AI functionality is with the AIKit. It ships the configured AIPlugin, AIChatPlugin, Markdown streaming helpers, cursor overlay, and their Plate UI components.
'use client';
import { useEffect, useRef } from 'react';
import cloneDeep from 'lodash/cloneDeep.js';
import { BaseAIPlugin, withAIBatch } from '@platejs/ai';
import {
AIChatPlugin,
AIPlugin,
applyAISuggestions,
getInsertPreviewStart,
resetStreamInsertChunk,
streamInsertChunk,
useChatChunk,
} from '@platejs/ai/react';
import { ElementApi, getPluginType, KEYS, PathApi } from 'platejs';
import { usePluginOption } from 'platejs/react';
import { AILoadingBar, AIMenu } from '@/components/ui/ai-menu';
import { AIAnchorElement, AILeaf } from '@/components/ui/ai-node';
import { createAIStreamBatcher } from '@/lib/ai-stream-batching';
import { useChat } from '../use-chat';
import { CursorOverlayKit } from './cursor-overlay-kit';
import { MarkdownKit } from './markdown-kit';
/**
 * AIChatPlugin configured for the demo: wires the command endpoint,
 * the floating menu UI, and the streaming insert/edit pipeline.
 */
export const aiChatPlugin = AIChatPlugin.extend({
  options: {
    chatOptions: {
      // Server route that proxies the model provider (see API route below).
      api: '/api/ai/command',
      body: {},
    },
  },
  render: {
    afterContainer: AILoadingBar,
    afterEditable: AIMenu,
    node: AIAnchorElement,
  },
  // Cmd/Ctrl+J opens the AI menu.
  shortcuts: { show: { keys: 'mod+j' } },
  useHooks: ({ editor, getOption }) => {
    useChat();
    const mode = usePluginOption(AIChatPlugin, 'mode');
    const toolName = usePluginOption(AIChatPlugin, 'toolName');
    // Lazily-created batcher that coalesces streamed chunks before they are
    // written into the editor, instead of updating on every token.
    const insertStreamBatcherRef = useRef<ReturnType<
      typeof createAIStreamBatcher
    > | null>(null);
    if (!insertStreamBatcherRef.current) {
      insertStreamBatcherRef.current = createAIStreamBatcher({
        applyChunk: (chunk) => {
          // Streamed preview content must not pollute the undo history.
          editor.tf.withoutSaving(() => {
            // Bail out if streaming was cancelled between queue and apply.
            if (!getOption('streaming')) return;
            editor.tf.withScrolling(() => {
              streamInsertChunk(editor, chunk, {
                textProps: {
                  // Tag streamed text with the AI mark so it can be styled.
                  [getPluginType(editor, KEYS.ai)]: true,
                },
              });
            });
          });
        },
      });
    }
    // Drop any queued chunks when the component unmounts.
    useEffect(
      () => () => {
        insertStreamBatcherRef.current?.reset();
      },
      []
    );
    useChatChunk({
      onChunk: ({ chunk, isFirst, nodes, text: content }) => {
        if (isFirst && mode === 'insert') {
          // Snapshot the block the preview starts in so it can be rolled back.
          const { startBlock, startInEmptyParagraph } =
            getInsertPreviewStart(editor);
          editor.getTransforms(BaseAIPlugin).ai.beginPreview({
            originalBlocks:
              startInEmptyParagraph &&
              startBlock &&
              ElementApi.isElement(startBlock)
                ? [cloneDeep(startBlock)]
                : [],
          });
          // Insert the invisible anchor node the floating menu attaches to,
          // outside the undo history.
          editor.tf.withoutSaving(() => {
            editor.tf.insertNodes(
              {
                children: [{ text: '' }],
                type: getPluginType(editor, KEYS.aiChat),
              },
              {
                at: PathApi.next(editor.selection!.focus.path.slice(0, 1)),
              }
            );
          });
          editor.setOption(AIChatPlugin, 'streaming', true);
        }
        if (mode === 'insert' && nodes.length > 0) {
          insertStreamBatcherRef.current?.queue({ chunk, isFirst });
        }
        // Edit-in-chat mode renders the response as diff suggestions instead
        // of inserting content directly.
        if (toolName === 'edit' && mode === 'chat') {
          withAIBatch(
            editor,
            () => {
              applyAISuggestions(editor, content);
            },
            {
              // Start a fresh undo batch on the first chunk only.
              split: isFirst,
            }
          );
        }
      },
      onFinish: () => {
        // Apply any chunks still queued, then tear down streaming state.
        insertStreamBatcherRef.current?.flush();
        editor.setOption(AIChatPlugin, 'streaming', false);
        resetStreamInsertChunk(editor);
        insertStreamBatcherRef.current?.reset();
      },
    });
  },
});
export const AIKit = [
...CursorOverlayKit,
...MarkdownKit,
AIPlugin.withComponent(AILeaf),
aiChatPlugin,
];

- `AIMenu`: Floating command surface for prompts, tool shortcuts, and chat review.
- `AILoadingBar`: Displays streaming status at the editor container.
- `AIAnchorElement`: Invisible anchor node used to position the floating menu during streaming.
- `AILeaf`: Renders AI-marked text with subtle styling.
Add Kit
import { createPlateEditor } from 'platejs/react';
import { AIKit } from '@/components/editor/plugins/ai-kit';
const editor = createPlateEditor({
plugins: [
// ...otherPlugins,
...AIKit,
],
});

Add API Route
Expose a streaming command endpoint that proxies your model provider:
import type {
ChatMessage,
ToolName,
} from '@/components/editor/use-chat';
import type { NextRequest } from 'next/server';
import { createGateway } from '@ai-sdk/gateway';
import {
type LanguageModel,
type UIMessageStreamWriter,
createUIMessageStream,
createUIMessageStreamResponse,
generateText,
Output,
streamText,
tool,
} from 'ai';
import { NextResponse } from 'next/server';
import { type SlateEditor, createSlateEditor, nanoid } from 'platejs';
import { z } from 'zod';
import { BaseEditorKit } from '@/components/editor/editor-base-kit';
import { markdownJoinerTransform } from '@/lib/markdown-joiner-transform';
import {
buildEditTableMultiCellPrompt,
getChooseToolPrompt,
getCommentPrompt,
getEditPrompt,
getGeneratePrompt,
} from './prompt';
/**
 * Streaming AI command endpoint for the editor.
 *
 * Expects a JSON body with `messages` (chat history), `ctx` (editor
 * `children`, `selection`, and optionally a pinned `toolName`), plus
 * optional `apiKey` and `model` overrides. Responds with a UI message
 * stream that the client consumes via `useChat`/`useChatChunk`.
 */
export async function POST(req: NextRequest) {
  const { apiKey: key, ctx, messages: messagesRaw, model } = await req.json();
  const { children, selection, toolName: toolNameParam } = ctx;
  // Rehydrate a server-side Slate editor so the prompt builders can inspect
  // the current document and selection.
  const editor = createSlateEditor({
    plugins: BaseEditorKit,
    selection,
    value: children,
  });
  const apiKey = key || process.env.AI_GATEWAY_API_KEY;
  if (!apiKey) {
    return NextResponse.json(
      { error: 'Missing AI Gateway API key.' },
      { status: 401 }
    );
  }
  const isSelecting = editor.api.isExpanded();
  const gatewayProvider = createGateway({
    apiKey,
  });
  try {
    const stream = createUIMessageStream<ChatMessage>({
      execute: async ({ writer }) => {
        let toolName = toolNameParam;
        // When the client didn't pin a tool, ask a fast model to choose one.
        if (!toolName) {
          const prompt = getChooseToolPrompt({
            isSelecting,
            messages: messagesRaw,
          });
          // `edit` only makes sense when there is an expanded selection.
          const enumOptions = isSelecting
            ? ['generate', 'edit', 'comment']
            : ['generate', 'comment'];
          const modelId = model || 'google/gemini-2.5-flash';
          const { output: AIToolName } = await generateText({
            model: gatewayProvider(modelId),
            output: Output.choice({ options: enumOptions }),
            prompt,
          });
          // Tell the client which tool was chosen before streaming starts.
          writer.write({
            data: AIToolName as ToolName,
            type: 'data-toolName',
          });
          toolName = AIToolName;
        }
        // Named `result` (not `stream`) to avoid shadowing the outer
        // UI message stream returned from this handler.
        const result = streamText({
          experimental_transform: markdownJoinerTransform(),
          model: gatewayProvider(model || 'openai/gpt-4o-mini'),
          // Not used — the real prompt/messages are injected per step below.
          prompt: '',
          tools: {
            comment: getCommentTool(editor, {
              messagesRaw,
              model: gatewayProvider(model || 'google/gemini-2.5-flash'),
              writer,
            }),
            table: getTableTool(editor, {
              messagesRaw,
              model: gatewayProvider(model || 'google/gemini-2.5-flash'),
              writer,
            }),
          },
          prepareStep: async (step) => {
            if (toolName === 'comment') {
              return {
                ...step,
                toolChoice: { toolName: 'comment', type: 'tool' },
              };
            }
            if (toolName === 'edit') {
              const [editPrompt, editType] = getEditPrompt(editor, {
                isSelecting,
                messages: messagesRaw,
              });
              // Table editing uses the table tool
              if (editType === 'table') {
                return {
                  ...step,
                  toolChoice: { toolName: 'table', type: 'tool' },
                };
              }
              return {
                ...step,
                activeTools: [],
                model:
                  editType === 'selection'
                    ? // The selection task is more challenging, so we chose to use Gemini 2.5 Flash.
                      gatewayProvider(model || 'google/gemini-2.5-flash')
                    : gatewayProvider(model || 'openai/gpt-4o-mini'),
                messages: [
                  {
                    content: editPrompt,
                    role: 'user',
                  },
                ],
              };
            }
            if (toolName === 'generate') {
              const generatePrompt = getGeneratePrompt(editor, {
                isSelecting,
                messages: messagesRaw,
              });
              return {
                ...step,
                activeTools: [],
                messages: [
                  {
                    content: generatePrompt,
                    role: 'user',
                  },
                ],
                model: gatewayProvider(model || 'openai/gpt-4o-mini'),
              };
            }
          },
        });
        writer.merge(result.toUIMessageStream({ sendFinish: false }));
      },
    });
    return createUIMessageStreamResponse({ stream });
  } catch (error) {
    // Log with context instead of silently swallowing the failure; the
    // client still only sees a generic 500 payload.
    console.error('AI command route failed:', error);
    return NextResponse.json(
      { error: 'Failed to process AI request' },
      { status: 500 }
    );
  }
}
/**
 * Builds the `comment` tool: streams structured AI comments (block id,
 * comment text, and the quoted fragment) back to the client as
 * `data-comment` events, terminated by a `status: 'finished'` sentinel.
 */
const getCommentTool = (
  editor: SlateEditor,
  {
    messagesRaw,
    model,
    writer,
  }: {
    messagesRaw: ChatMessage[];
    model: LanguageModel;
    writer: UIMessageStreamWriter<ChatMessage>;
  }
) =>
  tool({
    description: 'Comment on the content',
    inputSchema: z.object({}),
    strict: true,
    execute: async () => {
      // Shape of each streamed comment entry produced by the model.
      const commentSchema = z.object({
        blockId: z
          .string()
          .describe(
            'The id of the starting block. If the comment spans multiple blocks, use the id of the first block.'
          ),
        comment: z
          .string()
          .describe('A brief comment or explanation for this fragment.'),
        content: z
          .string()
          .describe(
            String.raw`The original document fragment to be commented on.It can be the entire block, a small part within a block, or span multiple blocks. If spanning multiple blocks, separate them with two \n\n.`
          ),
      });
      const { partialOutputStream } = streamText({
        model,
        output: Output.array({ element: commentSchema }),
        prompt: getCommentPrompt(editor, {
          messages: messagesRaw,
        }),
      });
      // The partial stream re-emits the growing array; forward only entries
      // added since the previous emission so each comment is sent once.
      let lastLength = 0;
      for await (const partialArray of partialOutputStream) {
        for (let i = lastLength; i < partialArray.length; i++) {
          const comment = partialArray[i];
          const commentDataId = nanoid();
          writer.write({
            id: commentDataId,
            data: {
              comment,
              status: 'streaming',
            },
            type: 'data-comment',
          });
        }
        lastLength = partialArray.length;
      }
      // Sentinel event so the client knows the comment stream is complete.
      writer.write({
        id: nanoid(),
        data: {
          comment: null,
          status: 'finished',
        },
        type: 'data-comment',
      });
    },
  });
const getTableTool = (
editor: SlateEditor,
{
messagesRaw,
model,
writer,
}: {
messagesRaw: ChatMessage[];
model: LanguageModel;
writer: UIMessageStreamWriter<ChatMessage>;
}
) =>
tool({
description: 'Edit table cells',
inputSchema: z.object({}),
strict: true,
execute: async () => {
const cellUpdateSchema = z.object({
content: z
.string()
.describe(
String.raw`The new content for the cell. Can contain multiple paragraphs separated by \n\n.`
),
id: z.string().describe('The id of the table cell to update.'),
});
const { partialOutputStream } = streamText({
model,
output: Output.array({ element: cellUpdateSchema }),
prompt: buildEditTableMultiCellPrompt(editor, messagesRaw),
});
let lastLength = 0;
for await (const partialArray of partialOutputStream) {
for (let i = lastLength; i < partialArray.length; i++) {
const cellUpdate = partialArray[i];
writer.write({
id: nanoid(),
data: {
cellUpdate,
status: 'streaming',
},
type: 'data-table',
});
}
lastLength = partialArray.length;
}
writer.write({
id: nanoid(),
data: {
cellUpdate: null,
status: 'finished',
},
type: 'data-table',
});
},
});

Configure Environment
Set your AI Gateway key locally (replace with your provider secret if you are not using a gateway):
AI_GATEWAY_API_KEY="your-api-key"

Manual Usage
Installation
pnpm add @platejs/ai @platejs/markdown @platejs/selection @ai-sdk/react ai
@platejs/suggestion is optional but required for diff-based edit suggestions.
Add Plugins
import { createPlateEditor } from 'platejs/react';
import { AIChatPlugin, AIPlugin } from '@platejs/ai/react';
import { BlockSelectionPlugin } from '@platejs/selection/react';
import { MarkdownPlugin } from '@platejs/markdown';
export const editor = createPlateEditor({
plugins: [
BlockSelectionPlugin,
MarkdownPlugin,
AIPlugin,
AIChatPlugin, // extended in the next step
],
});

- `BlockSelectionPlugin`: Enables multi-block selections that `AIChatPlugin` relies on for insert/replace transforms.
- `MarkdownPlugin`: Provides Markdown serialization used by streaming utilities.
- `AIPlugin`: Adds the AI mark and transforms for undoing AI batches.
- `AIChatPlugin`: Supplies the AI combobox, API helpers, and transforms.
Use AIPlugin.withComponent with your own element (or AILeaf) to highlight AI-generated text.
Configure AIChatPlugin
Extend AIChatPlugin to hook streaming and edits. The example mirrors the core logic from AIKit while keeping the UI headless.
import cloneDeep from 'lodash/cloneDeep';
import { BaseAIPlugin, withAIBatch } from '@platejs/ai';
import {
AIChatPlugin,
applyAISuggestions,
getInsertPreviewStart,
resetStreamInsertChunk,
streamInsertChunk,
useChatChunk,
} from '@platejs/ai/react';
import { ElementApi, getPluginType, KEYS, PathApi } from 'platejs';
import { usePluginOption } from 'platejs/react';
export const aiChatPlugin = AIChatPlugin.extend({
options: {
chatOptions: {
api: '/api/ai/command',
body: {
model: 'openai/gpt-4o-mini',
},
},
trigger: ' ',
triggerPreviousCharPattern: /^\s?$/,
},
useHooks: ({ editor, getOption }) => {
const mode = usePluginOption(AIChatPlugin, 'mode');
const toolName = usePluginOption(AIChatPlugin, 'toolName');
useChatChunk({
onChunk: ({ chunk, isFirst, text }) => {
if (isFirst && mode === 'insert') {
const { startBlock, startInEmptyParagraph } =
getInsertPreviewStart(editor);
editor.getTransforms(BaseAIPlugin).ai.beginPreview({
originalBlocks:
startInEmptyParagraph &&
startBlock &&
ElementApi.isElement(startBlock)
? [cloneDeep(startBlock)]
: [],
});
editor.setOption(AIChatPlugin, 'streaming', true);
editor.tf.withoutSaving(() => {
editor.tf.insertNodes(
{
children: [{ text: '' }],
type: getPluginType(editor, KEYS.aiChat),
},
{
at: PathApi.next(editor.selection!.focus.path.slice(0, 1)),
}
);
});
}
if (mode === 'insert') {
editor.tf.withoutSaving(() => {
if (!getOption('streaming')) return;
editor.tf.withScrolling(() => {
streamInsertChunk(editor, chunk, {
textProps: {
[getPluginType(editor, KEYS.ai)]: true,
},
});
});
});
}
if (toolName === 'edit' && mode === 'chat') {
withAIBatch(
editor,
() => {
applyAISuggestions(editor, text);
},
{ split: isFirst }
);
}
},
onFinish: () => {
editor.setOption(AIChatPlugin, 'streaming', false);
resetStreamInsertChunk(editor);
},
});
},
});

- `useChatChunk`: Watches `UseChatHelpers` status and yields incremental chunks.
- `tf.ai.beginPreview`: Captures the rollback slice and selection for insert-mode preview before the first streamed chunk is written.
- `streamInsertChunk`: Streams Markdown/MDX into the document, reusing the existing block when possible.
- `applyAISuggestions`: Converts responses into transient suggestion nodes when `toolName === 'edit'`.
- `withAIBatch`: Marks saved AI batches so suggestion review and accepted AI changes stay undo-safe.
Provide your own render components (toolbar button, floating menu, etc.) when you extend the plugin.
Build API Route
Handle api.aiChat.submit requests on the server. Each request includes the chat messages from @ai-sdk/react and a ctx payload that contains the editor children, current selection, and last toolName.
Complete API example
import { createGateway } from '@ai-sdk/gateway';
import { convertToCoreMessages, streamText } from 'ai';
import { createSlateEditor } from 'platejs';
import { BaseEditorKit } from '@/registry/components/editor/editor-base-kit';
import { markdownJoinerTransform } from '@/registry/lib/markdown-joiner-transform';
export async function POST(req: Request) {
const { apiKey, ctx, messages, model } = await req.json();
const editor = createSlateEditor({
plugins: BaseEditorKit,
selection: ctx.selection,
value: ctx.children,
});
const gateway = createGateway({
apiKey: apiKey ?? process.env.AI_GATEWAY_API_KEY!,
});
const result = streamText({
experimental_transform: markdownJoinerTransform(),
messages: convertToCoreMessages(messages),
model: gateway(model ?? 'openai/gpt-4o-mini'),
system: ctx.toolName === 'edit' ? 'You are an editor that rewrites user text.' : undefined,
});
return result.toDataStreamResponse();
}

- `ctx.children` and `ctx.selection` are rehydrated into a Slate editor so you can build rich prompts (see Prompt Templates).
- Forward provider settings (model, apiKey, temperature, gateway flags, etc.) through `chatOptions.body`; everything you add is passed verbatim in the JSON payload and can be read before calling `createGateway`.
- Always read secrets from the server. The client should only send opaque identifiers or short-lived tokens.
- Return a streaming response so `useChat` and `useChatChunk` can process tokens incrementally.
Connect useChat
Bridge the editor and your model endpoint with @ai-sdk/react. Store helpers on the plugin so transforms can reload, stop, or show chat state.
import { useEffect, useMemo } from 'react';
import { type UIMessage } from 'ai';
import { type UseChatHelpers, useChat } from '@ai-sdk/react';
import {
AIChatPlugin,
createAIChatTextStreamTransport,
withAIChatTextStream,
} from '@platejs/ai/react';
import { useEditorPlugin } from 'platejs/react';
type ChatMessage = UIMessage<{}, { toolName: 'comment' | 'edit' | 'generate'; comment?: unknown }>;
export const useEditorAIChat = () => {
const { editor, setOption } = useEditorPlugin(AIChatPlugin);
const transport = useMemo(
() =>
createAIChatTextStreamTransport<ChatMessage>({
api: '/api/ai/command',
chatId: 'editor',
}),
[]
);
const baseChat = useChat<ChatMessage>({
id: 'editor',
transport,
onData(data) {
if (data.type === 'data-toolName') {
editor.setOption(AIChatPlugin, 'toolName', data.data);
}
},
});
const chat = withAIChatTextStream(baseChat, transport);
useEffect(() => {
setOption('chat', chat as UseChatHelpers<ChatMessage>);
}, [chat.__plateTextStreamChannelId, chat.error, chat.messages, chat.status, setOption]);
return chat;
};

Combine the helper with `useEditorChat` to keep the floating menu anchored correctly:
import { useEditorChat } from '@platejs/ai/react';
useEditorChat({
onOpenChange: (open) => {
if (!open) chat.stop?.();
},
});

Now you can submit prompts programmatically:
import { AIChatPlugin } from '@platejs/ai/react';
editor.getApi(AIChatPlugin).aiChat.submit('', {
prompt: {
default: 'Continue the document after {block}',
selecting: 'Rewrite {selection} with a clearer tone',
},
toolName: 'generate',
});

Prompt Templates
Client Prompting
- `api.aiChat.submit` accepts an `EditorPrompt`. Provide a string, an object with `default`/`selecting`/`blockSelecting`, or a function that receives `{ editor, isSelecting, isBlockSelecting }`. The helper `getEditorPrompt` in the client turns that value into the final string.
- Combine it with `replacePlaceholders(editor, template, { prompt })` to expand `{editor}`, `{block}`, `{blockSelection}`, and `{prompt}` using Markdown generated by `@platejs/ai`.
import { replacePlaceholders } from '@platejs/ai';
editor.getApi(AIChatPlugin).aiChat.submit('Improve tone', {
prompt: ({ isSelecting }) =>
isSelecting
? replacePlaceholders(editor, 'Rewrite {blockSelection} using a friendly tone.')
: replacePlaceholders(editor, 'Continue {block} with two more sentences.'),
toolName: 'generate',
});

Server Prompting
The demo backend in apps/www/src/app/api/ai/command reconstructs the editor from ctx and builds structured prompts:
- `getChooseToolPrompt` decides whether the request is `generate`, `edit`, or `comment`.
- `getGeneratePrompt`, `getEditPrompt`, and `getCommentPrompt` transform the current editor state into instructions tailored to each mode.
- Utility helpers like `getMarkdown`, `getMarkdownWithSelection`, and `buildStructuredPrompt` (see `apps/www/src/app/api/ai/command/prompts.ts`) make it easy to embed block ids, selections, and MDX tags into the LLM request.
Augment the payload you send from the client to fine-tune server prompts:
editor.setOption(aiChatPlugin, 'chatOptions', {
api: '/api/ai/command',
body: {
model: 'openai/gpt-4o-mini',
tone: 'playful',
temperature: 0.4,
},
});

Everything under `chatOptions.body` arrives in the route handler, letting you swap providers, pass user-specific metadata, or branch into different prompt templates.
- `createAIChatTextStreamTransport`: forwards raw `text-start`/`text-delta`/`text-end` events from the AI SDK transport.
- `withAIChatTextStream`: annotates the returned chat helpers so `useChatChunk` can subscribe to the transport's instance-local text stream instead of diffing the accumulated assistant message.
Keyboard Shortcuts
| Key | Description |
|---|---|
| Space | Open the AI menu in an empty block (cursor mode) |
| Cmd + J | Show the AI menu (set via shortcuts.show) |
| Escape | Hide the AI menu and stop streaming |
Streaming
The streaming utilities keep complex layouts intact while responses arrive:
- `streamInsertChunk(editor, chunk, options)` deserializes Markdown chunks, updates the current block in place, and appends new blocks as needed. Use `textProps`/`elementProps` to tag streamed nodes (e.g., mark AI text).
- `streamDeserializeMd` and `streamDeserializeInlineMd` provide lower-level access if you need to control streaming for custom node types.
- `streamSerializeMd` mirrors the editor state so you can detect drift between streamed content and the response buffer.
- `resetStreamInsertChunk(editor)` clears the active insert-stream session when streaming finishes or when you intentionally restart the stream from scratch.
Streaming Example
'use client';
import {
Component,
type ErrorInfo,
type ReactNode,
startTransition,
useEffect,
useMemo,
useRef,
useState,
} from 'react';
import {
AIChatPlugin,
resetStreamInsertChunk,
streamInsertChunk,
} from '@platejs/ai/react';
import { getPluginType, KEYS } from 'platejs';
import { Plate, usePlateEditor } from 'platejs/react';
import { Button } from '@/components/ui/button';
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
} from '@/components/ui/dialog';
import { Input } from '@/components/ui/input';
import { cn } from '@/lib/utils';
import { EditorKit } from '@/components/editor/editor-kit';
import { models } from '@/components/editor/settings-dialog';
import { CopilotKit } from '@/components/editor/plugins/copilot-kit';
import {
appendMarkdownStreamingChunks,
createMarkdownStreamingChunkAccumulator,
flushMarkdownStreamingChunks,
type MarkdownStreamingChunk,
type MarkdownStreamingChunkAccumulator,
transformMarkdownStreamingChunks,
} from '@/lib/markdown-streaming-chunks';
import {
DEFAULT_MARKDOWN_STREAMING_DEMO_SCENARIO_ID,
DEFAULT_PLAYBACK_BURST_SIZE,
DEFAULT_PLAYBACK_DELAY_IN_MS,
type MarkdownStreamingDemoScenarioId,
getNextPlaybackIndex,
getPlaybackDelayInMs,
markdownStreamingDemoScenarios,
playbackBurstSizeOptions,
playbackDelayOptions,
} from '@/lib/markdown-streaming-demo-data';
import {
MARKDOWN_STREAMING_DEMO_GATEWAY_KEY_STORAGE_KEY,
normalizeMarkdownStreamingDemoGatewayApiKey,
shouldPromptForMarkdownStreamingDemoGatewayKey,
} from '@/lib/markdown-streaming-demo-ai';
import { Editor, EditorContainer } from '@/components/ui/editor';
// Captures the run of newlines at the end of a chunk (used to group chunks
// into visual "lines" in the token pane).
const TRAILING_LINEBREAK_REGEX = /(\n+)$/;
// How long copy/paste feedback badges stay visible.
const COPY_FEEDBACK_DURATION_MS = 1800;
const DEFAULT_AI_MODEL = 'openai/gpt-4.1-mini';
const DEFAULT_AI_PROMPT =
  'Write a markdown article about streaming markdown editors with headings, bullet lists, a table, one blockquote, one link, and one fenced code block.';
// Why the gateway-key dialog was opened: generate immediately, or just save.
const HOSTED_AI_KEY_DIALOG_INTENTS = {
  generate: 'generate',
  save: 'save',
} as const;
type HostedAiKeyDialogIntent =
  (typeof HOSTED_AI_KEY_DIALOG_INTENTS)[keyof typeof HOSTED_AI_KEY_DIALOG_INTENTS];
// Single empty paragraph used as the blank editor document.
const EMPTY_EDITOR_VALUE = [
  {
    children: [{ text: '' }],
    type: 'p',
  },
] as const;
// Where the chunk source comes from: built-in preset, live AI stream, or paste.
type SourceMode = 'preset' | 'ai' | 'pasted';
type AiStreamStatus = 'idle' | 'loading' | 'streaming' | 'done' | 'error';
type TChunk = MarkdownStreamingChunk;
// Tracks which chunks have already been applied to the editor for a source.
type AppliedStreamingState = {
  appliedCount: number;
  sourceIdentity: string;
  streamedChunks: string[];
};
// A group of chunks ending at a line break, plus the trailing newline count.
type TChunks = {
  chunks: {
    index: number;
    text: string;
  }[];
  linebreaks: number;
};
type EditorRenderBoundaryProps = {
  children: ReactNode;
  currentChunkLabel: string;
  onReset: () => void;
  resetKey: string;
};
type EditorRenderBoundaryState = {
  errorMessage: string | null;
};
/**
 * Error boundary around the editor pane. If rendering crashes it shows the
 * error message plus the chunk label being applied, with a manual reset
 * button; the error auto-clears whenever `resetKey` changes.
 */
class EditorRenderBoundary extends Component<
  EditorRenderBoundaryProps,
  EditorRenderBoundaryState
> {
  state: EditorRenderBoundaryState = {
    errorMessage: null,
  };
  // Capture the thrown error as renderable text.
  static getDerivedStateFromError(error: unknown): EditorRenderBoundaryState {
    return {
      errorMessage:
        error instanceof Error ? error.message : 'Unknown editor render error.',
    };
  }
  componentDidCatch(error: unknown, info: ErrorInfo) {
    console.error('Registry markdown streaming demo crashed.', error, info);
  }
  // Clear the error state when the parent moves to a new source/chunk.
  componentDidUpdate(previousProps: EditorRenderBoundaryProps) {
    if (
      this.state.errorMessage &&
      previousProps.resetKey !== this.props.resetKey
    ) {
      this.setState({ errorMessage: null });
    }
  }
  render() {
    if (this.state.errorMessage) {
      return (
        <div className="flex h-full items-center justify-center rounded-xl border border-red-200 bg-red-50 p-6">
          <div className="space-y-3 text-red-900 text-sm">
            <strong className="block text-base">Editor output crashed</strong>
            <p>{this.state.errorMessage}</p>
            <p>Current chunk: {this.props.currentChunkLabel}</p>
            <Button
              type="button"
              variant="outline"
              onClick={this.props.onReset}
            >
              Reset editor pane
            </Button>
          </div>
        </div>
      );
    }
    return this.props.children;
  }
}
/** Pretty-print the chunk list as JSON so it can be copied to the clipboard. */
function serializeChunksForClipboard(chunks: readonly string[]) {
  const indentWidth = 2;
  return JSON.stringify(chunks, null, indentWidth);
}
/**
 * Parse a JSON-serialized chunk list back into a fresh string array.
 *
 * @throws Error when the JSON is not an array of strings (invalid JSON
 *   itself propagates the underlying `SyntaxError`).
 */
function parseSerializedChunks(source: string): string[] {
  const parsed = JSON.parse(source) as unknown;
  // Type-guard predicate narrows elements to `string`, so the declared
  // `string[]` return type is actually verified (the original returned an
  // unnarrowed array).
  if (
    !Array.isArray(parsed) ||
    !parsed.every((chunk): chunk is string => typeof chunk === 'string')
  ) {
    throw new Error('Chunks must be a JSON array of strings.');
  }
  // Copy so callers can mutate without touching any cached parse result.
  return [...parsed];
}
/** Deep-copy the blank editor value so every reset gets a fresh, mutable tree. */
function cloneEditorValue() {
  const serialized = JSON.stringify(EMPTY_EDITOR_VALUE);
  return JSON.parse(serialized);
}
// Return the editor to a blank, non-streaming state: clears the streaming
// flag, the active insert-stream session, any selection, and the document.
function resetStreamingState(editor: any) {
  editor.setOption(AIChatPlugin, 'streaming', false);
  resetStreamInsertChunk(editor);
  if (editor.selection) {
    editor.tf.deselect();
  }
  editor.tf.setValue(cloneEditorValue());
}
// Stream one markdown chunk into the editor, tagging inserted text with the
// AI mark so it renders with the AI styling.
function applyChunk(editor: any, chunk: string) {
  streamInsertChunk(editor, chunk, {
    textProps: {
      [getPluginType(editor, KEYS.ai)]: true,
    },
  });
}
/** Reset the editor, then re-apply the first `count` chunks as one batch. */
function replayChunks(editor: any, chunks: string[], count: number) {
  resetStreamingState(editor);
  const batch = chunks.slice(0, count).join('');
  if (batch.length > 0) {
    applyChunk(editor, batch);
  }
}
/**
 * True when `previousChunks` is an exact prefix of `nextChunks` — the same
 * chunks in the same order, with `nextChunks` allowed to be longer.
 */
function isChunkPrefix(
  previousChunks: readonly string[],
  nextChunks: readonly string[]
) {
  if (nextChunks.length < previousChunks.length) {
    return false;
  }
  for (let i = 0; i < previousChunks.length; i += 1) {
    if (previousChunks[i] !== nextChunks[i]) {
      return false;
    }
  }
  return true;
}
/**
 * Group consecutive chunks into visual lines: a group closes whenever a
 * chunk ends with one or more newlines, and the trailing newline count is
 * recorded. A final group with `linebreaks: 0` holds any leftover chunks.
 */
function splitChunksByLinebreak(chunks: readonly string[]) {
  const groups: TChunks[] = [];
  let pending: { index: number; text: string }[] = [];
  chunks.forEach((text, index) => {
    pending.push({ index, text });
    const trailing = TRAILING_LINEBREAK_REGEX.exec(text);
    if (trailing) {
      groups.push({
        chunks: pending,
        linebreaks: trailing[1].length,
      });
      pending = [];
    }
  });
  if (pending.length > 0) {
    groups.push({
      chunks: pending,
      linebreaks: 0,
    });
  }
  return groups;
}
// Thin local alias over the shared transformation so the demo reads cleanly.
function transformChunks(chunks: readonly string[]): TChunk[] {
  return transformMarkdownStreamingChunks(chunks);
}
/** Serialize the editor tree as pretty-printed JSON for the inspector pane. */
function encodeEditorTree(editorChildren: unknown) {
  const indent = 2;
  return JSON.stringify(editorChildren, undefined, indent);
}
// Card shell shared by the demo panes: a fixed-height section with a title,
// a short description, and a scrollable content slot.
function InfoCard({
  children,
  description,
  title,
}: {
  children: ReactNode;
  description: ReactNode;
  title: string;
}) {
  return (
    <section className="flex h-[700px] min-h-0 flex-col rounded-3xl border border-slate-200 bg-white p-6 shadow-sm">
      <div className="mb-4 space-y-1">
        <h2 className="font-semibold text-2xl text-slate-900">{title}</h2>
        <p className="text-slate-500 text-sm">{description}</p>
      </div>
      <div className="min-h-0 flex-1">{children}</div>
    </section>
  );
}
// Renders the raw chunk stream as clickable tokens, grouped one row per
// source line. Newlines are shown as "⤶" and spaces as "␣". Tokens whose
// index is below `activeIndex` are highlighted as already applied; clicking
// a token invokes `chunkClick` with index + 1 (i.e. replay through it).
// NOTE(review): when activeIndex is 0 the highlight branch is skipped
// entirely — presumably intentional ("before first chunk"); confirm.
function Tokens({
  activeIndex,
  chunkClick,
  chunks,
}: {
  activeIndex: number;
  chunks: TChunks[];
  chunkClick?: (index: number) => void;
}) {
  return (
    <div className="h-full overflow-y-auto rounded-2xl bg-slate-100 p-4 font-mono">
      {chunks.map((chunk, index) => (
        <div key={index} className="py-1">
          {chunk.chunks.map((c, chunkIndex) => {
            // Make whitespace visible in the token labels.
            const lineBreak = c.text.replaceAll('\n', '⤶');
            const space = lineBreak.replaceAll(' ', '␣');
            return (
              <span
                key={chunkIndex}
                role="button"
                className={cn(
                  'mx-1 inline-block rounded border p-1 text-xs transition',
                  activeIndex && c.index < activeIndex
                    ? 'bg-amber-400'
                    : 'bg-white hover:bg-slate-50'
                )}
                onClick={() => chunkClick?.(c.index + 1)}
              >
                {space}
              </span>
            );
          })}
        </div>
      ))}
    </div>
  );
}
// Controlled dialog that collects an AI Gateway API key for the hosted demo.
// Fully stateless: the draft value, error text, and open state all live in
// the parent; submit is routed through a <form> so Enter works.
function MarkdownStreamingDemoGatewayKeyDialog({
  apiKeyDraft,
  error,
  onApiKeyDraftChange,
  onOpenChange,
  onSubmit,
  open,
  submitLabel,
}: {
  apiKeyDraft: string;
  error: string | null;
  onApiKeyDraftChange: (nextApiKey: string) => void;
  onOpenChange: (open: boolean) => void;
  onSubmit: () => void;
  open: boolean;
  submitLabel: string;
}) {
  return (
    <Dialog open={open} onOpenChange={onOpenChange}>
      <DialogContent>
        <DialogHeader>
          <DialogTitle className="text-xl">Add AI Gateway API key</DialogTitle>
          <DialogDescription>
            This hosted demo does not ship with a server-side key. Add an AI
            Gateway API key to stream a live markdown response from this
            browser.
          </DialogDescription>
        </DialogHeader>
        <form
          className="space-y-4"
          onSubmit={(event) => {
            // Keep the browser from navigating; parent decides what to do.
            event.preventDefault();
            onSubmit();
          }}
        >
          <div className="space-y-2">
            <label
              className="block font-medium text-slate-800 text-sm"
              htmlFor="markdown-streaming-demo-gateway-key"
            >
              AI Gateway API key
            </label>
            <Input
              id="markdown-streaming-demo-gateway-key"
              value={apiKeyDraft}
              onChange={(event) => onApiKeyDraftChange(event.target.value)}
              placeholder="vck_..."
              autoComplete="off"
              data-1p-ignore
              type="password"
            />
          </div>
          <p
            className={cn('text-sm', error ? 'text-red-600' : 'text-slate-500')}
          >
            {error
              ? error
              : 'The key is stored in this browser tab only and is sent with demo requests.'}
          </p>
          <DialogFooter>
            <Button
              type="button"
              variant="outline"
              onClick={() => onOpenChange(false)}
            >
              Cancel
            </Button>
            <Button type="submit">{submitLabel}</Button>
          </DialogFooter>
        </form>
      </DialogContent>
    </Dialog>
  );
}
export default function MarkdownStreamingDemo() {
const [selectedScenario, setSelectedScenario] =
useState<MarkdownStreamingDemoScenarioId>(
DEFAULT_MARKDOWN_STREAMING_DEMO_SCENARIO_ID
);
const [sourceMode, setSourceMode] = useState<SourceMode>('preset');
const [activeIndex, setActiveIndex] = useState(0);
const [isPlaying, setIsPlaying] = useState(false);
const [playbackBurstSize, setPlaybackBurstSize] = useState(
DEFAULT_PLAYBACK_BURST_SIZE
);
const [playbackDelayInMs, setPlaybackDelayInMs] = useState(
DEFAULT_PLAYBACK_DELAY_IN_MS
);
const [treeJson, setTreeJson] = useState(
encodeEditorTree(EMPTY_EDITOR_VALUE)
);
const [aiPrompt, setAiPrompt] = useState(DEFAULT_AI_PROMPT);
const [selectedModel, setSelectedModel] = useState(DEFAULT_AI_MODEL);
const [aiRawChunks, setAiRawChunks] = useState<string[]>([]);
const [aiTransformedChunks, setAiTransformedChunks] = useState<TChunk[]>([]);
const [aiError, setAiError] = useState<string | null>(null);
const [aiStatus, setAiStatus] = useState<AiStreamStatus>('idle');
const [aiProvider, setAiProvider] = useState<string | null>(null);
const [hostedGatewayApiKey, setHostedGatewayApiKey] = useState('');
const [hostedGatewayApiKeyDraft, setHostedGatewayApiKeyDraft] = useState('');
const [hostedGatewayApiKeyDialogError, setHostedGatewayApiKeyDialogError] =
useState<string | null>(null);
const [hostedGatewayApiKeyDialogIntent, setHostedGatewayApiKeyDialogIntent] =
useState<HostedAiKeyDialogIntent>(HOSTED_AI_KEY_DIALOG_INTENTS.save);
const [isHostedGatewayApiKeyDialogOpen, setIsHostedGatewayApiKeyDialogOpen] =
useState(false);
const [requiresHostedGatewayApiKey, setRequiresHostedGatewayApiKey] =
useState(false);
const [copyChunksStatus, setCopyChunksStatus] = useState<
'idle' | 'copied' | 'error'
>('idle');
const [pasteChunksStatus, setPasteChunksStatus] = useState<
'idle' | 'loaded' | 'error'
>('idle');
const [pastedChunks, setPastedChunks] = useState<string[]>([]);
const abortControllerRef = useRef<AbortController | null>(null);
const aiChunkAccumulatorRef = useRef<MarkdownStreamingChunkAccumulator>(
createMarkdownStreamingChunkAccumulator()
);
const aiFlushFrameRef = useRef<number | null>(null);
const aiPendingRawChunksRef = useRef<string[]>([]);
const aiTransformedChunkCountRef = useRef(0);
const copyStatusTimerRef = useRef<number | null>(null);
const pasteStatusTimerRef = useRef<number | null>(null);
const appliedStreamingStateRef = useRef<AppliedStreamingState>({
appliedCount: 0,
sourceIdentity: 'preset:columns',
streamedChunks: [],
});
const editor = usePlateEditor(
{
plugins: [...CopilotKit, ...EditorKit],
value: cloneEditorValue(),
},
[]
);
const selectedScenarioDefinition =
markdownStreamingDemoScenarios[selectedScenario];
const scenarioEntries = Object.entries(markdownStreamingDemoScenarios) as [
MarkdownStreamingDemoScenarioId,
(typeof markdownStreamingDemoScenarios)[MarkdownStreamingDemoScenarioId],
][];
const currentScenarioChunks = selectedScenarioDefinition.chunks;
const rawChunks = useMemo(() => {
if (sourceMode === 'ai') {
return aiRawChunks;
}
if (sourceMode === 'pasted') {
return pastedChunks;
}
return currentScenarioChunks;
}, [aiRawChunks, currentScenarioChunks, pastedChunks, sourceMode]);
const transformedCurrentChunks = useMemo(
() =>
sourceMode === 'ai' ? aiTransformedChunks : transformChunks(rawChunks),
[aiTransformedChunks, rawChunks, sourceMode]
);
const currentMarkdown = useMemo(
() =>
transformedCurrentChunks
.slice(0, activeIndex)
.map((item) => item.chunk)
.join(''),
[activeIndex, transformedCurrentChunks]
);
const rawActiveIndex = useMemo(() => {
if (activeIndex === 0) {
return 0;
}
const lastAppliedChunk =
transformedCurrentChunks[
Math.min(activeIndex - 1, transformedCurrentChunks.length - 1)
];
return lastAppliedChunk ? lastAppliedChunk.rawEndIndex + 1 : 0;
}, [activeIndex, transformedCurrentChunks]);
const rawChunksByLine = useMemo(
() => splitChunksByLinebreak(rawChunks),
[rawChunks]
);
const currentSourceLabel =
sourceMode === 'ai'
? aiStatus === 'idle'
? 'AI prompt'
: 'Live AI stream'
: sourceMode === 'pasted'
? 'Pasted chunks'
: selectedScenarioDefinition.label;
const sourceIdentity =
sourceMode === 'preset' ? `preset:${selectedScenario}` : sourceMode;
const currentChunkLabel =
activeIndex === 0 ? 'before first chunk' : `#${activeIndex}`;
const editorBoundaryResetKey = `${sourceIdentity}:${activeIndex}`;
const hasHostedGatewayApiKey = hostedGatewayApiKey.length > 0;
// Opens the Gateway API key dialog, seeding the draft field with the
// currently saved key and recording why it was opened ('save' vs
// 'generate' intent) so submit can decide whether to start a stream.
function openHostedGatewayApiKeyDialog(intent: HostedAiKeyDialogIntent) {
setHostedGatewayApiKeyDialogIntent(intent);
setHostedGatewayApiKeyDialogError(null);
setHostedGatewayApiKeyDraft(hostedGatewayApiKey);
setIsHostedGatewayApiKeyDialogOpen(true);
}
// Validates and persists the Gateway API key from the dialog draft.
// On success the dialog closes; if it was opened with the 'generate'
// intent, AI generation starts immediately with the new key.
async function saveHostedGatewayApiKey() {
const gatewayApiKey = normalizeMarkdownStreamingDemoGatewayApiKey(
hostedGatewayApiKeyDraft
);
if (!gatewayApiKey) {
setHostedGatewayApiKeyDialogError('AI Gateway API key is required.');
return;
}
// Best-effort persistence: sessionStorage.setItem can throw (e.g. storage
// disabled); the in-memory key below still works for this tab.
try {
window.sessionStorage.setItem(
MARKDOWN_STREAMING_DEMO_GATEWAY_KEY_STORAGE_KEY,
gatewayApiKey
);
} catch {}
setHostedGatewayApiKey(gatewayApiKey);
setHostedGatewayApiKeyDraft(gatewayApiKey);
setHostedGatewayApiKeyDialogError(null);
setIsHostedGatewayApiKeyDialogOpen(false);
if (
hostedGatewayApiKeyDialogIntent === HOSTED_AI_KEY_DIALOG_INTENTS.generate
) {
await handleGenerateAiStream({ gatewayApiKey });
}
}
/**
 * Entry point for the "Generate with AI" button. On the hosted demo a
 * Gateway API key must exist first; if it does not, the key dialog opens
 * with the 'generate' intent instead of starting a stream.
 */
function handleGenerateAiButtonClick() {
  const mustCollectKeyFirst =
    requiresHostedGatewayApiKey && !hasHostedGatewayApiKey;
  if (mustCollectKeyFirst) {
    openHostedGatewayApiKeyDialog(HOSTED_AI_KEY_DIALOG_INTENTS.generate);
    return;
  }
  void handleGenerateAiStream({ gatewayApiKey: hostedGatewayApiKey });
}
/** Aborts any in-flight AI request and clears the stored controller. */
function stopAiStream() {
  const activeController = abortControllerRef.current;
  if (activeController !== null) {
    activeController.abort();
  }
  abortControllerRef.current = null;
}
/** Cancels the scheduled requestAnimationFrame flush, if one is pending. */
function clearAiChunkFlushFrame() {
  const frameId = aiFlushFrameRef.current;
  if (frameId == null) return;
  window.cancelAnimationFrame(frameId);
  aiFlushFrameRef.current = null;
}
// Resets all AI streaming bookkeeping: cancels any scheduled flush, swaps
// in a fresh chunk accumulator, and empties both the queued raw chunks and
// the derived React state.
function resetAiStreamingChunks() {
clearAiChunkFlushFrame();
aiChunkAccumulatorRef.current = createMarkdownStreamingChunkAccumulator();
aiPendingRawChunksRef.current = [];
aiTransformedChunkCountRef.current = 0;
setAiRawChunks([]);
setAiTransformedChunks([]);
}
// Drains the pending raw-chunk queue through the markdown accumulator and
// commits the results to React state inside one transition.
// options.final additionally flushes whatever the accumulator is still
// holding — used when the stream ends or is aborted.
function flushAiChunkQueue(options?: { final?: boolean }) {
clearAiChunkFlushFrame();
const pendingRawChunks = aiPendingRawChunksRef.current;
aiPendingRawChunksRef.current = [];
const nextTransformedChunks = appendMarkdownStreamingChunks(
aiChunkAccumulatorRef.current,
pendingRawChunks
);
if (options?.final) {
nextTransformedChunks.push(
...flushMarkdownStreamingChunks(aiChunkAccumulatorRef.current)
);
}
// Nothing consumed and nothing produced: skip the state updates entirely.
if (pendingRawChunks.length === 0 && nextTransformedChunks.length === 0) {
return;
}
// Track the running transformed-chunk total in a ref so back-to-back
// flushes don't read stale state when advancing activeIndex below.
const nextTransformedChunkCount =
aiTransformedChunkCountRef.current + nextTransformedChunks.length;
aiTransformedChunkCountRef.current = nextTransformedChunkCount;
startTransition(() => {
if (pendingRawChunks.length > 0) {
setAiRawChunks((previous) => [...previous, ...pendingRawChunks]);
}
if (nextTransformedChunks.length > 0) {
setAiTransformedChunks((previous) => [
...previous,
...nextTransformedChunks,
]);
// Keep playback pinned to the newest transformed chunk.
setActiveIndex(nextTransformedChunkCount);
}
});
}
/**
 * Schedules a single flush of the raw-chunk queue on the next animation
 * frame. Calls made while a frame is already pending are no-ops, so bursts
 * of network chunks collapse into one state update per frame.
 */
function scheduleAiChunkFlush() {
  if (aiFlushFrameRef.current != null) return;
  const frameId = window.requestAnimationFrame(() => {
    aiFlushFrameRef.current = null;
    flushAiChunkQueue();
  });
  aiFlushFrameRef.current = frameId;
}
// Queues one raw streamed chunk and schedules a rAF-batched flush.
function enqueueAiRawChunk(chunk: string) {
aiPendingRawChunksRef.current.push(chunk);
scheduleAiChunkFlush();
}
useEffect(() => {
const shouldPromptForGatewayKey =
shouldPromptForMarkdownStreamingDemoGatewayKey(window.location.hostname);
if (!shouldPromptForGatewayKey) {
return;
}
setRequiresHostedGatewayApiKey(true);
try {
const storedGatewayApiKey = normalizeMarkdownStreamingDemoGatewayApiKey(
window.sessionStorage.getItem(
MARKDOWN_STREAMING_DEMO_GATEWAY_KEY_STORAGE_KEY
) ?? undefined
);
if (!storedGatewayApiKey) {
return;
}
setHostedGatewayApiKey(storedGatewayApiKey);
setHostedGatewayApiKeyDraft(storedGatewayApiKey);
} catch {}
}, []);
/**
 * Reports whether any AI chunk is queued for flushing or has already been
 * pushed through the accumulator. Used on abort to decide whether the
 * stream produced output ('done') or nothing at all ('idle').
 */
function hasQueuedOrAppliedAiChunks() {
  if (aiPendingRawChunksRef.current.length > 0) {
    return true;
  }
  return aiChunkAccumulatorRef.current.rawChunkCount > 0;
}
// Serializes the current raw chunks and writes them to the clipboard.
// The 'copied'/'error' feedback is cleared later by a timer effect.
async function handleCopyChunks() {
try {
await navigator.clipboard.writeText(
serializeChunksForClipboard(rawChunks)
);
setCopyChunksStatus('copied');
} catch {
// Clipboard access can be denied; surface it via the button label only.
setCopyChunksStatus('error');
}
}
// Imports a chunk array previously exported via "Copy chunks". The
// clipboard text is read as a convenience default, then confirmed through
// window.prompt so the user can edit before committing. On success the
// demo switches to 'pasted' source mode with playback jumped to the end.
async function handlePasteChunks() {
let initialValue = '';
try {
initialValue = await navigator.clipboard.readText();
} catch {}
// biome-ignore lint/suspicious/noAlert: dev-only chunk import helper for local debugging.
const pastedValue = window.prompt(
'Paste the chunk array exported from Copy chunks.',
initialValue
);
// null means the user cancelled the prompt.
if (pastedValue == null) {
return;
}
try {
const parsedChunks = parseSerializedChunks(pastedValue);
// Stop and clear any live AI stream before swapping sources.
stopAiStream();
resetAiStreamingChunks();
setSourceMode('pasted');
setPastedChunks(parsedChunks);
setAiStatus('idle');
setAiError(null);
setAiProvider(null);
setIsPlaying(false);
// Jump straight to the end of the pasted stream.
setActiveIndex(transformChunks(parsedChunks).length);
setPasteChunksStatus('loaded');
} catch (error) {
setPasteChunksStatus('error');
setAiError(
error instanceof Error
? error.message
: 'Failed to parse pasted chunks.'
);
}
}
// Returns the demo to a preset scenario source: stops any AI stream,
// clears AI/pasted state, and rewinds playback to the beginning.
// Optionally selects a different scenario in the same step.
function switchToPresetMode(nextScenario?: MarkdownStreamingDemoScenarioId) {
stopAiStream();
resetAiStreamingChunks();
setSourceMode('preset');
setPastedChunks([]);
setAiStatus('idle');
setAiError(null);
setAiProvider(null);
setIsPlaying(false);
setActiveIndex(0);
if (nextScenario) {
setSelectedScenario(nextScenario);
}
}
/**
 * Streams an AI markdown response from the dev endpoint into the demo.
 *
 * Resets all AI/source state, POSTs the prompt, then reads the NDJSON
 * response line-by-line, enqueueing `chunk` events for rAF-batched
 * flushing. An abort is treated as a soft stop (already-streamed chunks
 * are kept); any other failure surfaces through `aiError` / `aiStatus`.
 */
async function handleGenerateAiStream(options?: { gatewayApiKey?: string }) {
  const prompt = aiPrompt.trim();
  if (!prompt) {
    setAiError('Prompt is required.');
    return;
  }
  const gatewayApiKey = normalizeMarkdownStreamingDemoGatewayApiKey(
    options?.gatewayApiKey
  );
  // Cancel any in-flight stream before starting a new one.
  stopAiStream();
  const controller = new AbortController();
  abortControllerRef.current = controller;
  resetAiStreamingChunks();
  setSourceMode('ai');
  setAiError(null);
  setAiStatus('loading');
  setAiProvider(null);
  setIsPlaying(false);
  setActiveIndex(0);
  resetStreamingState(editor);
  setTreeJson(encodeEditorTree(EMPTY_EDITOR_VALUE));
  // Parses one NDJSON event line and applies it. Shared by the in-stream
  // loop and the trailing-buffer handling so both paths stay in sync.
  // Throws when the server reports an error event.
  const processEventLine = (line: string) => {
    const event = JSON.parse(line) as {
      chunk?: string;
      error?: string;
      provider?: string;
      type: 'chunk' | 'done' | 'error';
    };
    if (event.type === 'chunk' && typeof event.chunk === 'string') {
      enqueueAiRawChunk(event.chunk);
    }
    if (typeof event.provider === 'string') {
      setAiProvider(event.provider);
    }
    if (event.type === 'error') {
      throw new Error(event.error ?? 'Unknown AI streaming error.');
    }
    if (event.type === 'done') {
      setAiStatus('done');
    }
  };
  try {
    const response = await fetch('/api/dev/markdown-stream', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        gatewayApiKey,
        model: selectedModel,
        prompt,
      }),
      signal: controller.signal,
    });
    if (!response.ok) {
      let message = `AI request failed with status ${response.status}.`;
      try {
        const payload = (await response.json()) as { error?: string };
        if (payload.error) {
          message = payload.error;
        }
      } catch {}
      throw new Error(message);
    }
    if (!response.body) {
      throw new Error('AI response did not include a stream body.');
    }
    setAiStatus('streaming');
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';
    while (true) {
      const { done, value } = await reader.read();
      if (done) {
        break;
      }
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split('\n');
      // The last split segment may be a partial line; keep it buffered.
      buffer = lines.pop() ?? '';
      for (const rawLine of lines) {
        const line = rawLine.trim();
        if (line) {
          processEventLine(line);
        }
      }
    }
    // Flush any bytes still held by the streaming decoder (e.g. a
    // multi-byte UTF-8 sequence split across the final network chunk).
    buffer += decoder.decode();
    const trailing = buffer.trim();
    if (trailing) {
      processEventLine(trailing);
    }
    flushAiChunkQueue({ final: true });
    setAiStatus((previous) =>
      previous === 'error' || previous === 'idle' ? previous : 'done'
    );
  } catch (error) {
    if (controller.signal.aborted) {
      // User-initiated stop: keep whatever already streamed.
      flushAiChunkQueue({ final: true });
      setAiStatus((previous) =>
        previous === 'done'
          ? previous
          : hasQueuedOrAppliedAiChunks()
            ? 'done'
            : 'idle'
      );
      return;
    }
    setAiStatus('error');
    setAiError(error instanceof Error ? error.message : 'Unknown AI error.');
  } finally {
    // Only clear the ref if a newer stream hasn't replaced this controller.
    if (abortControllerRef.current === controller) {
      abortControllerRef.current = null;
    }
  }
}
useEffect(
() => () => {
stopAiStream();
clearAiChunkFlushFrame();
if (copyStatusTimerRef.current != null) {
window.clearTimeout(copyStatusTimerRef.current);
}
if (pasteStatusTimerRef.current != null) {
window.clearTimeout(pasteStatusTimerRef.current);
}
},
[]
);
useEffect(() => {
if (copyChunksStatus === 'idle') return;
if (copyStatusTimerRef.current != null) {
window.clearTimeout(copyStatusTimerRef.current);
}
copyStatusTimerRef.current = window.setTimeout(() => {
setCopyChunksStatus('idle');
copyStatusTimerRef.current = null;
}, COPY_FEEDBACK_DURATION_MS);
return () => {
if (copyStatusTimerRef.current != null) {
window.clearTimeout(copyStatusTimerRef.current);
}
};
}, [copyChunksStatus]);
useEffect(() => {
if (pasteChunksStatus === 'idle') return;
if (pasteStatusTimerRef.current != null) {
window.clearTimeout(pasteStatusTimerRef.current);
}
pasteStatusTimerRef.current = window.setTimeout(() => {
setPasteChunksStatus('idle');
pasteStatusTimerRef.current = null;
}, COPY_FEEDBACK_DURATION_MS);
return () => {
if (pasteStatusTimerRef.current != null) {
window.clearTimeout(pasteStatusTimerRef.current);
}
};
}, [pasteChunksStatus]);
useEffect(() => {
const chunks = transformedCurrentChunks.map((item) => item.chunk);
const appliedState = appliedStreamingStateRef.current;
const needsReplay =
appliedState.sourceIdentity !== sourceIdentity ||
activeIndex < appliedState.appliedCount ||
!isChunkPrefix(appliedState.streamedChunks, chunks);
if (activeIndex === 0) {
if (
appliedState.appliedCount !== 0 ||
appliedState.sourceIdentity !== sourceIdentity
) {
resetStreamingState(editor);
appliedStreamingStateRef.current = {
appliedCount: 0,
sourceIdentity,
streamedChunks: [],
};
}
setTreeJson(encodeEditorTree(editor.children));
return;
}
if (needsReplay) {
replayChunks(editor, chunks, activeIndex);
appliedStreamingStateRef.current = {
appliedCount: activeIndex,
sourceIdentity,
streamedChunks: chunks.slice(0, activeIndex),
};
setTreeJson(encodeEditorTree(editor.children));
return;
}
if (activeIndex > appliedState.appliedCount) {
const chunkBatch = chunks
.slice(appliedState.appliedCount, activeIndex)
.join('');
if (chunkBatch.length > 0) {
applyChunk(editor, chunkBatch);
}
appliedStreamingStateRef.current = {
appliedCount: activeIndex,
sourceIdentity,
streamedChunks: chunks.slice(0, activeIndex),
};
}
setTreeJson(encodeEditorTree(editor.children));
}, [activeIndex, editor, sourceIdentity, transformedCurrentChunks]);
useEffect(() => {
if (!isPlaying) return;
if (activeIndex >= transformedCurrentChunks.length) {
setIsPlaying(false);
return;
}
const timer = window.setTimeout(
() => {
setActiveIndex((previous) =>
getNextPlaybackIndex(
previous,
transformedCurrentChunks.length,
playbackBurstSize
)
);
},
getPlaybackDelayInMs(
playbackDelayInMs,
transformedCurrentChunks[activeIndex]?.delayInMs ?? 0
)
);
return () => window.clearTimeout(timer);
}, [
activeIndex,
isPlaying,
playbackBurstSize,
playbackDelayInMs,
transformedCurrentChunks,
]);
return (
<>
<section className="h-full overflow-y-auto p-8 md:p-12">
<div className="space-y-6">
<section className="rounded-3xl border border-slate-200 bg-white p-6 shadow-sm">
<div className="flex flex-col gap-6 xl:flex-row xl:items-end xl:justify-between">
<div className="space-y-4">
<div>
<p className="font-semibold text-slate-500 text-xs uppercase tracking-[0.2em]">
Plate / Blocks
</p>
<h1 className="mt-2 font-semibold text-4xl text-slate-950">
Markdown streaming demo
</h1>
<p className="mt-3 max-w-3xl text-slate-600 text-sm">
This registry example now includes the same richer debugging
workflow from the local dev demo, while still using
Plate's real
<code className="mx-1 rounded bg-slate-100 px-1.5 py-0.5">
streamInsertChunk
</code>
path only.
</p>
</div>
<div className="grid gap-3 sm:grid-cols-2 xl:grid-cols-6">
<div className="rounded-2xl bg-slate-50 px-4 py-3">
<span className="block text-slate-500 text-xs uppercase tracking-wide">
Scenario
</span>
<strong className="mt-1 block text-slate-900">
{selectedScenarioDefinition.label}
</strong>
</div>
<div className="rounded-2xl bg-slate-50 px-4 py-3">
<span className="block text-slate-500 text-xs uppercase tracking-wide">
Source
</span>
<strong className="mt-1 block text-slate-900">
{currentSourceLabel}
</strong>
</div>
<div className="rounded-2xl bg-slate-50 px-4 py-3">
<span className="block text-slate-500 text-xs uppercase tracking-wide">
Progress
</span>
<strong className="mt-1 block text-slate-900">
{activeIndex}/{transformedCurrentChunks.length}
</strong>
</div>
<div className="rounded-2xl bg-slate-50 px-4 py-3">
<span className="block text-slate-500 text-xs uppercase tracking-wide">
Adapter
</span>
<strong className="mt-1 block text-slate-900">
Plate streamInsertChunk
</strong>
</div>
<div className="rounded-2xl bg-slate-50 px-4 py-3">
<span className="block text-slate-500 text-xs uppercase tracking-wide">
Preset Delay
</span>
<strong className="mt-1 block text-slate-900">
{playbackDelayInMs} ms
</strong>
</div>
<div className="rounded-2xl bg-slate-50 px-4 py-3">
<span className="block text-slate-500 text-xs uppercase tracking-wide">
Burst Size
</span>
<strong className="mt-1 block text-slate-900">
{playbackBurstSize}
</strong>
</div>
</div>
</div>
<div className="flex flex-wrap gap-3">
<label className="flex min-w-72 flex-col gap-2 text-slate-600 text-sm">
<span className="font-medium text-slate-800">Scenario</span>
<select
className="rounded-2xl border border-slate-200 bg-white px-4 py-3 text-slate-900 shadow-sm"
value={selectedScenario}
onChange={(event) => {
switchToPresetMode(
event.target.value as MarkdownStreamingDemoScenarioId
);
}}
>
{scenarioEntries.map(([scenarioId, scenario]) => (
<option key={scenarioId} value={scenarioId}>
{scenario.label}
</option>
))}
</select>
</label>
<label className="flex min-w-48 flex-col gap-2 text-slate-600 text-sm">
<span className="font-medium text-slate-800">
Preset delay
</span>
<select
className="rounded-2xl border border-slate-200 bg-white px-4 py-3 text-slate-900 shadow-sm"
value={playbackDelayInMs}
onChange={(event) => {
setPlaybackDelayInMs(Number(event.target.value));
}}
>
{playbackDelayOptions.map((option) => (
<option key={option.value} value={option.value}>
{option.label}
</option>
))}
</select>
</label>
<label className="flex min-w-48 flex-col gap-2 text-slate-600 text-sm">
<span className="font-medium text-slate-800">Burst size</span>
<select
className="rounded-2xl border border-slate-200 bg-white px-4 py-3 text-slate-900 shadow-sm"
value={playbackBurstSize}
onChange={(event) => {
setPlaybackBurstSize(Number(event.target.value));
}}
>
{playbackBurstSizeOptions.map((option) => (
<option key={option.value} value={option.value}>
{option.label}
</option>
))}
</select>
</label>
<div className="flex flex-wrap items-end gap-3">
<Button
type="button"
variant="outline"
onClick={() => {
setIsPlaying(false);
setActiveIndex(0);
}}
>
Reset
</Button>
<Button
type="button"
variant="outline"
disabled={activeIndex === 0}
onClick={() => {
setIsPlaying(false);
setActiveIndex((previous) => Math.max(0, previous - 1));
}}
>
Prev
</Button>
<Button
type="button"
variant="outline"
disabled={activeIndex >= transformedCurrentChunks.length}
onClick={() => {
setIsPlaying(false);
setActiveIndex((previous) =>
Math.min(transformedCurrentChunks.length, previous + 1)
);
}}
>
Next
</Button>
<Button
type="button"
onClick={() => setIsPlaying((previous) => !previous)}
>
{isPlaying ? 'Pause' : 'Play'}
</Button>
<Button
type="button"
variant="outline"
onClick={() => {
setIsPlaying(false);
setActiveIndex(transformedCurrentChunks.length);
}}
>
Jump to end
</Button>
</div>
</div>
</div>
</section>
<section className="rounded-3xl border border-slate-200 bg-white p-6 shadow-sm">
<div className="space-y-1">
<h2 className="font-semibold text-2xl text-slate-900">
AI Prompt
</h2>
<p className="text-slate-500 text-sm">
Stream a real AI markdown response into the registry example to
catch chunk-boundary bugs that preset cases might miss.
</p>
</div>
<div className="mt-5 grid gap-4 xl:grid-cols-[minmax(0,2fr)_minmax(260px,1fr)]">
<label className="flex min-h-0 flex-col gap-2 text-slate-600 text-sm">
<span className="font-medium text-slate-800">Prompt</span>
<textarea
className="min-h-40 rounded-2xl border border-slate-200 bg-white px-4 py-3 text-slate-900 shadow-sm"
value={aiPrompt}
onChange={(event) => setAiPrompt(event.target.value)}
placeholder="Describe the markdown response you want to stream..."
/>
</label>
<label className="flex min-h-0 flex-col gap-2 text-slate-600 text-sm">
<span className="font-medium text-slate-800">Model</span>
<select
className="rounded-2xl border border-slate-200 bg-white px-4 py-3 text-slate-900 shadow-sm"
value={selectedModel}
onChange={(event) => setSelectedModel(event.target.value)}
>
{models.map((model) => (
<option key={model.value} value={model.value}>
{model.label}
</option>
))}
</select>
</label>
</div>
<div className="mt-5 flex flex-wrap items-center justify-between gap-4">
<div className="flex flex-wrap gap-3">
<Button
type="button"
disabled={aiStatus === 'loading' || aiStatus === 'streaming'}
onClick={handleGenerateAiButtonClick}
>
Generate with AI
</Button>
{requiresHostedGatewayApiKey ? (
<Button
type="button"
variant="outline"
disabled={
aiStatus === 'loading' || aiStatus === 'streaming'
}
onClick={() =>
openHostedGatewayApiKeyDialog(
HOSTED_AI_KEY_DIALOG_INTENTS.save
)
}
>
{hasHostedGatewayApiKey ? 'Change key' : 'Set key'}
</Button>
) : null}
<Button
type="button"
variant="outline"
disabled={aiStatus !== 'loading' && aiStatus !== 'streaming'}
onClick={() => stopAiStream()}
>
Stop AI
</Button>
<Button
type="button"
variant="outline"
disabled={sourceMode === 'preset'}
onClick={() => switchToPresetMode()}
>
Use preset scenario
</Button>
</div>
<div className="flex flex-wrap gap-x-4 gap-y-2 text-slate-600 text-sm">
<span>Status: {aiStatus}</span>
<span>Model: {selectedModel}</span>
<span>Provider path: {aiProvider ?? 'pending'}</span>
<span>Raw AI chunks: {aiRawChunks.length}</span>
<span>Joined chunks: {transformedCurrentChunks.length}</span>
</div>
</div>
<p
className={cn(
'mt-4 text-sm',
aiError ? 'text-red-600' : 'text-slate-500'
)}
>
{aiError
? aiError
: requiresHostedGatewayApiKey
? hasHostedGatewayApiKey
? 'Hosted demo uses the AI Gateway API key stored in this browser tab for live AI streaming.'
: 'Hosted demo does not ship with a server-side key. Add an AI Gateway API key to test live AI streaming.'
: 'Prefer AI_GATEWAY_API_KEY if you want to switch across OpenAI, Google, Anthropic, and other providers. If that is missing, the demo falls back to OPENAI_API_KEY for OpenAI models only.'}
</p>
</section>
<div className="grid gap-6 xl:grid-cols-2">
<InfoCard
title="Chunks"
description={
<>
These are the original chunks before the local MarkdownJoiner
pass. Joined chunks: {transformedCurrentChunks.length}
</>
}
>
<div className="flex h-full min-h-0 flex-col">
<div className="mb-4 flex flex-wrap justify-end gap-3">
<Button
type="button"
variant="outline"
disabled={rawChunks.length === 0}
onClick={() => {
void handleCopyChunks();
}}
>
{copyChunksStatus === 'copied'
? 'Copied'
: copyChunksStatus === 'error'
? 'Copy failed'
: 'Copy raw chunks'}
</Button>
<Button
type="button"
variant="outline"
onClick={() => {
void handlePasteChunks();
}}
>
{pasteChunksStatus === 'loaded'
? 'Loaded'
: pasteChunksStatus === 'error'
? 'Paste failed'
: 'Paste raw chunks'}
</Button>
</div>
<div className="min-h-0 flex-1">
<Tokens
activeIndex={rawActiveIndex}
chunks={rawChunksByLine}
chunkClick={(index) => {
const joinedIndex = transformedCurrentChunks.findIndex(
(chunk) => chunk.rawEndIndex >= index - 1
);
setIsPlaying(false);
setActiveIndex(
joinedIndex === -1
? transformedCurrentChunks.length
: joinedIndex + 1
);
}}
/>
</div>
</div>
</InfoCard>
<InfoCard
title="Editor output"
description="Real Plate streamInsertChunk running against the current joined chunks."
>
<EditorRenderBoundary
currentChunkLabel={currentChunkLabel}
onReset={() => {
setIsPlaying(false);
resetStreamingState(editor);
appliedStreamingStateRef.current = {
appliedCount: 0,
sourceIdentity,
streamedChunks: [],
};
setTreeJson(encodeEditorTree(editor.children));
}}
resetKey={editorBoundaryResetKey}
>
<Plate editor={editor}>
<EditorContainer className="h-full overflow-y-auto rounded-2xl border border-slate-200 bg-white">
<Editor
variant="demo"
className="min-h-full pb-[20vh]"
placeholder="Streaming output will appear here..."
spellCheck={false}
/>
</EditorContainer>
</Plate>
</EditorRenderBoundary>
</InfoCard>
<InfoCard
title="Raw markdown"
description="The currently streamed prefix concatenated as markdown text."
>
<textarea
className="h-full w-full resize-none rounded-2xl border border-slate-200 bg-white p-4 font-mono text-slate-900 text-sm shadow-sm"
readOnly
value={currentMarkdown}
/>
</InfoCard>
<InfoCard
title="Editor tree"
description="The raw Slate tree after the current streaming step."
>
<pre className="h-full overflow-auto whitespace-pre-wrap break-words rounded-2xl bg-slate-900 p-4 font-mono text-slate-100 text-sm">
{treeJson}
</pre>
</InfoCard>
</div>
</div>
</section>
<MarkdownStreamingDemoGatewayKeyDialog
apiKeyDraft={hostedGatewayApiKeyDraft}
error={hostedGatewayApiKeyDialogError}
onApiKeyDraftChange={setHostedGatewayApiKeyDraft}
onOpenChange={setIsHostedGatewayApiKeyDialogOpen}
onSubmit={() => {
void saveHostedGatewayApiKey();
}}
open={isHostedGatewayApiKeyDialogOpen}
submitLabel={
hostedGatewayApiKeyDialogIntent ===
HOSTED_AI_KEY_DIALOG_INTENTS.generate
? 'Save and generate'
: 'Save key'
}
/>
</>
);
}
Plate Plus
Combobox menu with free-form prompt input
- Additional trigger methods:
- Block menu button
- Slash command menu
- Beautifully crafted UI
Hooks
useAIChatEditor
Registers an auxiliary editor for chat previews and deserializes Markdown with block-level memoization.
import { usePlateEditor } from 'platejs/react';
import { MarkdownPlugin } from '@platejs/markdown';
import { AIChatPlugin, useAIChatEditor } from '@platejs/ai/react';
const aiPreviewEditor = usePlateEditor({
plugins: [MarkdownPlugin, AIChatPlugin],
});
useAIChatEditor(aiPreviewEditor, responseMarkdown, {
parser: { exclude: ['space'] },
});
useEditorChat
Connects UseChatHelpers to editor state so the AI menu knows whether to anchor to cursor, selection, or block selection.
useChatChunk
Streams chat responses chunk-by-chunk and gives you full control over insertion.
Utilities
withAIBatch
Groups editor operations into a single history batch and flags it as AI-generated so tf.ai.undo() removes it safely.
applyAISuggestions
Diffs AI output against stored chatNodes and writes transient suggestion nodes. Requires @platejs/suggestion.
Complementary helpers allow you to finalize or discard the diff:
acceptAISuggestions(editor): Converts transient suggestion nodes into permanent suggestions.
rejectAISuggestions(editor): Removes transient suggestion nodes and clears suggestion marks.
aiCommentToRange
Maps streamed comment metadata back to document ranges so comments can be inserted automatically.
findTextRangeInBlock
Fuzzy-search helper that uses LCS to find the closest match inside a block.
getEditorPrompt
Generates prompts that respect cursor, selection, or block selection states.
replacePlaceholders
Replaces placeholders like {editor}, {blockSelection}, and {prompt} with serialized Markdown.
Plugins
AIPlugin
Adds an ai mark to streamed text and exposes transforms to remove AI nodes or undo the last AI batch. Use .withComponent to render AI-marked text with a custom component.
AIChatPlugin
Main plugin that powers the AI menu, chat state, and transforms.
Key options include the trigger character (default ' ') and its preceding-character pattern (/^\s?$/), an open guard that can return false to cancel opening in specific contexts, chat options forwarded to useChat so API calls can access them, a mode that defaults to 'insert', and streaming/open flags that both default to false.
API
api.aiChat.submit(input, options?)
Submits a prompt to your model provider. When mode is omitted it defaults to 'insert' for a collapsed cursor and 'chat' otherwise.
api.aiChat.reset(options?)
Clears chat state, removes AI nodes, and optionally undoes the last AI batch.
api.aiChat.node(options?)
Retrieves the first AI node that matches the specified criteria.
api.aiChat.reload()
Replays the last prompt using the stored UseChatHelpers, restoring the original selection or block selection before resubmitting.
api.aiChat.stop()
Stops streaming and calls chat.stop.
api.aiChat.show()
Opens the AI menu, clears previous chat messages, and resets tool state.
api.aiChat.hide(options?)
Closes the AI menu, optionally undoing the last AI batch and refocusing the editor.
Transforms
tf.aiChat.accept()
Accepts the latest response. In insert mode it removes AI marks and places the caret at the end of the streamed content. In chat mode it applies the pending suggestions.
tf.aiChat.insertBelow(sourceEditor, options?)
Inserts the chat preview (sourceEditor) below the current selection or block selection.
tf.aiChat.replaceSelection(sourceEditor, options?)
Replaces the current selection or block selection with the chat preview.
tf.aiChat.removeAnchor(options?)
Removes the temporary anchor node used to position the AI menu.
tf.ai.insertNodes(nodes, options?)
Inserts nodes tagged with the AI mark at the current selection (or options.target).
tf.ai.removeMarks(options?)
Clears the AI mark from matching nodes.
tf.ai.removeNodes(options?)
Removes text nodes that are marked as AI-generated.
tf.ai.beginPreview(options?)
Captures the rollback slice and selection for insert-mode AI preview. Call it once before writing the first unsaved preview chunk.
tf.ai.acceptPreview()
Commits the active preview as one fresh undoable batch, strips preview-only markers, and clears preview bookkeeping.
tf.ai.cancelPreview()
Restores the rollback point for the active preview and clears preview bookkeeping.
tf.ai.discardPreview()
Clears preview bookkeeping without restoring content. Use it when the previewed content should stay in place.
tf.ai.hasPreview()
Reports whether an insert-mode preview rollback point is currently active.
tf.ai.undo()
Undoes the latest AI history entry when it was created by withAIBatch. If an insert-mode preview is active, it cancels that preview first instead of replaying every streamed chunk. In both cases it avoids re-applying AI output from redo.
Customization
Adding Custom AI Commands
'use client';
import * as React from 'react';
import {
AIChatPlugin,
AIPlugin,
useEditorChat,
useLastAssistantMessage,
} from '@platejs/ai/react';
import { getTransientCommentKey } from '@platejs/comment';
import { BlockSelectionPlugin, useIsSelecting } from '@platejs/selection/react';
import { getTransientSuggestionKey } from '@platejs/suggestion';
import { Command as CommandPrimitive } from 'cmdk';
import {
Album,
BadgeHelp,
BookOpenCheck,
Check,
CornerUpLeft,
FeatherIcon,
ListEnd,
ListMinus,
ListPlus,
Loader2Icon,
PauseIcon,
PenLine,
SmileIcon,
Wand,
X,
} from 'lucide-react';
import {
type NodeEntry,
type SlateEditor,
isHotkey,
KEYS,
NodeApi,
TextApi,
} from 'platejs';
import {
useEditorPlugin,
useFocusedLast,
useHotkeys,
usePluginOption,
} from 'platejs/react';
import { type PlateEditor, useEditorRef } from 'platejs/react';
import { Button } from '@/components/ui/button';
import {
Command,
CommandGroup,
CommandItem,
CommandList,
} from '@/components/ui/command';
import {
Popover,
PopoverAnchor,
PopoverContent,
} from '@/components/ui/popover';
import { cn } from '@/lib/utils';
import { commentPlugin } from '@/components/editor/plugins/comment-kit';
import { AIChatEditor } from './ai-chat-editor';
/**
 * Floating AI command menu anchored next to the cursor, text selection, or
 * block selection. Anchors a popover to a DOM node derived from editor
 * state, shows a loading row while the chat request is in flight, and
 * otherwise renders a free-form prompt input plus the AI command list.
 */
export function AIMenu() {
const { api, editor } = useEditorPlugin(AIChatPlugin);
const mode = usePluginOption(AIChatPlugin, 'mode');
const toolName = usePluginOption(AIChatPlugin, 'toolName');
const streaming = usePluginOption(AIChatPlugin, 'streaming');
const isSelecting = useIsSelecting();
const isFocusedLast = useFocusedLast();
// Only treat the menu as open when this editor was the last focused one.
const open = usePluginOption(AIChatPlugin, 'open') && isFocusedLast;
const [value, setValue] = React.useState('');
const [input, setInput] = React.useState('');
const chat = usePluginOption(AIChatPlugin, 'chat');
const { messages, status } = chat;
const [anchorElement, setAnchorElement] = React.useState<HTMLElement | null>(
null
);
// Text of the latest assistant message's first text part, if any.
const content = useLastAssistantMessage()?.parts.find(
(part) => part.type === 'text'
)?.text;
// While streaming, re-anchor the popover to the AI anchor node. Deferred
// one tick (setTimeout 0) so the anchor's DOM node exists before lookup.
React.useEffect(() => {
if (!streaming) return;
const timeoutId = setTimeout(() => {
const anchorEntry = api.aiChat.node({ anchor: true });
if (!anchorEntry) return;
const anchorDom = editor.api.toDOMNode(anchorEntry[0])!;
setAnchorElement(anchorDom);
}, 0);
return () => {
clearTimeout(timeoutId);
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [streaming]);
// Bridges the Popover open state to the plugin's show/hide transforms.
const setOpen = (open: boolean) => {
if (open) {
api.aiChat.show();
} else {
api.aiChat.hide();
}
};
// Anchors the popover to the given element and opens the menu.
const show = (anchorElement: HTMLElement) => {
setAnchorElement(anchorElement);
setOpen(true);
};
// Decide where to anchor depending on how the menu was invoked: block
// selection, collapsed cursor, or an expanded text selection.
useEditorChat({
onOpenBlockSelection: (blocks: NodeEntry[]) => {
show(editor.api.toDOMNode(blocks.at(-1)![0])!);
},
onOpenChange: (open) => {
if (!open) {
setAnchorElement(null);
setInput('');
}
},
onOpenCursor: () => {
const [ancestor] = editor.api.block({ highest: true })!;
// Select the current block when the cursor sits inside non-empty
// content that is not at the block end.
if (!editor.api.isAt({ end: true }) && !editor.api.isEmpty(ancestor)) {
editor
.getApi(BlockSelectionPlugin)
.blockSelection.set(ancestor.id as string);
}
show(editor.api.toDOMNode(ancestor)!);
},
onOpenSelection: () => {
show(editor.api.toDOMNode(editor.api.blocks().at(-1)![0])!);
},
});
useHotkeys('esc', () => {
api.aiChat.stop();
// remove when you implement the route /api/ai/command
(chat as any)._abortFakeStream();
});
const isLoading = status === 'streaming' || status === 'submitted';
// After an 'edit' tool finishes in chat mode, re-anchor the menu to the
// last transient suggestion node (falling back to the block selection).
React.useEffect(() => {
if (toolName === 'edit' && mode === 'chat' && !isLoading) {
let anchorNode = editor.api.node({
at: [],
reverse: true,
match: (n) => !!n[KEYS.suggestion] && !!n[getTransientSuggestionKey()],
});
if (!anchorNode) {
anchorNode = editor
.getApi(BlockSelectionPlugin)
.blockSelection.getNodes({ selectionFallback: true, sort: true })
.at(-1);
}
if (!anchorNode) return;
const block = editor.api.block({ at: anchorNode[1] });
setAnchorElement(editor.api.toDOMNode(block![0]!)!);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isLoading]);
// Hidden states: insert-mode streaming renders inline, the comment tool
// has its own UI, and edit-in-chat hides until loading completes.
if (isLoading && mode === 'insert') return null;
if (toolName === 'comment') return null;
if (toolName === 'edit' && mode === 'chat' && isLoading) return null;
return (
<Popover open={open} onOpenChange={setOpen} modal={false}>
<PopoverAnchor virtualRef={{ current: anchorElement! }} />
<PopoverContent
className="border-none bg-transparent p-0 shadow-none"
style={{
width: anchorElement?.offsetWidth,
}}
onEscapeKeyDown={(e) => {
e.preventDefault();
api.aiChat.hide();
}}
align="center"
side="bottom"
>
<Command
className="w-full rounded-lg border shadow-md"
value={value}
onValueChange={setValue}
>
{/* Preview the assistant response when generating over a selection. */}
{mode === 'chat' &&
isSelecting &&
content &&
toolName === 'generate' && <AIChatEditor content={content} />}
{isLoading ? (
<div className="flex grow select-none items-center gap-2 p-2 text-muted-foreground text-sm">
<Loader2Icon className="size-4 animate-spin" />
{messages.length > 1 ? 'Editing...' : 'Thinking...'}
</div>
) : (
<CommandPrimitive.Input
className={cn(
'flex h-9 w-full min-w-0 border-input bg-transparent px-3 py-1 text-base outline-none transition-[color,box-shadow] placeholder:text-muted-foreground md:text-sm dark:bg-input/30',
'aria-invalid:border-destructive aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40',
'border-b focus-visible:ring-transparent'
)}
value={input}
onKeyDown={(e) => {
if (isHotkey('backspace')(e) && input.length === 0) {
e.preventDefault();
api.aiChat.hide();
}
if (isHotkey('enter')(e) && !e.shiftKey && !value) {
e.preventDefault();
void api.aiChat.submit(input);
setInput('');
}
}}
onValueChange={setInput}
placeholder="Ask AI anything..."
data-plate-focus
autoFocus
/>
)}
{!isLoading && (
<CommandList>
<AIMenuItems
input={input}
setInput={setInput}
setValue={setValue}
/>
</CommandList>
)}
</Command>
</PopoverContent>
</Popover>
);
}
/**
 * The four UI states the AI menu can be in, derived in `AIMenuItems` from
 * whether text is selected and whether the assistant has already responded:
 * "command" states show prompt actions, "suggestion" states show actions
 * that operate on an existing AI response (accept, discard, retry, ...).
 */
type EditorChatState =
  | 'cursorCommand'
  | 'cursorSuggestion'
  | 'selectionCommand'
  | 'selectionSuggestion';
/**
 * Inline 24x24 stroke icon used for the AI "Comment" menu item.
 * Kept local because it is not part of the shared icon set.
 */
const AICommentIcon = () => (
  <svg
    width="24"
    height="24"
    viewBox="0 0 24 24"
    fill="none"
    stroke="currentColor"
    strokeWidth="2"
    strokeLinecap="round"
    strokeLinejoin="round"
    xmlns="http://www.w3.org/2000/svg"
  >
    {/* Invisible bounding rect keeps the icon's 24x24 layout box. */}
    <path d="M0 0h24v24H0z" fill="none" stroke="none" />
    <path d="M8 9h8" />
    <path d="M8 13h4.5" />
    <path d="M10 19l-1 -1h-3a3 3 0 0 1 -3 -3v-8a3 3 0 0 1 3 -3h12a3 3 0 0 1 3 3v4.5" />
    <path d="M17.8 20.817l-2.172 1.138a.392 .392 0 0 1 -.568 -.41l.415 -2.411l-1.757 -1.707a.389 .389 0 0 1 .217 -.665l2.428 -.352l1.086 -2.193a.392 .392 0 0 1 .702 0l1.086 2.193l2.428 .352a.39 .39 0 0 1 .217 .665l-1.757 1.707l.414 2.41a.39 .39 0 0 1 -.567 .411l-2.172 -1.138z" />
  </svg>
);
/**
 * Registry of every AI menu command. Each entry renders as a `CommandItem`;
 * its `onSelect` receives `{ aiEditor, editor, input }` and either submits a
 * prompt via `api.aiChat.submit` (with `toolName` controlling how the stream
 * is processed) or applies a transform to the streamed result. Which entries
 * are visible per menu state is decided by `menuStateItems`.
 */
const aiChatItems = {
  accept: {
    icon: <Check />,
    label: 'Accept',
    value: 'accept',
    onSelect: ({ aiEditor, editor }) => {
      const { mode, toolName } = editor.getOptions(AIChatPlugin);
      // In chat mode a generated answer lives in `aiEditor`, so accepting
      // means replacing the current selection with its content; otherwise
      // accept the in-document AI output and refocus the editor at the end.
      if (mode === 'chat' && toolName === 'generate') {
        return editor
          .getTransforms(AIChatPlugin)
          .aiChat.replaceSelection(aiEditor);
      }
      editor.getTransforms(AIChatPlugin).aiChat.accept();
      editor.tf.focus({ edge: 'end' });
    },
  },
  comment: {
    icon: <AICommentIcon />,
    label: 'Comment',
    value: 'comment',
    onSelect: ({ editor, input }) => {
      // `void` marks the promise as intentionally unawaited, consistent with
      // every other submit-based command in this map.
      void editor.getApi(AIChatPlugin).aiChat.submit(input, {
        mode: 'insert',
        prompt:
          'Please comment on the following content and provide reasonable and meaningful feedback.',
        toolName: 'comment',
      });
    },
  },
  continueWrite: {
    icon: <PenLine />,
    label: 'Continue writing',
    value: 'continueWrite',
    onSelect: ({ editor, input }) => {
      const ancestorNode = editor.api.block({ highest: true });
      if (!ancestorNode) return;
      const isEmpty = NodeApi.string(ancestorNode[0]).trim().length === 0;
      // `{editor}` is a prompt placeholder — presumably expanded to the
      // serialized document by the prompt helpers; verify against the
      // AIChatPlugin prompt configuration.
      void editor.getApi(AIChatPlugin).aiChat.submit(input, {
        mode: 'insert',
        prompt: isEmpty
          ? `<Document>
{editor}
</Document>
Start writing a new paragraph AFTER <Document> ONLY ONE SENTENCE`
          : 'Continue writing AFTER <Block> ONLY ONE SENTENCE. DONT REPEAT THE TEXT.',
        toolName: 'generate',
      });
    },
  },
  discard: {
    icon: <X />,
    label: 'Discard',
    shortcut: 'Escape',
    value: 'discard',
    onSelect: ({ editor }) => {
      // Revert the AI-applied changes, then close the menu.
      editor.getTransforms(AIPlugin).ai.undo();
      editor.getApi(AIChatPlugin).aiChat.hide();
    },
  },
  emojify: {
    icon: <SmileIcon />,
    label: 'Emojify',
    value: 'emojify',
    onSelect: ({ editor, input }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit(input, {
        prompt:
          'Add a small number of contextually relevant emojis within each block only. You may insert emojis, but do not remove, replace, or rewrite existing text, and do not modify Markdown syntax, links, or line breaks.',
        toolName: 'edit',
      });
    },
  },
  explain: {
    icon: <BadgeHelp />,
    label: 'Explain',
    value: 'explain',
    onSelect: ({ editor, input }) => {
      // Separate prompts for "whole document" vs "current selection".
      void editor.getApi(AIChatPlugin).aiChat.submit(input, {
        prompt: {
          default: 'Explain {editor}',
          selecting: 'Explain',
        },
        toolName: 'generate',
      });
    },
  },
  fixSpelling: {
    icon: <Check />,
    label: 'Fix spelling & grammar',
    value: 'fixSpelling',
    onSelect: ({ editor, input }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit(input, {
        prompt:
          'Fix spelling, grammar, and punctuation errors within each block only, without changing meaning, tone, or adding new information.',
        toolName: 'edit',
      });
    },
  },
  generateMarkdownSample: {
    icon: <BookOpenCheck />,
    label: 'Generate Markdown sample',
    value: 'generateMarkdownSample',
    onSelect: ({ editor, input }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit(input, {
        prompt: 'Generate a markdown sample',
        toolName: 'generate',
      });
    },
  },
  generateMdxSample: {
    icon: <BookOpenCheck />,
    label: 'Generate MDX sample',
    value: 'generateMdxSample',
    onSelect: ({ editor, input }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit(input, {
        prompt: 'Generate a mdx sample',
        toolName: 'generate',
      });
    },
  },
  improveWriting: {
    icon: <Wand />,
    label: 'Improve writing',
    value: 'improveWriting',
    onSelect: ({ editor, input }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit(input, {
        prompt:
          'Improve the writing for clarity and flow, without changing meaning or adding new information.',
        toolName: 'edit',
      });
    },
  },
  insertBelow: {
    icon: <ListEnd />,
    label: 'Insert below',
    value: 'insertBelow',
    onSelect: ({ aiEditor, editor }) => {
      // `format: 'none'` keeps the streamed block structure intact so that
      // tables insert correctly.
      void editor
        .getTransforms(AIChatPlugin)
        .aiChat.insertBelow(aiEditor, { format: 'none' });
    },
  },
  makeLonger: {
    icon: <ListPlus />,
    label: 'Make longer',
    value: 'makeLonger',
    onSelect: ({ editor, input }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit(input, {
        prompt:
          'Make the content longer by elaborating on existing ideas within each block only, without changing meaning or adding new information.',
        toolName: 'edit',
      });
    },
  },
  makeShorter: {
    icon: <ListMinus />,
    label: 'Make shorter',
    value: 'makeShorter',
    onSelect: ({ editor, input }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit(input, {
        prompt:
          'Make the content shorter by reducing verbosity within each block only, without changing meaning or removing essential information.',
        toolName: 'edit',
      });
    },
  },
  replace: {
    icon: <Check />,
    label: 'Replace selection',
    value: 'replace',
    onSelect: ({ aiEditor, editor }) => {
      void editor.getTransforms(AIChatPlugin).aiChat.replaceSelection(aiEditor);
    },
  },
  simplifyLanguage: {
    icon: <FeatherIcon />,
    label: 'Simplify language',
    value: 'simplifyLanguage',
    onSelect: ({ editor, input }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit(input, {
        prompt:
          'Simplify the language by using clearer and more straightforward wording within each block only, without changing meaning or adding new information.',
        toolName: 'edit',
      });
    },
  },
  summarize: {
    icon: <Album />,
    label: 'Add a summary',
    value: 'summarize',
    onSelect: ({ editor, input }) => {
      void editor.getApi(AIChatPlugin).aiChat.submit(input, {
        mode: 'insert',
        prompt: {
          default: 'Summarize {editor}',
          selecting: 'Summarize',
        },
        toolName: 'generate',
      });
    },
  },
  tryAgain: {
    icon: <CornerUpLeft />,
    label: 'Try again',
    value: 'tryAgain',
    onSelect: ({ editor }) => {
      void editor.getApi(AIChatPlugin).aiChat.reload();
    },
  },
} satisfies Record<
  string,
  {
    icon: React.ReactNode;
    label: string;
    value: string;
    component?: React.ComponentType<{ menuState: EditorChatState }>;
    filterItems?: boolean;
    items?: { label: string; value: string }[];
    shortcut?: string;
    onSelect?: ({
      aiEditor,
      editor,
      input,
    }: {
      aiEditor: SlateEditor;
      editor: PlateEditor;
      input: string;
    }) => void;
  }
>;
/**
 * Menu item groups shown for each `EditorChatState`, in display order.
 * "Command" states list prompt actions; "suggestion" states list actions
 * that operate on an existing AI response.
 */
const menuStateItems: Record<
  EditorChatState,
  {
    items: (typeof aiChatItems)[keyof typeof aiChatItems][];
    heading?: string;
  }[]
> = {
  // Collapsed cursor, no AI response yet.
  cursorCommand: [
    {
      items: [
        aiChatItems.comment,
        aiChatItems.generateMdxSample,
        aiChatItems.generateMarkdownSample,
        aiChatItems.continueWrite,
        aiChatItems.summarize,
        aiChatItems.explain,
      ],
    },
  ],
  // Collapsed cursor, a response exists.
  cursorSuggestion: [
    {
      items: [aiChatItems.accept, aiChatItems.discard, aiChatItems.tryAgain],
    },
  ],
  // Text selected, no AI response yet.
  selectionCommand: [
    {
      items: [
        aiChatItems.improveWriting,
        aiChatItems.comment,
        aiChatItems.emojify,
        aiChatItems.makeLonger,
        aiChatItems.makeShorter,
        aiChatItems.fixSpelling,
        aiChatItems.simplifyLanguage,
      ],
    },
  ],
  // Text selected, a response exists.
  selectionSuggestion: [
    {
      items: [
        aiChatItems.accept,
        aiChatItems.discard,
        aiChatItems.insertBelow,
        aiChatItems.tryAgain,
      ],
    },
  ],
};
/**
 * Renders the grouped AI command list for the current menu state.
 *
 * The state is derived from the chat history and the current selection: once
 * the assistant has responded, suggestion actions (Accept, Try Again, ...)
 * replace the prompt commands. Whenever the groups change, the first item is
 * pre-selected via `setValue` so Enter triggers it immediately.
 */
export const AIMenuItems = ({
  input,
  setInput,
  setValue,
}: {
  input: string;
  setInput: (value: string) => void;
  setValue: (value: string) => void;
}) => {
  const editor = useEditorRef();
  const { messages } = usePluginOption(AIChatPlugin, 'chat');
  const aiEditor = usePluginOption(AIChatPlugin, 'aiEditor')!;
  const isSelecting = useIsSelecting();
  const menuState = React.useMemo(() => {
    if (messages && messages.length > 0) {
      return isSelecting ? 'selectionSuggestion' : 'cursorSuggestion';
    }
    return isSelecting ? 'selectionCommand' : 'cursorCommand';
  }, [isSelecting, messages]);
  // `menuStateItems` is a module-level constant, so this lookup is already
  // referentially stable per `menuState` — no useMemo needed, and the
  // effect below still only re-runs when the state actually changes.
  const menuGroups = menuStateItems[menuState];
  React.useEffect(() => {
    if (menuGroups.length > 0 && menuGroups[0].items.length > 0) {
      setValue(menuGroups[0].items[0].value);
    }
  }, [menuGroups, setValue]);
  return (
    <>
      {menuGroups.map((group, index) => (
        <CommandGroup key={index} heading={group.heading}>
          {group.items.map((menuItem) => (
            <CommandItem
              key={menuItem.value}
              className="[&_svg]:text-muted-foreground"
              value={menuItem.value}
              onSelect={() => {
                menuItem.onSelect?.({
                  aiEditor,
                  editor,
                  input,
                });
                // Clear the prompt input after dispatching the command.
                setInput('');
              }}
            >
              {menuItem.icon}
              <span>{menuItem.label}</span>
            </CommandItem>
          ))}
        </CommandGroup>
      ))}
    </>
  );
};
/**
 * Floating status bar rendered at the bottom of the editor while an AI chat
 * request is in flight (with a Stop button), plus an Accept/Reject bar once
 * a "comment" run has finished. Renders null in every other state.
 */
export function AILoadingBar() {
  const editor = useEditorRef();
  const toolName = usePluginOption(AIChatPlugin, 'toolName');
  const chat = usePluginOption(AIChatPlugin, 'chat');
  const mode = usePluginOption(AIChatPlugin, 'mode');
  const { status } = chat;
  const { api } = useEditorPlugin(AIChatPlugin);
  // In flight both while queued ('submitted') and while tokens stream in.
  const isLoading = status === 'streaming' || status === 'submitted';
  // Resolve all transient AI comments at once, then close the chat UI:
  // accept clears the transient flag on commented text nodes (keeping the
  // comment marks); reject strips the transient comment marks entirely.
  const handleComments = (type: 'accept' | 'reject') => {
    if (type === 'accept') {
      editor.tf.unsetNodes([getTransientCommentKey()], {
        at: [],
        match: (n) => TextApi.isText(n) && !!n[KEYS.comment],
      });
    }
    if (type === 'reject') {
      editor
        .getTransforms(commentPlugin)
        .comment.unsetMark({ transient: true });
    }
    api.aiChat.hide();
  };
  // Escape stops the in-flight request.
  useHotkeys('esc', () => {
    api.aiChat.stop();
    // remove when you implement the route /api/ai/command
    (chat as any)._abortFakeStream();
  });
  // Loading state for flows that write into the document directly
  // (insert mode, the comment tool, or edit-in-chat): spinner + Stop.
  if (
    isLoading &&
    (mode === 'insert' ||
      toolName === 'comment' ||
      (toolName === 'edit' && mode === 'chat'))
  ) {
    return (
      <div
        className={cn(
          '-translate-x-1/2 absolute bottom-4 left-1/2 z-20 flex items-center gap-3 rounded-md border border-border bg-muted px-3 py-1.5 text-muted-foreground text-sm shadow-md transition-all duration-300'
        )}
      >
        <span className="h-4 w-4 animate-spin rounded-full border-2 border-muted-foreground border-t-transparent" />
        <span>{status === 'submitted' ? 'Thinking...' : 'Writing...'}</span>
        <Button
          size="sm"
          variant="ghost"
          className="flex items-center gap-1 text-xs"
          onClick={() => api.aiChat.stop()}
        >
          <PauseIcon className="h-4 w-4" />
          Stop
          <kbd className="ml-1 rounded bg-border px-1 font-mono text-[10px] text-muted-foreground shadow-sm">
            Esc
          </kbd>
        </Button>
      </div>
    );
  }
  // Finished comment run: offer Accept/Reject for the streamed comments.
  if (toolName === 'comment' && status === 'ready') {
    return (
      <div
        className={cn(
          '-translate-x-1/2 absolute bottom-4 left-1/2 z-50 flex flex-col items-center gap-0 rounded-xl border border-border/50 bg-popover p-1 text-muted-foreground text-sm shadow-xl backdrop-blur-sm',
          'p-3'
        )}
      >
        {/* Header with controls */}
        <div className="flex w-full items-center justify-between gap-3">
          <div className="flex items-center gap-5">
            <Button
              size="sm"
              disabled={isLoading}
              onClick={() => handleComments('accept')}
            >
              Accept
            </Button>
            <Button
              size="sm"
              disabled={isLoading}
              onClick={() => handleComments('reject')}
            >
              Reject
            </Button>
          </div>
        </div>
      </div>
    );
  }
  return null;
}Extend the aiChatItems map to add new commands. Each command receives { aiEditor, editor, input } and can dispatch api.aiChat.submit with custom prompts or transforms.
Simple Custom Command
summarizeInBullets: {
icon: <ListIcon />,
label: 'Summarize in bullets',
value: 'summarizeInBullets',
onSelect: ({ editor }) => {
void editor.getApi(AIChatPlugin).aiChat.submit('', {
prompt: 'Summarize the current selection using bullet points',
toolName: 'generate',
});
},
},Command with Complex Logic
generateTOC: {
icon: <BookIcon />,
label: 'Generate table of contents',
value: 'generateTOC',
onSelect: ({ editor }) => {
const headings = editor.api.nodes({
match: (n) => ['h1', 'h2', 'h3'].includes(n.type as string),
});
const prompt =
headings.length === 0
? 'Create a realistic table of contents for this document'
: 'Generate a table of contents that reflects the existing headings';
void editor.getApi(AIChatPlugin).aiChat.submit('', {
mode: 'insert',
prompt,
toolName: 'generate',
});
},
},The menu automatically switches between command and suggestion states:
- `cursorCommand`: Cursor is collapsed and no response yet.
- `selectionCommand`: Text is selected and no response yet.
- `cursorSuggestion` / `selectionSuggestion`: A response exists, so actions like Accept, Try Again, or Insert Below are shown.
Use toolName ('generate' | 'edit' | 'comment') to control how streaming hooks process the response. For example, 'edit' enables diff-based suggestions, and 'comment' allows you to convert streamed comments into discussion threads with aiCommentToRange.
On This Page
FeaturesKit UsageInstallationAdd KitAdd API RouteConfigure EnvironmentManual UsageInstallationAdd PluginsConfigure AIChatPluginBuild API RouteConnect useChatPrompt TemplatesClient PromptingServer PromptingKeyboard ShortcutsStreamingStreaming ExamplePlate PlusHooksuseAIChatEditoruseEditorChatuseChatChunkUtilitieswithAIBatchapplyAISuggestionsaiCommentToRangefindTextRangeInBlockgetEditorPromptreplacePlaceholdersPluginsAIPluginAIChatPluginAPIapi.aiChat.submit(input, options?)api.aiChat.reset(options?)api.aiChat.node(options?)api.aiChat.reload()api.aiChat.stop()api.aiChat.show()api.aiChat.hide(options?)Transformstf.aiChat.accept()tf.aiChat.insertBelow(sourceEditor, options?)tf.aiChat.replaceSelection(sourceEditor, options?)tf.aiChat.removeAnchor(options?)tf.ai.insertNodes(nodes, options?)tf.ai.removeMarks(options?)tf.ai.removeNodes(options?)tf.ai.beginPreview(options?)tf.ai.acceptPreview()tf.ai.cancelPreview()tf.ai.discardPreview()tf.ai.hasPreview()tf.ai.undo()CustomizationAdding Custom AI CommandsSimple Custom CommandCommand with Complex Logic