diff --git a/public/__redirects b/public/__redirects
index af966e9f9c7bf2..54424049b6e8f7 100644
--- a/public/__redirects
+++ b/public/__redirects
@@ -1966,6 +1966,7 @@
# Features section
/workers-ai/markdown-conversion/ /workers-ai/features/markdown-conversion/ 301
# workflows
+/workflows/get-started/cli-quick-start/ /workflows/get-started/guide/ 301
/workflows/reference/storage-options/ /workers/platform/storage-options/ 301
/workflows/tutorials/ /workflows/examples 301
# workers KV
diff --git a/src/content/docs/workers/wrangler/commands.mdx b/src/content/docs/workers/wrangler/commands.mdx
index c74fb1f739f978..9f6095c595d921 100644
--- a/src/content/docs/workers/wrangler/commands.mdx
+++ b/src/content/docs/workers/wrangler/commands.mdx
@@ -566,6 +566,21 @@ Manage and configure [Workflows](/workflows/).
+### `restart`
+
+Restart a Workflow instance
+
+```sh
+wrangler workflows instances restart <WORKFLOW_NAME> <ID>
+```
+
+- `WORKFLOW_NAME`
+ - The name of a registered Workflow.
+- `ID`
+ - The ID of a Workflow instance.
+
+
+
After starting `wrangler tail`, you will receive a live feed of console and exception logs for each request your Worker receives.
diff --git a/src/content/docs/workflows/build/rules-of-workflows.mdx b/src/content/docs/workflows/build/rules-of-workflows.mdx
index b0edc01401a4e9..b8a39d402cdc34 100644
--- a/src/content/docs/workflows/build/rules-of-workflows.mdx
+++ b/src/content/docs/workflows/build/rules-of-workflows.mdx
@@ -589,6 +589,10 @@ export default {
+### Limit timeouts to 30 minutes or less
+
+When setting a [WorkflowStep timeout](/workflows/build/workers-api/#workflowstep), ensure that its duration is 30 minutes or less. If your use case requires a timeout greater than 30 minutes, consider using `step.waitForEvent()` instead.
+
### Keep step return values under 1 MiB
Each step can persist up to 1 MiB (2^20 bytes) of state. If your step returns data exceeding this limit, the step will fail. This is a common issue when fetching large API responses or processing large files.
diff --git a/src/content/docs/workflows/build/trigger-workflows.mdx b/src/content/docs/workflows/build/trigger-workflows.mdx
index 385155b9543e3c..bc8818ac793718 100644
--- a/src/content/docs/workflows/build/trigger-workflows.mdx
+++ b/src/content/docs/workflows/build/trigger-workflows.mdx
@@ -204,4 +204,4 @@ Refer to the [Workflows REST API documentation](/api/resources/workflows/subreso
## Command line (CLI)
-Refer to the [CLI quick start](/workflows/get-started/cli-quick-start/) to learn more about how to manage and trigger Workflows via the command-line.
+Refer to the [CLI quick start](/workflows/get-started/guide/) to learn more about how to manage and trigger Workflows via the command-line.
diff --git a/src/content/docs/workflows/get-started/cli-quick-start.mdx b/src/content/docs/workflows/get-started/cli-quick-start.mdx
deleted file mode 100644
index 7f3b5f7d7b5359..00000000000000
--- a/src/content/docs/workflows/get-started/cli-quick-start.mdx
+++ /dev/null
@@ -1,270 +0,0 @@
----
-title: CLI quick start
-pcx_content_type: get-started
-reviewed: 2024-10-23
-sidebar:
- order: 3
-
----
-
-import { Render, PackageManagers, WranglerConfig } from "~/components"
-
-Workflows allow you to build durable, multi-step applications using the Workers platform. A Workflow can automatically retry, persist state, run for hours or days, and coordinate between third-party APIs.
-
-You can build Workflows to post-process file uploads to [R2 object storage](/r2/), automate generation of [Workers AI](/workers-ai/) embeddings into a [Vectorize](/vectorize/) vector database, or to trigger user lifecycle emails using your favorite email API.
-
-## Prerequisites
-
-:::caution
-
-This guide is for users who are already familiar with Cloudflare Workers the [durable execution](/workflows/reference/glossary/) programming model it enables.
-
-If you are new to either, we recommend the [introduction to Workflows](/workflows/get-started/guide/) guide, which walks you through how a Workflow is defined, how to persist state, and how to deploy and run your first Workflow.
-
-:::
-
-
-## 1. Create a Workflow
-
-Workflows are defined as part of a Worker script.
-
-To create a Workflow, use the `create cloudflare` (C3) CLI tool, specifying the Workflows starter template:
-
-```sh
-npm create cloudflare@latest workflows-starter -- --template "cloudflare/workflows-starter"
-```
-
-This will create a new folder called `workflows-tutorial`, which contains two files:
-
-* `src/index.ts` - this is where your Worker script, including your Workflows definition, is defined.
-* wrangler.jsonc - the [Wrangler configuration file](/workers/wrangler/configuration/) for your Workers project and your Workflow.
-
-Open the `src/index.ts` file in your text editor. This file contains the following code, which is the most basic instance of a Workflow definition:
-
-```ts title="src/index.ts"
-import { WorkflowEntrypoint, WorkflowStep, WorkflowEvent } from 'cloudflare:workers';
-
-type Env = {
- // Add your bindings here, e.g. Workers KV, D1, Workers AI, etc.
- MY_WORKFLOW: Workflow;
-};
-
-// User-defined params passed to your workflow
-type Params = {
- email: string;
- metadata: Record;
-};
-
-export class MyWorkflow extends WorkflowEntrypoint {
- async run(event: WorkflowEvent, step: WorkflowStep) {
- // Can access bindings on `this.env`
- // Can access params on `event.payload`
-
- const files = await step.do('my first step', async () => {
- // Fetch a list of files from $SOME_SERVICE
- return {
- files: [
- 'doc_7392_rev3.pdf',
- 'report_x29_final.pdf',
- 'memo_2024_05_12.pdf',
- 'file_089_update.pdf',
- 'proj_alpha_v2.pdf',
- 'data_analysis_q2.pdf',
- 'notes_meeting_52.pdf',
- 'summary_fy24_draft.pdf',
- ],
- };
- });
-
- const apiResponse = await step.do('some other step', async () => {
- let resp = await fetch('https://api.cloudflare.com/client/v4/ips');
- return await resp.json();
- });
-
- await step.sleep('wait on something', '1 minute');
-
- await step.do(
- 'make a call to write that could maybe, just might, fail',
- // Define a retry strategy
- {
- retries: {
- limit: 5,
- delay: '5 second',
- backoff: 'exponential',
- },
- timeout: '15 minutes',
- },
- async () => {
- // Do stuff here, with access to the state from our previous steps
- if (Math.random() > 0.5) {
- throw new Error('API call to $STORAGE_SYSTEM failed');
- }
- },
- );
- }
-}
-
-export default {
- async fetch(req: Request, env: Env): Promise {
- let id = new URL(req.url).searchParams.get('instanceId');
-
- // Get the status of an existing instance, if provided
- if (id) {
- let instance = await env.MY_WORKFLOW.get(id);
- return Response.json({
- status: await instance.status(),
- });
- }
-
- // Spawn a new instance and return the ID and status
- let instance = await env.MY_WORKFLOW.create();
- return Response.json({
- id: instance.id,
- details: await instance.status(),
- });
- },
-};
-```
-
-Specifically, the code above:
-
-1. Extends the Workflows base class (`WorkflowsEntrypoint`) and defines a `run` method for our Workflow.
-2. Passes in our `Params` type as a [type parameter](/workflows/build/events-and-parameters/) so that events that trigger our Workflow are typed.
-3. Defines several steps that return state.
-4. Defines a custom retry configuration for a step.
-5. Binds to the Workflow from a Worker's `fetch` handler so that we can create (trigger) instances of our Workflow via a HTTP call.
-
-You can edit this Workflow by adding (or removing) additional `step` calls, changing the retry configuration, and/or making your own API calls. This Workflow template is designed to illustrate some of Workflows APIs.
-
-## 2. Deploy a Workflow
-
-Workflows are deployed via [`wrangler`](/workers/wrangler/install-and-update/), which is installed when you first ran `npm create cloudflare` above. Workflows are Worker scripts, and are deployed the same way:
-
-```sh
-npx wrangler@latest deploy
-```
-
-:::note
-
-Workflows cannot be deployed to Workers for Platforms namespaces, as Workflows do not support Workers for Platforms.
-
-:::
-
-## 3. Run a Workflow
-
-You can run a Workflow via the `wrangler` CLI, via a Worker binding, or via the Workflows [REST API](/api/resources/workflows/methods/list/).
-
-### `wrangler` CLI
-
-```sh
-# Trigger a Workflow from the CLI, and pass (optional) parameters as an event to the Workflow.
-npx wrangler@latest workflows trigger workflows-tutorial --params={"email": "user@example.com", "metadata": {"id": "1"}}
-```
-
-Refer to the [events and parameters documentation](/workflows/build/events-and-parameters/) to understand how events are passed to Workflows.
-
-### Worker binding
-
-You can [bind to a Workflow](/workers/runtime-apis/bindings/#what-is-a-binding) from any handler in a Workers script, allowing you to programatically trigger and pass parameters to a Workflow instance from your own application code.
-
-To bind a Workflow to a Worker, you need to define a `[[workflows]]` binding in your Wrangler configuration:
-
-
-
-```toml
-[[workflows]]
-# name of your workflow
-name = "workflows-starter"
-# binding name env.MY_WORKFLOW
-binding = "MY_WORKFLOW"
-# this is class that extends the Workflow class in src/index.ts
-class_name = "MyWorkflow"
-```
-
-
-
-You can then invoke the methods on this binding directly from your Worker script's `env` parameter. The `Workflow` type has methods for:
-
-* `create()` - creating (triggering) a new instance of the Workflow, returning the ID.
-* `createBatch()` - creating (triggering) a batch of new instances of the Workflow, returning the IDs.
-* `get()`- retrieve a Workflow instance by its ID.
-* `status()` - get the current status of a unique Workflow instance.
-
-For example, the following Worker will fetch the status of an existing Workflow instance by ID (if supplied), else it will create a new Workflow instance and return its ID:
-
-```ts title="src/index.ts"
-// Import the Workflow definition
-import { WorkflowEntrypoint, WorkflowStep, WorkflowEvent} from 'cloudflare:workers';
-
-interface Env {
- // Matches the binding definition in your Wrangler configuration file
- MY_WORKFLOW: Workflow;
-}
-
-export default {
- async fetch(req: Request, env: Env): Promise {
- let id = new URL(req.url).searchParams.get('instanceId');
-
- // Get the status of an existing instance, if provided
- if (id) {
- let instance = await env.MY_WORKFLOW.get(id);
- return Response.json({
- status: await instance.status(),
- });
- }
-
- // Spawn a new instance and return the ID and status
- let instance = await env.MY_WORKFLOW.create();
- return Response.json({
- id: instance.id,
- details: await instance.status(),
- });
- },
-};
-```
-
-Refer to the [triggering Workflows](/workflows/build/trigger-workflows/) documentation for how to trigger a Workflow from other Workers' handler functions.
-
-## 4. Manage Workflows
-
-:::note
-
-The `wrangler workflows` command requires Wrangler version `3.83.0` or greater. Use `npx wrangler@latest` to always use the latest Wrangler version when invoking commands.
-
-:::
-
-The `wrangler workflows` command group has several sub-commands for managing and inspecting Workflows and their instances:
-
-* List Workflows: `wrangler workflows list`
-* Inspect the instances of a Workflow: `wrangler workflows instances list YOUR_WORKFLOW_NAME`
-* View the state of a running Workflow instance by its ID: `wrangler workflows instances describe YOUR_WORKFLOW_NAME WORKFLOW_ID`
-
-You can also view the state of the latest instance of a Workflow by using the `latest` keyword instead of an ID:
-
-```sh
-npx wrangler@latest workflows instances describe workflows-starter latest
-# Or by ID:
-# npx wrangler@latest workflows instances describe workflows-starter 12dc179f-9f77-4a37-b973-709dca4189ba
-```
-
-The output of `instances describe` shows:
-
-* The status (success, failure, running) of each step
-* Any state emitted by the step
-* Any `sleep` state, including when the Workflow will wake up
-* Retries associated with each step
-* Errors, including exception messages
-
-:::note
-
-You do not have to wait for a Workflow instance to finish executing to inspect its current status. The `wrangler workflows instances describe` sub-command will show the status of an in-progress instance, including any persisted state, if it is sleeping, and any errors or retries. This can be especially useful when debugging a Workflow during development.
-
-:::
-
-## Next steps
-
-* Learn more about [how events are passed to a Workflow](/workflows/build/events-and-parameters/).
-* Binding to and triggering Workflow instances using the [Workers API](/workflows/build/workers-api/).
-* The [Rules of Workflows](/workflows/build/rules-of-workflows/) and best practices for building applications using Workflows.
-
-If you have any feature requests or notice any bugs, share your feedback directly with the Cloudflare team by joining the [Cloudflare Developers community on Discord](https://discord.cloudflare.com).
diff --git a/src/content/docs/workflows/get-started/durable-agents.mdx b/src/content/docs/workflows/get-started/durable-agents.mdx
new file mode 100644
index 00000000000000..051fdb7bbff323
--- /dev/null
+++ b/src/content/docs/workflows/get-started/durable-agents.mdx
@@ -0,0 +1,518 @@
+---
+title: Build a Durable AI Agent
+pcx_content_type: get-started
+sidebar:
+ order: 3
+---
+
+import {
+ Details,
+ LinkCard,
+ Render,
+ PackageManagers,
+ WranglerConfig,
+} from "~/components";
+
+In this guide, you'll build an AI agent that researches GitHub repositories. Give it a task like _"Compare open-source LLM projects"_ and it will:
+
+1. Search GitHub for relevant repositories
+2. Fetch details about each one (stars, forks, activity)
+3. Analyze and compare them
+4. Return a recommendation
+
+Each LLM call and tool call becomes a **step** — a self-contained, individually retriable unit of work. If any step fails, Workflows retries it automatically. If the entire Workflow crashes mid-task, it resumes from the last successful step.
+
+| Challenge | Solution with Workflows |
+| ---------------------------- | --------------------------------------------------------- |
+| Long-running agent loops | Durable execution that survives any interruption |
+| Unreliable LLM and API calls | Automatic retry with independent checkpoints |
+| Waiting for human approval | `waitForEvent()` pauses for hours or days |
+| Polling for job completion | `step.sleep()` between checks without consuming resources |
+
+This guide uses the Anthropic SDK, but the same patterns apply to any LLM SDK (OpenAI, Google AI, Mistral, etc.).
+
+## Quick start
+
+If you want to skip the steps and pull down the complete agent:
+
+```sh
+npm create cloudflare@latest -- --template cloudflare/docs-examples/workflows/durable-ai-agent
+```
+
+Use this option if you are familiar with Cloudflare Workflows or want to explore the code first.
+
+Follow the steps below to learn how to build a durable AI agent from scratch.
+
+## Prerequisites
+
+
+
+You'll also need an [Anthropic API key](https://platform.claude.com/settings/keys) for LLM calls. New accounts include free credits.
+
+## 1. Create a new Worker project
+
+
+
+
+
+Move into your project:
+
+```sh
+cd durable-ai-agent
+```
+
+Install dependencies:
+
+```sh
+npm install @anthropic-ai/sdk
+```
+
+## 2. Define your tools
+
+Tools are functions the LLM can call to interact with external systems. You define the schema (what inputs the tool accepts) and the implementation (what it does). The LLM decides when to use each tool based on the task.
+
+Create `src/tools.ts` with two complementary tools:
+
+```ts title="src/tools.ts"
+export interface SearchReposInput {
+ query: string;
+ limit?: number;
+}
+
+export interface GetRepoInput {
+ owner: string;
+ repo: string;
+}
+
+interface GitHubSearchResponse {
+ items: Array<{ full_name: string; stargazers_count: number }>;
+}
+
+interface GitHubRepoResponse {
+ full_name: string;
+ description: string;
+ stargazers_count: number;
+ forks_count: number;
+ open_issues_count: number;
+ language: string;
+ license: { name: string } | null;
+ updated_at: string;
+}
+
+export const searchReposTool = {
+ name: "search_repos" as const,
+ description:
+ "Search GitHub repositories by keyword. Returns top results. Use get_repo for details.",
+ input_schema: {
+ type: "object" as const,
+ properties: {
+ query: {
+ type: "string",
+ description: "Search query (e.g., 'typescript orm')",
+ },
+ limit: { type: "number", description: "Max results (default 5)" },
+ },
+ required: ["query"],
+ },
+ run: async (input: SearchReposInput): Promise<string> => {
+ const response = await fetch(
+ `https://api.github.com/search/repositories?q=${encodeURIComponent(input.query)}&sort=stars&per_page=${input.limit ?? 5}`,
+ {
+ headers: {
+ Accept: "application/vnd.github+json",
+ "User-Agent": "DurableAgent/1.0",
+ },
+ },
+ );
+ if (!response.ok) return `Search failed: ${response.status}`;
+ const data = await response.json();
+ return JSON.stringify(
+ data.items.map((r) => ({ name: r.full_name, stars: r.stargazers_count })),
+ );
+ },
+};
+
+export const getRepoTool = {
+ name: "get_repo" as const,
+ description:
+ "Get detailed info about a GitHub repository including stars, forks, and description.",
+ input_schema: {
+ type: "object" as const,
+ properties: {
+ owner: {
+ type: "string",
+ description: "Repository owner (e.g., 'cloudflare')",
+ },
+ repo: {
+ type: "string",
+ description: "Repository name (e.g., 'workers-sdk')",
+ },
+ },
+ required: ["owner", "repo"],
+ },
+ run: async (input: GetRepoInput): Promise<string> => {
+ const response = await fetch(
+ `https://api.github.com/repos/${input.owner}/${input.repo}`,
+ {
+ headers: {
+ Accept: "application/vnd.github+json",
+ "User-Agent": "DurableAgent/1.0",
+ },
+ },
+ );
+ if (!response.ok) return `Repo not found: ${input.owner}/${input.repo}`;
+ const data = await response.json<GitHubRepoResponse>();
+ return JSON.stringify({
+ name: data.full_name,
+ description: data.description,
+ stars: data.stargazers_count,
+ forks: data.forks_count,
+ issues: data.open_issues_count,
+ language: data.language,
+ license: data.license?.name ?? "None",
+ updated: data.updated_at,
+ });
+ },
+};
+
+export const tools = [searchReposTool, getRepoTool];
+```
+
+These tools complement each other: `search_repos` finds repositories, and `get_repo` fetches details about specific ones.
+
+## 3. Write your agent Workflow
+
+A Workflow extends `WorkflowEntrypoint` and implements a `run` method. The [`step`](/workflows/build/workers-api/#step) object provides methods to define durable steps. `step.do(name, callback)` executes code and persists the result. If the Workflow is interrupted, it resumes from the last successful step. For a gentler introduction, see [Build your first Workflow](/workflows/get-started/guide/).
+
+The agent loop sends messages to the LLM, executes any tool calls, and repeats until the task is complete. Each LLM call and tool execution is wrapped in `step.do()` for durability.
+
+Create `src/workflow.ts`:
+
+```ts title="src/workflow.ts"
+import { WorkflowEntrypoint, WorkflowStep } from "cloudflare:workers";
+import type { WorkflowEvent } from "cloudflare:workers";
+import Anthropic from "@anthropic-ai/sdk";
+import {
+ tools,
+ searchReposTool,
+ getRepoTool,
+ type SearchReposInput,
+ type GetRepoInput,
+} from "./tools";
+
+type Params = { task: string };
+
+export class AgentWorkflow extends WorkflowEntrypoint<Env, Params> {
+ async run(event: WorkflowEvent<Params>, step: WorkflowStep) {
+ const client = new Anthropic({ apiKey: this.env.ANTHROPIC_API_KEY });
+
+ const messages: Anthropic.MessageParam[] = [
+ { role: "user", content: event.payload.task },
+ ];
+
+ const toolDefinitions = tools.map(({ run, ...rest }) => rest);
+
+ // Durable agent loop - each turn is checkpointed
+ for (let turn = 0; turn < 10; turn++) {
+ const response = (await step.do(
+ `llm-turn-${turn}`,
+ { retries: { limit: 3, delay: "10 seconds", backoff: "exponential" } },
+ async () => {
+ const msg = await client.messages.create({
+ model: "claude-sonnet-4-5-20250929",
+ max_tokens: 4096,
+ tools: toolDefinitions,
+ messages,
+ });
+ // Serialize for Workflow state
+ return JSON.parse(JSON.stringify(msg));
+ },
+ )) as Anthropic.Message;
+
+ if (!response || !response.content) continue;
+
+ messages.push({ role: "assistant", content: response.content });
+
+ if (response.stop_reason === "end_turn") {
+ const textBlock = response.content.find(
+ (b): b is Anthropic.TextBlock => b.type === "text",
+ );
+ return {
+ status: "complete",
+ turns: turn + 1,
+ result: textBlock?.text ?? null,
+ };
+ }
+
+ const toolResults: Anthropic.ToolResultBlockParam[] = [];
+
+ for (const block of response.content) {
+ if (block.type !== "tool_use") continue;
+
+ const result = await step.do(
+ `tool-${turn}-${block.id}`,
+ { retries: { limit: 2, delay: "5 seconds" } },
+ async () => {
+ switch (block.name) {
+ case "search_repos":
+ return searchReposTool.run(block.input as SearchReposInput);
+ case "get_repo":
+ return getRepoTool.run(block.input as GetRepoInput);
+ default:
+ return `Unknown tool: ${block.name}`;
+ }
+ },
+ );
+
+ toolResults.push({
+ type: "tool_result",
+ tool_use_id: block.id,
+ content: result,
+ });
+ }
+
+ messages.push({ role: "user", content: toolResults });
+ }
+
+ return { status: "max_turns_reached", turns: 10 };
+ }
+}
+```
+
+
+
+Each `step.do()` creates a checkpoint. If your Workflow crashes or the Worker restarts:
+
+- **After LLM step**: The response is persisted. On resume, it skips the LLM call and moves to tool execution.
+- **After tool step**: The result is persisted. If a later tool fails, earlier tools don't re-run.
+
+This is especially important for:
+
+- **LLM calls**: Expensive and slow, you don't want to repeat them
+- **External APIs**: May have rate limits or side effects
+- **Idempotency**: Some tools (like sending emails) shouldn't run twice
+
+
+
+## 4. Configure your Workflow
+
+Open `wrangler.jsonc` and add the `workflow` configuration:
+
+
+
+```json title="wrangler.jsonc"
+{
+ "$schema": "node_modules/wrangler/config-schema.json",
+ "name": "durable-ai-agent",
+ "main": "src/index.ts",
+ "compatibility_date": "2025-01-01",
+ "observability": {
+ "enabled": true
+ },
+ "workflows": [
+ {
+ "name": "agent-workflow",
+ "binding": "AGENT_WORKFLOW",
+ "class_name": "AgentWorkflow"
+ }
+ ]
+}
+```
+
+
+
+The `class_name` must match your exported class, and `binding` is the variable name you use to access the Workflow in your code (like `env.AGENT_WORKFLOW`).
+
+Generate types for your bindings:
+
+```sh
+npx wrangler types
+```
+
+This creates a `worker-configuration.d.ts` file with the `Env` type that includes your `AGENT_WORKFLOW` binding.
+
+## 5. Write your API
+
+The Worker exposes an HTTP API to start new agent instances and check their status. Each instance runs independently and can be polled for results.
+
+Replace `src/index.ts`:
+
+```ts title="src/index.ts"
+export { AgentWorkflow } from "./workflow";
+
+export default {
+ async fetch(request: Request, env: Env): Promise<Response> {
+ const url = new URL(request.url);
+
+ const instanceId = url.searchParams.get("instanceId");
+ if (instanceId) {
+ const instance = await env.AGENT_WORKFLOW.get(instanceId);
+ const status = await instance.status();
+
+ return Response.json({
+ status: status.status,
+ output: status.output,
+ });
+ }
+
+ if (request.method === "POST") {
+ const { task } = await request.json<{ task: string }>();
+ const instance = await env.AGENT_WORKFLOW.create({
+ params: { task },
+ });
+ return Response.json({ instanceId: instance.id });
+ }
+
+ return new Response("POST a task to start an agent", { status: 400 });
+ },
+} satisfies ExportedHandler;
+```
+
+## 6. Develop locally
+
+Create a [`.env` file](/workers/wrangler/environments/#secrets-in-local-development) for local development:
+
+```sh title=".env"
+ANTHROPIC_API_KEY=your-api-key-here
+```
+
+Start the dev server:
+
+```sh
+npx wrangler dev
+```
+
+Start an agent that searches and compares repositories:
+
+```sh
+curl -X POST http://localhost:8787 \
+ -H "Content-Type: application/json" \
+ -d '{"task": "Compare open-source LLM projects"}'
+```
+
+```json output
+{ "instanceId": "abc-123-def" }
+```
+
+Check progress (may take a few seconds to complete):
+
+```sh
+curl "http://localhost:8787?instanceId=abc-123-def"
+```
+
+The agent will search for repositories, fetch details, and return a comparison.
+
+## 7. Deploy
+
+```sh
+npx wrangler deploy
+```
+
+Add your API key as a secret:
+
+```sh
+npx wrangler secret put ANTHROPIC_API_KEY
+```
+
+Start an agent on your deployed Worker:
+
+```sh
+curl -X POST https://durable-ai-agent.<YOUR_SUBDOMAIN>.workers.dev \
+ -H "Content-Type: application/json" \
+ -d '{"task": "Compare open-source LLM projects"}'
+```
+
+Inspect agent runs with the CLI:
+
+```sh
+npx wrangler workflows instances describe agent-workflow latest
+```
+
+This shows every step the agent took, including LLM calls, tool executions, timing, and any retries. You can also view this in the Cloudflare dashboard under **Compute & AI > Workflows > agent-workflow**.
+
+## Adding real-time updates with Agents SDK
+
+The polling approach works well for simple use cases, but for real-time UIs you can combine Workflows with the [Agents SDK](/agents/). The pattern:
+
+1. Agent handles WebSocket connections and client state
+2. Workflow runs the durable agent loop and pushes updates to the Agent
+3. Agent broadcasts state changes to all connected clients
+
+In your Workflow, push updates to the Agent:
+
+```ts
+// agentId passed via workflow params
+const agent = this.env.RESEARCH_AGENT.get(
+ this.env.RESEARCH_AGENT.idFromName(agentId),
+);
+await agent.updateProgress({
+ status: "searching",
+ message: "Found 5 repositories...",
+});
+```
+
+In your Agent, receive updates and broadcast to clients:
+
+```ts
+import { Agent } from "agents";
+
+export class ResearchAgent extends Agent {
+ async updateProgress(progress: { status: string; message: string }) {
+ this.setState({ ...this.state, ...progress }); // pushes to all connected clients
+ }
+}
+```
+
+Clients use `useAgent()` to subscribe to state changes:
+
+```tsx
+import { useAgent } from "agents/react";
+
+const [state, setState] = useState(initialState);
+
+useAgent({
+ agent: "research-agent",
+ onStateUpdate: (newState) => setState(newState),
+});
+// state updates in real-time as the Workflow progresses
+```
+
+This gives you durable execution (Workflows) with real-time UI updates (Agents SDK). For a complete example with a React UI, see the [durable-ai-agent template](https://github.com/cloudflare/templates/tree/main/durable-ai-agent).
+
+## Learn more
+
+
+
+
+
+
+
+
diff --git a/src/content/docs/workflows/get-started/guide.mdx b/src/content/docs/workflows/get-started/guide.mdx
index 0c72b48139939b..9e74c5c29a579c 100644
--- a/src/content/docs/workflows/get-started/guide.mdx
+++ b/src/content/docs/workflows/get-started/guide.mdx
@@ -1,532 +1,262 @@
---
-title: Guide
+title: Build your first Workflow
pcx_content_type: get-started
-reviewed: 2024-10-23
sidebar:
order: 1
-
---
-import { Render, PackageManagers, WranglerConfig } from "~/components"
+import {
+ Details,
+ LinkCard,
+ Render,
+ PackageManagers,
+ WranglerConfig,
+} from "~/components";
Workflows allow you to build durable, multi-step applications using the Workers platform. A Workflow can automatically retry, persist state, run for hours or days, and coordinate between third-party APIs.
-You can build Workflows to post-process file uploads to [R2 object storage](/r2/), automate generation of [Workers AI](/workers-ai/) embeddings into a [Vectorize](/vectorize/) vector database, or to trigger user lifecycle emails using your favorite email API.
-
-This guide will instruct you through:
+You can build Workflows to post-process file uploads to [R2 object storage](/r2/), automate generation of [Workers AI](/workers-ai/) embeddings into a [Vectorize](/vectorize/) vector database, or to trigger user lifecycle emails using [Email Service](/email-routing/).
-* Defining your first Workflow and publishing it
-* Deploying the Workflow to your Cloudflare account
-* Running (triggering) your Workflow and observing its output
-
-At the end of this guide, you should be able to author, deploy and debug your own Workflows applications.
+:::note
+The term "Durable Execution" is widely used to describe this programming model.
-## Prerequisites
+"Durable" describes the ability of the program to implicitly persist state without you having to manually write to an external store or serialize program state.
+:::
-
+In this guide, you will create and deploy a Workflow that fetches data, pauses, and processes results.
-## 1. Define your Workflow
+## Quick start
-To create your first Workflow, use the `create cloudflare` (C3) CLI tool, specifying the Workflows starter template:
+If you want to skip the steps and pull down the complete Workflow we are building in this guide, run:
```sh
npm create cloudflare@latest workflows-starter -- --template "cloudflare/workflows-starter"
```
-This will create a new folder called `workflows-starter`.
+Use this option if you are familiar with Cloudflare Workers or want to explore the code first and learn the details later.
-Open the `src/index.ts` file in your text editor. This file contains the following code, which is the most basic instance of a Workflow definition:
+Follow the steps below to learn how to build a Workflow from scratch.
-```ts title="src/index.ts"
-import { WorkflowEntrypoint, WorkflowStep, WorkflowEvent } from 'cloudflare:workers';
+## Prerequisites
-type Env = {
- // Add your bindings here, e.g. Workers KV, D1, Workers AI, etc.
- MY_WORKFLOW: Workflow;
-};
+
-// User-defined params passed to your workflow
-type Params = {
- email: string;
- metadata: Record;
-};
+## 1. Create a new Worker project
-export class MyWorkflow extends WorkflowEntrypoint {
- async run(event: WorkflowEvent, step: WorkflowStep) {
- // Can access bindings on `this.env`
- // Can access params on `event.payload`
-
- const files = await step.do('my first step', async () => {
- // Fetch a list of files from $SOME_SERVICE
- return {
- files: [
- 'doc_7392_rev3.pdf',
- 'report_x29_final.pdf',
- 'memo_2024_05_12.pdf',
- 'file_089_update.pdf',
- 'proj_alpha_v2.pdf',
- 'data_analysis_q2.pdf',
- 'notes_meeting_52.pdf',
- 'summary_fy24_draft.pdf',
- ],
- };
- });
+Open a terminal and run the `create cloudflare` (C3) CLI tool to create your Worker project:
- const apiResponse = await step.do('some other step', async () => {
- let resp = await fetch('https://api.cloudflare.com/client/v4/ips');
- return await resp.json();
- });
+
- await step.sleep('wait on something', '1 minute');
-
- await step.do(
- 'make a call to write that could maybe, just might, fail',
- // Define a retry strategy
- {
- retries: {
- limit: 5,
- delay: '5 second',
- backoff: 'exponential',
- },
- timeout: '15 minutes',
- },
- async () => {
- // Do stuff here, with access to the state from our previous steps
- if (Math.random() > 0.5) {
- throw new Error('API call to $STORAGE_SYSTEM failed');
- }
- },
- );
- }
-}
-```
+
-A Workflow definition:
+Move into your new project directory:
-1. Defines a `run` method that contains the primary logic for your workflow.
-2. Has at least one or more calls to `step.do` that encapsulates the logic of your Workflow.
-3. Allows steps to return (optional) state, allowing a Workflow to continue execution even if subsequent steps fail, without having to re-run all previous steps.
-
-A single Worker application can contain multiple Workflow definitions, as long as each Workflow has a unique class name. This can be useful for code re-use or to define Workflows which are related to each other conceptually.
+```sh
+cd my-workflow
+```
-Each Workflow is otherwise entirely independent: a Worker that defines multiple Workflows is no different from a set of Workers that define one Workflow each.
+
-## 2. Create your Workflows steps
+In your project directory, C3 will have generated the following:
-Each `step` in a Workflow is an independently retriable function.
+- `wrangler.jsonc`: Your [Wrangler configuration file](/workers/wrangler/configuration/#sample-wrangler-configuration).
+- `src/index.ts`: A minimal Worker written in TypeScript.
+- `package.json`: A minimal Node dependencies configuration file.
+- `tsconfig.json`: TypeScript configuration.
-A `step` is what makes a Workflow powerful, as you can encapsulate errors and persist state as your Workflow progresses from step to step, avoiding your application from having to start from scratch on failure and ultimately build more reliable applications.
+
-* A step can execute code (`step.do`) or sleep a Workflow (`step.sleep`).
-* If a step fails (throws an exception), it will be automatically be retried based on your retry logic.
-* If a step succeeds, any state it returns will be persisted within the Workflow.
+## 2. Write your Workflow
-At its most basic, a step looks like this:
+Create a new file `src/workflow.ts`:
-```ts title="src/index.ts"
-// Import the Workflow definition
-import { WorkflowEntrypoint, WorkflowEvent, WorkflowStep } from "cloudflare:workers"
+```ts title="src/workflow.ts"
+import { WorkflowEntrypoint, WorkflowStep } from "cloudflare:workers";
+import type { WorkflowEvent } from "cloudflare:workers";
-type Params = {}
+type Params = { name: string };
+type IPResponse = { result: { ipv4_cidrs: string[] } };
-// Create your own class that implements a Workflow
 export class MyWorkflow extends WorkflowEntrypoint<Env, Params> {
- // Define a run() method
- async run(event: WorkflowEvent<Params>, step: WorkflowStep) {
- // Define one or more steps that optionally return state.
- let state = step.do("my first step", async () => {
+ async run(event: WorkflowEvent<Params>, step: WorkflowStep) {
+ const data = await step.do("fetch data", async () => {
+ const response = await fetch("https://api.cloudflare.com/client/v4/ips");
+ return await response.json<IPResponse>();
+ });
- })
+ await step.sleep("pause", "20 seconds");
- step.do("my second step", async () => {
+ const result = await step.do(
+ "process data",
+ { retries: { limit: 3, delay: "5 seconds", backoff: "linear" } },
+ async () => {
+ return {
+ name: event.payload.name,
+ ipCount: data.result.ipv4_cidrs.length,
+ };
+ },
+ );
- })
- }
+ return result;
+ }
}
```
-Each call to `step.do` accepts three arguments:
-
-1. (Required) A step name, which identifies the step in logs and telemetry
-2. (Required) A callback function that contains the code to run for your step, and any state you want the Workflow to persist
-3. (Optional) A `StepConfig` that defines the retry configuration (max retries, delay, and backoff algorithm) for the step
-
-When trying to decide whether to break code up into more than one step, a good rule of thumb is to ask "do I want _all_ of this code to run again if just one part of it fails?". In many cases, you do _not_ want to repeatedly call an API if the following data processing stage fails, or if you get an error when attempting to send a completion or welcome email.
-
-For example, each of the below tasks is ideally encapsulated in its own step, so that any failure — such as a file not existing, a third-party API being down or rate limited — does not cause your entire program to fail.
+A Workflow extends `WorkflowEntrypoint` and implements a `run` method. This code also passes in our `Params` type as a [type parameter](/workflows/build/events-and-parameters/) so that events that trigger our Workflow are typed.
-* Reading or writing files from [R2](/r2/)
-* Running an AI task using [Workers AI](/workers-ai/)
-* Querying a [D1 database](/d1/) or a database via [Hyperdrive](/hyperdrive/)
-* Calling a third-party API
+The [`step`](/workflows/build/workers-api/#step) object is the core of the Workflows API. It provides methods to define durable steps in your Workflow:
-If a subsequent step fails, your Workflow can retry from that step, using any state returned from a previous step. This can also help you avoid unnecessarily querying a database or calling an paid API repeatedly for data you have already fetched.
+- `step.do(name, callback)` - Executes code and persists the result. If the Workflow is interrupted or retried, it resumes from the last successful step rather than re-running completed work.
+- `step.sleep(name, duration)` - Pauses the Workflow for a duration (e.g., `"10 seconds"`, `"1 hour"`).
-:::note
-
-The term "Durable Execution" is widely used to describe this programming model.
+You can pass a [retry configuration](/workflows/build/sleeping-and-retrying/) to `step.do()` to customize how failures are handled. See the [full step API](/workflows/build/workers-api/#step) for additional methods like `sleepUntil` and `waitForEvent`.
-"Durable" describes the ability of the program (application) to implicitly persist state without you having to manually write to an external store or serialize program state.
+When deciding whether to break code into separate steps, ask yourself: "Do I want all of this code to run again if just one part fails?" Separate steps are ideal for operations like calling external APIs, querying databases, or reading files from storage — if a later step fails, your Workflow can retry from that point using data already fetched, avoiding redundant API calls or database queries.
-:::
+For more guidance on how to define your Workflow logic, refer to [Rules of Workflows](/workflows/build/rules-of-workflows/).
## 3. Configure your Workflow
-Before you can deploy a Workflow, you need to configure it.
-
-Open the Wrangler file at the root of your `workflows-starter` folder, which contains the following `[[workflows]]` configuration:
+Open `wrangler.jsonc`, which is your [Wrangler configuration file](/workers/wrangler/configuration/) for your Workers project and your Workflow, and add the `workflows` configuration:
-```toml title="wrangler.toml"
-#:schema node_modules/wrangler/config-schema.json
-name = "workflows-starter"
-main = "src/index.ts"
-compatibility_date = "2024-10-22"
-
-[[workflows]]
-# name of your workflow
-name = "workflows-starter"
-# binding name env.MY_WORKFLOW
-binding = "MY_WORKFLOW"
-# this is class that extends the Workflow class in src/index.ts
-class_name = "MyWorkflow"
+```json title="wrangler.jsonc"
+{
+ "$schema": "node_modules/wrangler/config-schema.json",
+ "name": "my-workflow",
+ "main": "src/index.ts",
+ "compatibility_date": "2025-12-21",
+ "observability": {
+ "enabled": true
+ },
+ "workflows": [
+ {
+ "name": "my-workflow",
+ "binding": "MY_WORKFLOW",
+ "class_name": "MyWorkflow"
+ }
+ ]
+}
```
-:::note
-
-If you have changed the name of the Workflow in your Wrangler commands, the JavaScript class name, or the name of the project you created, ensure that you update the values above to match the changes.
-
-:::
+The `class_name` must match your exported class, and `binding` is the variable name you use to access the Workflow in your code (like `env.MY_WORKFLOW`).
-This configuration tells the Workers platform which JavaScript class represents your Workflow, and sets a `binding` name that allows you to run the Workflow from other handlers or to call into Workflows from other Workers scripts.
+You can also access [bindings](/workers/runtime-apis/bindings/) (such as [KV](/kv/), [R2](/r2/), or [D1](/d1/)) via `this.env` within your Workflow. For more information on bindings within Workers, refer to [Bindings (env)](/workers/runtime-apis/bindings/).
-## 4. Bind to your Workflow
+Now, generate types for your bindings:
-We have a very basic Workflow definition, but now need to provide a way to call it from within our code. A Workflow can be triggered by:
-
-1. External HTTP requests via a `fetch()` handler
-2. Messages from a [Queue](/queues/)
-3. A schedule via [Cron Trigger](/workers/configuration/cron-triggers/)
-4. Via the [Workflows REST API](/api/resources/workflows/methods/list/) or [wrangler CLI](/workers/wrangler/commands/#workflows)
-
-Return to the `src/index.ts` file we created in the previous step and add a `fetch` handler that _binds_ to our Workflow. This binding allows us to create new Workflow instances, fetch the status of an existing Workflow, pause and/or terminate a Workflow.
-
-```ts title="src/index.ts"
-// This is in the same file as your Workflow definition
-
-export default {
- async fetch(req: Request, env: Env): Promise<Response> {
- let url = new URL(req.url);
-
- if (url.pathname.startsWith('/favicon')) {
- return Response.json({}, { status: 404 });
- }
-
- // Get the status of an existing instance, if provided
- let id = url.searchParams.get('instanceId');
- if (id) {
- let instance = await env.MY_WORKFLOW.get(id);
- return Response.json({
- status: await instance.status(),
- });
- }
-
- // Spawn a new instance and return the ID and status
- let instance = await env.MY_WORKFLOW.create();
- return Response.json({
- id: instance.id,
- details: await instance.status(),
- });
- },
-};
+```sh
+npx wrangler types
```
-The code here exposes a HTTP endpoint that generates a random ID and runs the Workflow, returning the ID and the Workflow status. It also accepts an optional `instanceId` query parameter that retrieves the status of a Workflow instance by its ID.
+This creates a `worker-configuration.d.ts` file with the `Env` type that includes your `MY_WORKFLOW` binding.
-:::note
+## 4. Write your API
-In a production application, you might choose to put authentication in front of your endpoint so that only authorized users can run a Workflow. Alternatively, you could pass messages to a Workflow [from a Queue consumer](/queues/reference/how-queues-works/#consumers) in order to allow for long-running tasks.
-
-:::
-
-### Review your Workflow code
-
-:::note
-
-This is the full contents of the `src/index.ts` file pulled down when you used the `cloudflare/workflows-starter` template at the beginning of this guide.
-
-:::
-
-Before you deploy, you can review the full Workflows code and the `fetch` handler that will allow you to trigger your Workflow over HTTP:
+Now, you'll need a place to call your Workflow. Replace `src/index.ts` with a [fetch handler](/workers/runtime-apis/handlers/fetch/) to start and check Workflow instances:
```ts title="src/index.ts"
-import { WorkflowEntrypoint, WorkflowStep, WorkflowEvent } from 'cloudflare:workers';
-
-type Env = {
- // Add your bindings here, e.g. Workers KV, D1, Workers AI, etc.
- MY_WORKFLOW: Workflow;
-};
-
-// User-defined params passed to your workflow
-type Params = {
- email: string;
- metadata: Record<string, string>;
-};
-
-export class MyWorkflow extends WorkflowEntrypoint<Env, Params> {
- async run(event: WorkflowEvent<Params>, step: WorkflowStep) {
- // Can access bindings on `this.env`
- // Can access params on `event.payload`
-
- const files = await step.do('my first step', async () => {
- // Fetch a list of files from $SOME_SERVICE
- return {
- files: [
- 'doc_7392_rev3.pdf',
- 'report_x29_final.pdf',
- 'memo_2024_05_12.pdf',
- 'file_089_update.pdf',
- 'proj_alpha_v2.pdf',
- 'data_analysis_q2.pdf',
- 'notes_meeting_52.pdf',
- 'summary_fy24_draft.pdf',
- ],
- };
- });
-
- const apiResponse = await step.do('some other step', async () => {
- let resp = await fetch('https://api.cloudflare.com/client/v4/ips');
- return await resp.json();
- });
-
- await step.sleep('wait on something', '1 minute');
-
- await step.do(
- 'make a call to write that could maybe, just might, fail',
- // Define a retry strategy
- {
- retries: {
- limit: 5,
- delay: '5 second',
- backoff: 'exponential',
- },
- timeout: '15 minutes',
- },
- async () => {
- // Do stuff here, with access to the state from our previous steps
- if (Math.random() > 0.5) {
- throw new Error('API call to $STORAGE_SYSTEM failed');
- }
- },
- );
- }
-}
+export { MyWorkflow } from "./workflow";
export default {
- async fetch(req: Request, env: Env): Promise<Response> {
- let url = new URL(req.url);
+ async fetch(request: Request, env: Env): Promise<Response> {
+ const url = new URL(request.url);
+ const instanceId = url.searchParams.get("instanceId");
- if (url.pathname.startsWith('/favicon')) {
- return Response.json({}, { status: 404 });
+ if (instanceId) {
+ const instance = await env.MY_WORKFLOW.get(instanceId);
+ return Response.json(await instance.status());
}
- // Get the status of an existing instance, if provided
- let id = url.searchParams.get('instanceId');
- if (id) {
- let instance = await env.MY_WORKFLOW.get(id);
- return Response.json({
- status: await instance.status(),
- });
- }
-
- // Spawn a new instance and return the ID and status
- let instance = await env.MY_WORKFLOW.create();
- return Response.json({
- id: instance.id,
- details: await instance.status(),
- });
+ const instance = await env.MY_WORKFLOW.create();
+ return Response.json({ instanceId: instance.id });
},
-};
+} satisfies ExportedHandler<Env>;
```
-## 5. Deploy your Workflow
+## 5. Develop locally
-Deploying a Workflow is identical to deploying a Worker.
+Start a local development server:
```sh
-npx wrangler deploy
-```
-```sh output
-# Note the "Workflows" binding mentioned here, showing that
-# wrangler has detected your Workflow
-Your worker has access to the following bindings:
-- Workflows:
- - MY_WORKFLOW: MyWorkflow (defined in workflows-starter)
-Uploaded workflows-starter (2.53 sec)
-Deployed workflows-starter triggers (1.12 sec)
- https://workflows-starter.YOUR_WORKERS_SUBDOMAIN.workers.dev
- workflow: workflows-starter
+npx wrangler dev
```
-A Worker with a valid Workflow definition will be automatically registered by Workflows. You can list your current Workflows using Wrangler:
+To start a Workflow instance, open a new terminal window and run:
```sh
-npx wrangler workflows list
-```
-```sh output
-Showing last 1 workflow:
-┌───────────────────┬───────────────────┬────────────┬─────────────────────────┬─────────────────────────┐
-│ Name │ Script name │ Class name │ Created │ Modified │
-├───────────────────┼───────────────────┼────────────┼─────────────────────────┼─────────────────────────┤
-│ workflows-starter │ workflows-starter │ MyWorkflow │ 10/23/2024, 11:33:58 AM │ 10/23/2024, 11:33:58 AM │
-└───────────────────┴───────────────────┴────────────┴─────────────────────────┴─────────────────────────┘
+curl http://localhost:8787
```
-:::note
+An `instanceId` will be automatically generated:
-Workflows cannot be deployed to Workers for Platforms namespaces, as Workflows do not support Workers for Platforms.
-
-:::
-
-## 6. Run and observe your Workflow
-
-With your Workflow deployed, you can now run it.
-
-1. A Workflow can run in parallel: each unique invocation of a Workflow is an _instance_ of that Workflow.
-2. An instance will run to completion (success or failure).
-3. Deploying newer versions of a Workflow will cause all instances after that point to run the newest Workflow code.
-
-:::note
-
-Because Workflows can be long running, it is possible to have running instances that represent different versions of your Workflow code over time.
-
-:::
-
-To trigger our Workflow, we will use the `wrangler` CLI and pass in an optional `--payload`. The `payload` will be passed to your Workflow's `run` method handler as an `Event`.
-
-```sh
-npx wrangler workflows trigger workflows-starter '{"hello":"world"}'
-```
-```sh output
-# Workflow instance "12dc179f-9f77-4a37-b973-709dca4189ba" has been queued successfully
+```json output
+{ "instanceId": "abc-123-def" }
```
-To inspect the current status of the Workflow instance we just triggered, we can either reference it by ID or by using the keyword `latest`:
+Check the status using the returned `instanceId`:
```sh
-npx wrangler@latest workflows instances describe workflows-starter latest
-# Or by ID:
-# npx wrangler@latest workflows instances describe workflows-starter 12dc179f-9f77-4a37-b973-709dca4189ba
-```
-```sh output
-Workflow Name: workflows-starter
-Instance Id: f72c1648-dfa3-45ea-be66-b43d11d216f8
-Version Id: cedc33a0-11fa-4c26-8a8e-7d28d381a291
-Status: ✅ Completed
-Trigger: 🌎 API
-Queued: 10/15/2024, 1:55:31 PM
-Success: ✅ Yes
-Start: 10/15/2024, 1:55:31 PM
-End: 10/15/2024, 1:56:32 PM
-Duration: 1 minute
-Last Successful Step: make a call to write that could maybe, just might, fail-1
-Steps:
-
- Name: my first step-1
- Type: 🎯 Step
- Start: 10/15/2024, 1:55:31 PM
- End: 10/15/2024, 1:55:31 PM
- Duration: 0 seconds
- Success: ✅ Yes
- Output: "{\"inputParams\":[{\"timestamp\":\"2024-10-15T13:55:29.363Z\",\"payload\":{\"hello\":\"world\"}}],\"files\":[\"doc_7392_rev3.pdf\",\"report_x29_final.pdf\",\"memo_2024_05_12.pdf\",\"file_089_update.pdf\",\"proj_alpha_v2.pdf\",\"data_analysis_q2.pdf\",\"notes_meeting_52.pdf\",\"summary_fy24_draft.pdf\",\"plan_2025_outline.pdf\"]}"
-┌────────────────────────┬────────────────────────┬───────────┬────────────┐
-│ Start │ End │ Duration │ State │
-├────────────────────────┼────────────────────────┼───────────┼────────────┤
-│ 10/15/2024, 1:55:31 PM │ 10/15/2024, 1:55:31 PM │ 0 seconds │ ✅ Success │
-└────────────────────────┴────────────────────────┴───────────┴────────────┘
-
- Name: some other step-1
- Type: 🎯 Step
- Start: 10/15/2024, 1:55:31 PM
- End: 10/15/2024, 1:55:31 PM
- Duration: 0 seconds
- Success: ✅ Yes
- Output: "{\"result\":{\"ipv4_cidrs\":[\"173.245.48.0/20\",\"103.21.244.0/22\",\"103.22.200.0/22\",\"103.31.4.0/22\",\"141.101.64.0/18\",\"108.162.192.0/18\",\"190.93.240.0/20\",\"188.114.96.0/20\",\"197.234.240.0/22\",\"198.41.128.0/17\",\"162.158.0.0/15\",\"104.16.0.0/13\",\"104.24.0.0/14\",\"172.64.0.0/13\",\"131.0.72.0/22\"],\"ipv6_cidrs\":[\"2400:cb00::/32\",\"2606:4700::/32\",\"2803:f800::/32\",\"2405:b500::/32\",\"2405:8100::/32\",\"2a06:98c0::/29\",\"2c0f:f248::/32\"],\"etag\":\"38f79d050aa027e3be3865e495dcc9bc\"},\"success\":true,\"errors\":[],\"messages\":[]}"
-┌────────────────────────┬────────────────────────┬───────────┬────────────┐
-│ Start │ End │ Duration │ State │
-├────────────────────────┼────────────────────────┼───────────┼────────────┤
-│ 10/15/2024, 1:55:31 PM │ 10/15/2024, 1:55:31 PM │ 0 seconds │ ✅ Success │
-└────────────────────────┴────────────────────────┴───────────┴────────────┘
-
- Name: wait on something-1
- Type: 💤 Sleeping
- Start: 10/15/2024, 1:55:31 PM
- End: 10/15/2024, 1:56:31 PM
- Duration: 1 minute
-
- Name: make a call to write that could maybe, just might, fail-1
- Type: 🎯 Step
- Start: 10/15/2024, 1:56:31 PM
- End: 10/15/2024, 1:56:32 PM
- Duration: 1 second
- Success: ✅ Yes
- Output: null
-┌────────────────────────┬────────────────────────┬───────────┬────────────┬───────────────────────────────────────────┐
-│ Start │ End │ Duration │ State │ Error │
-├────────────────────────┼────────────────────────┼───────────┼────────────┼───────────────────────────────────────────┤
-│ 10/15/2024, 1:56:31 PM │ 10/15/2024, 1:56:31 PM │ 0 seconds │ ❌ Error │ Error: API call to $STORAGE_SYSTEM failed │
-├────────────────────────┼────────────────────────┼───────────┼────────────┼───────────────────────────────────────────┤
-│ 10/15/2024, 1:56:32 PM │ 10/15/2024, 1:56:32 PM │ 0 seconds │ ✅ Success │ │
-└────────────────────────┴────────────────────────┴───────────┴────────────┴───────────────────────────────────────────┘
+curl "http://localhost:8787?instanceId=abc-123-def"
```
-From the output above, we can inspect:
+The Workflow will progress through its steps. After about 20 seconds (the sleep duration), it will complete.
-* The status (success, failure, running) of each step
-* Any state emitted by the step
-* Any `sleep` state, including when the Workflow will wake up
-* Retries associated with each step
-* Errors, including exception messages
-
-:::note
-
-You do not have to wait for a Workflow instance to finish executing to inspect its current status. The `wrangler workflows instances describe` sub-command will show the status of an in-progress instance, including any persisted state, if it is sleeping, and any errors or retries. This can be especially useful when debugging a Workflow during development.
-
-:::
-
-In the previous step, we also bound a Workers script to our Workflow. You can trigger a Workflow by visiting the (deployed) Workers script in a browser or with any HTTP client.
+## 6. Deploy your Workflow
```sh
-# This must match the URL provided in step 6
-curl -s https://workflows-starter.YOUR_WORKERS_SUBDOMAIN.workers.dev/
-```
-```sh output
-{"id":"16ac31e5-db9d-48ae-a58f-95b95422d0fa","details":{"status":"queued","error":null,"output":null}}
+npx wrangler deploy
```
-{/*
+Test in production using the same curl commands against your deployed URL. You can also [trigger a Workflow instance](/workflows/build/trigger-workflows/) in production via Workers, Wrangler, or the Cloudflare dashboard.
-## 7. (Optional) Clean up
-
-You can optionally delete the Workflow, which will prevent the creation of any (all) instances by using `wrangler`:
+Once deployed, you can also inspect Workflow instances with the CLI:
```sh
-npx wrangler workflows delete my-workflow
+npx wrangler workflows instances describe my-workflow latest
```
-Re-deploying the Workers script containing your Workflow code will re-create the Workflow.
-
-*/}
-
----
-
-## Next steps
-
-* Learn more about [how events are passed to a Workflow](/workflows/build/events-and-parameters/).
-* Learn more about binding to and triggering Workflow instances using the [Workers API](/workflows/build/workers-api/).
-* Learn more about the [Rules of Workflows](/workflows/build/rules-of-workflows/) and best practices for building applications using Workflows.
-
-If you have any feature requests or notice any bugs, share your feedback directly with the Cloudflare team by joining the [Cloudflare Developers community on Discord](https://discord.cloudflare.com).
+The output of `instances describe` shows:
+
+- The status (success, failure, running) of each step
+- Any state emitted by the step
+- Any `sleep` state, including when the Workflow will wake up
+- Retries associated with each step
+- Errors, including exception messages
+
+## Learn more
+
+
+
+
+
+
+
+
diff --git a/src/content/docs/workflows/get-started/index.mdx b/src/content/docs/workflows/get-started/index.mdx
index 3d5d65fb0aba4a..2413de6193b8e9 100644
--- a/src/content/docs/workflows/get-started/index.mdx
+++ b/src/content/docs/workflows/get-started/index.mdx
@@ -5,9 +5,8 @@ sidebar:
order: 1
group:
hideIndex: true
-
---
-import { DirectoryListing } from "~/components"
+import { DirectoryListing } from "~/components";
diff --git a/src/content/docs/workflows/python/bindings.mdx b/src/content/docs/workflows/python/bindings.mdx
index 6c693048681873..43bb88d8d3c047 100644
--- a/src/content/docs/workflows/python/bindings.mdx
+++ b/src/content/docs/workflows/python/bindings.mdx
@@ -96,8 +96,9 @@ The `create` method returns a [`WorkflowInstance`](/workflows/build/workers-api/
Create (trigger) a batch of new workflow instances, up to 100 instances at a time. This is useful if you need to create multiple instances at once within the [instance creation limit](/workflows/reference/limits/).
-* create_batch(batch)
- * `batch` - list of `WorkflowInstanceCreateOptions` to pass when creating an instance, including a user-provided ID and payload parameters.
+- create_batch(batch)
+  - `batch` - list of `WorkflowInstanceCreateOptions` to pass when creating an instance, including a user-provided ID and payload parameters.
Each element of the `batch` list is expected to include both `id` and `params` properties:
@@ -160,4 +161,4 @@ Refer to the [Workflows REST API documentation](/api/resources/workflows/subreso
## Command line (CLI)
-Refer to the [CLI quick start](/workflows/get-started/cli-quick-start/) to learn more about how to manage and trigger Workflows via the command-line.
+Refer to the [CLI quick start](/workflows/get-started/guide/) to learn more about how to manage and trigger Workflows via the command-line.
diff --git a/src/content/docs/workflows/reference/limits.mdx b/src/content/docs/workflows/reference/limits.mdx
index c7c2028b308725..193d7bd39ed309 100644
--- a/src/content/docs/workflows/reference/limits.mdx
+++ b/src/content/docs/workflows/reference/limits.mdx
@@ -11,6 +11,12 @@ Limits that apply to authoring, deploying, and running Workflows are detailed be
Many limits are inherited from those applied to Workers scripts and as documented in the [Workers limits](/workers/platform/limits/) documentation.
+:::note
+
+Workflows cannot be deployed to Workers for Platforms namespaces, as Workflows do not support Workers for Platforms.
+
+:::
+
| Feature | Workers Free | Workers Paid |
| --------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- |
| Workflow class definitions per script | 3MB max script size per [Worker size limits](/workers/platform/limits/#account-plan-limits) | 10MB max script size per [Worker size limits](/workers/platform/limits/#account-plan-limits) |