diff --git a/getting-started/CONCEPTS/STREAMING.md b/getting-started/CONCEPTS/STREAMING.md
index c99ff131e..5ff1bd6b8 100644
--- a/getting-started/CONCEPTS/STREAMING.md
+++ b/getting-started/CONCEPTS/STREAMING.md
@@ -51,7 +51,10 @@ Once `endStream()` is called, the stream is considered ended and no further upda
 ### Current Limitations:
 
 - Streaming is only available in 1:1 chats.
+- SendActivity requests are restricted to 1 RPS, so the SDK buffers for 1.5 seconds between sends to stay under this limit.
+- For Powered by AI features, only the Feedback Loop and Generated by AI Label are currently supported.
 - Only rich text can be streamed.
+- Due to upcoming GA protocol changes, the `channelData` metadata must be included in the `entities` object as well.
 - Only one informative message can be set. This is reused for each message.
   - Examples include:
     - “Scanning through documents”
@@ -70,7 +73,8 @@ You can configure streaming with your bot by following these steps:
 
 #### Optional additions:
 
-- Set the informative message in the `ActionPlanner` declaration via the `StartStreamingMessage` config.
+- Set the informative message in the `ActionPlanner` declaration via the `StartStreamingMessage` config.
+- As with non-streaming bots, set the feedback loop toggle in the `AIOptions` object in the `app` declaration and specify a handler (see the usage sketch after the `StreamingResponse.ts` hunks below).
 - Set attachments in the final chunk via the `EndStreamHandler` in the `ActionPlanner` declaration.
 
 #### C#
diff --git a/js/packages/teams-ai/src/AI.ts b/js/packages/teams-ai/src/AI.ts
index 87cf4c7b1..d04bb3682 100644
--- a/js/packages/teams-ai/src/AI.ts
+++ b/js/packages/teams-ai/src/AI.ts
@@ -246,6 +246,13 @@ export class AI {
         return this._options.planner;
     }
 
+    /**
+     * @returns {boolean} Returns the feedback loop flag.
+     */
+    public get enableFeedbackLoop(): boolean {
+        return this._options.enable_feedback_loop;
+    }
+
     /**
      * Registers a handler for a named action.
     * @remarks
diff --git a/js/packages/teams-ai/src/StreamingResponse.spec.ts b/js/packages/teams-ai/src/StreamingResponse.spec.ts
index 70eadb147..c4279eb06 100644
--- a/js/packages/teams-ai/src/StreamingResponse.spec.ts
+++ b/js/packages/teams-ai/src/StreamingResponse.spec.ts
@@ -130,7 +130,11 @@ describe('StreamingResponse', function () {
            const activity = adapter.getNextReply();
            assert.equal(activity.type, 'message', 'activity.type should be "message"');
            assert.equal(activity.text, '', 'activity.text should be ""');
-           assert.deepEqual(activity.channelData, { streamType: 'final' }, 'activity.channelData should match');
+           assert.deepEqual(
+               activity.channelData,
+               { streamType: 'final', feedbackLoopEnabled: false },
+               'activity.channelData should match'
+           );
        });
    });
 
@@ -153,9 +157,63 @@ describe('StreamingResponse', function () {
            assert.equal(activities[2].text, 'firstsecond', 'final activity text should be "firstsecond"');
            assert.deepEqual(
                activities[2].channelData,
-               { streamType: 'final', streamId: response.streamId },
+               { streamType: 'final', streamId: response.streamId, feedbackLoopEnabled: false },
+               'final activity channelData should match'
+           );
+       });
+   });
+
+   it('should send a final message with powered by AI features', async () => {
+       const adapter = new TestAdapter();
+       await adapter.sendTextToBot('test', async (context) => {
+           const response = new StreamingResponse(context);
+           response.queueTextChunk('first');
+           response.queueTextChunk('second');
+           response.setFeedbackLoop(true);
+           response.setGeneratedByAILabel(true);
+           await response.waitForQueue();
+           await response.endStream();
+           assert(response.updatesSent == 2, 'updatesSent should be 2');
+
+           // Validate sent activities
+           const activities = adapter.activeQueue;
+           assert.equal(activities.length, 3, 'should have sent 3 activities');
+           assert.equal(activities[0].channelData.streamSequence, 1, 'first activity streamSequence should be 1');
+           assert.equal(activities[0].entities!.length, 1, 'length of first activity entities should be 1');
+           assert.deepEqual(
+               activities[0].entities,
+               [{ type: 'streaminfo', properties: { ...activities[0].channelData } }],
+               'first activity entities should match'
+           );
+           assert.equal(activities[1].channelData.streamSequence, 2, 'second activity streamSequence should be 2');
+           assert.equal(activities[1].entities!.length, 1, 'length of second activity entities should be 1');
+           assert.deepEqual(
+               activities[1].entities,
+               [{ type: 'streaminfo', properties: { ...activities[1].channelData } }],
+               'second activity entities should match'
+           );
+           assert.equal(activities[2].type, 'message', 'final activity type should be "message"');
+           assert.equal(activities[2].text, 'firstsecond', 'final activity text should be "firstsecond"');
+
+           assert.deepEqual(
+               activities[2].channelData,
+               { streamType: 'final', streamId: response.streamId, feedbackLoopEnabled: true },
                'final activity channelData should match'
            );
+           assert.deepEqual(
+               activities[2].entities,
+               [
+                   { type: 'streaminfo', properties: { streamType: 'final', streamId: response.streamId } },
+                   {
+                       type: 'https://schema.org/Message',
+                       '@type': 'Message',
+                       '@context': 'https://schema.org',
+                       '@id': '',
+                       additionalType: ['AIGeneratedContent']
+                   }
+               ],
+               'final activity entities obj should match'
+           );
        });
    });
 
@@ -191,7 +249,7 @@ describe('StreamingResponse', function () {
            assert.equal(activities[2].text, 'firstsecond', 'final activity text should be "firstsecond"');
            assert.deepEqual(
                activities[2].channelData,
-               { streamType: 'final', streamId: response.streamId },
+               { streamType: 'final', streamId: response.streamId, feedbackLoopEnabled: false },
                'final activity channelData should match'
            );
            assert.notEqual(activities[2].attachments, null);
diff --git a/js/packages/teams-ai/src/StreamingResponse.ts b/js/packages/teams-ai/src/StreamingResponse.ts
index 82828a565..420efb2ee 100644
--- a/js/packages/teams-ai/src/StreamingResponse.ts
+++ b/js/packages/teams-ai/src/StreamingResponse.ts
@@ -6,7 +6,8 @@
  * Licensed under the MIT License.
  */
 
-import { Activity, Attachment, TurnContext } from 'botbuilder-core';
+import { Activity, Attachment, TurnContext, Entity } from 'botbuilder-core';
+import { AIEntity } from './types';
 
 /**
  * A helper class for streaming responses to the client.
@@ -31,6 +32,10 @@ export class StreamingResponse {
    private _queueSync: Promise<void> | undefined;
    private _chunkQueued = false;
 
+   // Powered by AI feature flags
+   private _enableFeedbackLoop = false;
+   private _enableGeneratedByAILabel = false;
+
    /**
     * Creates a new StreamingResponse instance.
     * @param {TurnContext} context - Context for the current turn of conversation with the user.
@@ -79,7 +84,7 @@ export class StreamingResponse {
    }
 
    /**
-    * Queues a chunk of partial message text to be sent to the client.
+    * Queues a chunk of partial message text to be sent to the client.
     * @remarks
     * The text will be sent as quickly as possible to the client. Chunks may be combined before
     * delivery to the client.
@@ -111,7 +116,7 @@ export class StreamingResponse {
        this.queueNextChunk();
 
        // Wait for the queue to drain
-       return this._queueSync!;
+       return this.waitForQueue();
    }
 
    /**
@@ -122,6 +127,25 @@ export class StreamingResponse {
        this._attachments = attachments;
    }
 
+   /**
+    * Sets the Feedback Loop in Teams that allows a user to
+    * give thumbs up or down to a response.
+    * Default is `false`.
+    * @param enableFeedbackLoop If true, the feedback loop is enabled.
+    */
+   public setFeedbackLoop(enableFeedbackLoop: boolean): void {
+       this._enableFeedbackLoop = enableFeedbackLoop;
+   }
+
+   /**
+    * Sets the Generated by AI label in Teams.
+    * Default is `false`.
+    * @param enableGeneratedByAILabel If true, the label is added.
+    */
+   public setGeneratedByAILabel(enableGeneratedByAILabel: boolean): void {
+       this._enableGeneratedByAILabel = enableGeneratedByAILabel;
+   }
+
    /**
     * Returns the most recently streamed message.
     * @returns The streamed message.
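Reviewer note: a minimal usage sketch for the two setters added above, mirroring the spec's happy path. The `TurnContext` is assumed to come from whatever adapter hosts the bot (the spec uses `TestAdapter`); this is an illustration, not part of the patch.

```typescript
import { TurnContext } from 'botbuilder-core';
import { StreamingResponse } from './StreamingResponse';

// Stream two chunks with both Powered by AI flags enabled. The flags only
// take effect on the final chunk, which is sent by endStream().
async function streamWithAIFeatures(context: TurnContext): Promise<void> {
    const response = new StreamingResponse(context);
    response.setFeedbackLoop(true); // surfaces feedbackLoopEnabled in the final chunk's channelData
    response.setGeneratedByAILabel(true); // appends the schema.org Message entity to the final chunk
    response.queueTextChunk('first');
    response.queueTextChunk('second');
    await response.waitForQueue();
    await response.endStream();
}
```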
@@ -185,7 +209,10 @@ export class StreamingResponse {
        // If there's no sync in progress, start one
        if (!this._queueSync) {
-           this._queueSync = this.drainQueue();
+           this._queueSync = this.drainQueue().catch((err) => {
+               console.error(`Error occurred when sending activity while streaming: "${err}".`);
+               throw err;
+           });
        }
    }
 
@@ -195,7 +222,7 @@ export class StreamingResponse {
     * @private
     */
    private drainQueue(): Promise<void> {
-       return new Promise<void>(async (resolve) => {
+       return new Promise<void>(async (resolve, reject) => {
            try {
                while (this._queue.length > 0) {
                    // Get next activity from queue
@@ -207,6 +234,8 @@ export class StreamingResponse {
                }
 
                resolve();
+           } catch (err) {
+               reject(err);
            } finally {
                // Queue is empty, mark as idle
                this._queueSync = undefined;
@@ -227,8 +256,38 @@ export class StreamingResponse {
            activity.channelData = Object.assign({}, activity.channelData, { streamId: this._streamId });
        }
 
+       activity.entities = [
+           {
+               type: 'streaminfo',
+               properties: {
+                   ...activity.channelData
+               }
+           } as Entity
+       ];
+
+       // Add in Powered by AI feature flags
+       if (this._ended) {
+           // Add in feedback loop
+           activity.channelData = Object.assign({}, activity.channelData, {
+               feedbackLoopEnabled: this._enableFeedbackLoop
+           });
+
+           // Add in Generated by AI
+           if (this._enableGeneratedByAILabel) {
+               activity.entities.push({
+                   type: 'https://schema.org/Message',
+                   '@type': 'Message',
+                   '@context': 'https://schema.org',
+                   '@id': '',
+                   additionalType: ['AIGeneratedContent']
+               } as AIEntity);
+           }
+       }
+
        // Send activity
        const response = await this._context.sendActivity(activity);
+       // Buffer between sends to respect the 1 RPS limit; setTimeout takes milliseconds, so 1.5 seconds is 1500.
+       await new Promise((resolve) => setTimeout(resolve, 1500));
 
        // Save assigned stream ID
        if (!this._streamId) {
diff --git a/js/packages/teams-ai/src/planners/ActionPlanner.ts b/js/packages/teams-ai/src/planners/ActionPlanner.ts
index b9368c77a..aa1892d58 100644
--- a/js/packages/teams-ai/src/planners/ActionPlanner.ts
+++ b/js/packages/teams-ai/src/planners/ActionPlanner.ts
@@ -90,6 +90,11 @@ export interface ActionPlannerOptions {
     * Optional handler to run when a stream is about to conclude.
     */
    endStreamHandler?: PromptCompletionModelResponseReceivedEvent;
+
+   /**
+    * If true, the feedback loop will be enabled for streaming responses.
+    */
+   enableFeedbackLoop?: boolean;
 }
 
 /**
@@ -116,6 +121,7 @@ export class ActionPlanner implements Plan
    private readonly _options: ActionPlannerOptions;
    private readonly _promptFactory: ActionPlannerPromptFactory;
    private readonly _defaultPrompt?: string;
+   private _enableFeedbackLoop: boolean | undefined;
 
    /**
     * Creates a new `ActionPlanner` instance.
@@ -187,6 +193,10 @@ export class ActionPlanner implements Plan
        // Identify the augmentation to use
        const augmentation = template.augmentation ?? new DefaultAugmentation();
 
+       if (ai.enableFeedbackLoop != null) {
+           this._enableFeedbackLoop = ai.enableFeedbackLoop;
+       }
+
        // Complete prompt
        const result = await this.completePrompt(context, state, template, augmentation);
        if (result.status != 'success') {
@@ -265,7 +275,8 @@ export class ActionPlanner implements Plan
            max_repair_attempts: this._options.max_repair_attempts,
            logRepairs: this._options.logRepairs,
            startStreamingMessage: this._options.startStreamingMessage,
-           endStreamHandler: this._options.endStreamHandler
+           endStreamHandler: this._options.endStreamHandler,
+           enableFeedbackLoop: this._enableFeedbackLoop
        });
 
        // Complete prompt
diff --git a/js/packages/teams-ai/src/planners/LLMClient.spec.ts b/js/packages/teams-ai/src/planners/LLMClient.spec.ts
index 8d07087cc..33200d860 100644
--- a/js/packages/teams-ai/src/planners/LLMClient.spec.ts
+++ b/js/packages/teams-ai/src/planners/LLMClient.spec.ts
@@ -114,7 +114,8 @@ describe('LLMClient', function () {
        const client = new LLMClient({
            model: streamingModel,
            template,
-           startStreamingMessage: 'start'
+           startStreamingMessage: 'start',
+           enableFeedbackLoop: true
        });
        const response = await client.completePrompt(context, state, functions);
        assert.equal(adapter.activeQueue.length, 4, 'adapter should have 4 messages in the queue');
diff --git a/js/packages/teams-ai/src/planners/LLMClient.ts b/js/packages/teams-ai/src/planners/LLMClient.ts
index 2491ae9c5..6fdf5b350 100644
--- a/js/packages/teams-ai/src/planners/LLMClient.ts
+++ b/js/packages/teams-ai/src/planners/LLMClient.ts
@@ -97,6 +97,11 @@ export interface LLMClientOptions {
     * Optional handler to run when a stream is about to conclude.
     */
    endStreamHandler?: PromptCompletionModelResponseReceivedEvent;
+
+   /**
+    * If true, the feedback loop will be enabled for streaming responses.
+    */
+   enableFeedbackLoop?: boolean;
 }
 
 /**
@@ -200,6 +205,7 @@ export interface ConfiguredLLMClientOptions {
 export class LLMClient {
    private readonly _startStreamingMessage: string | undefined;
    private readonly _endStreamHandler: PromptCompletionModelResponseReceivedEvent | undefined;
+   private readonly _enableFeedbackLoop: boolean | undefined;
 
    /**
     * Configured options for this LLMClient instance.
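Reviewer note: a sketch of how the new option is passed at the `LLMClient` layer, following the fixture names in `LLMClient.spec.ts`. `streamingModel` and `template` are assumed to be built as in that spec; they are typed as `any` here only to keep the sketch self-contained.

```typescript
import { LLMClient } from './LLMClient';

// Assumed fixtures, constructed as in LLMClient.spec.ts.
declare const streamingModel: any;
declare const template: any;

// enableFeedbackLoop is forwarded to the StreamingResponse created inside
// completePrompt() and surfaces as feedbackLoopEnabled on the final streamed chunk.
const client = new LLMClient({
    model: streamingModel,
    template,
    startStreamingMessage: 'start',
    enableFeedbackLoop: true
});
```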
@@ -234,6 +240,7 @@ export class LLMClient {
 
        this._startStreamingMessage = options.startStreamingMessage;
        this._endStreamHandler = options.endStreamHandler;
+       this._enableFeedbackLoop = options.enableFeedbackLoop;
    }
 
    /**
@@ -299,6 +306,13 @@ export class LLMClient {
                // Create streamer and send initial message
                streamer = new StreamingResponse(context);
                memory.setValue('temp.streamer', streamer);
+
+               if (this._enableFeedbackLoop != null) {
+                   streamer.setFeedbackLoop(this._enableFeedbackLoop);
+               }
+
+               streamer.setGeneratedByAILabel(true);
+
                if (this._startStreamingMessage) {
                    streamer.queueInformativeUpdate(this._startStreamingMessage);
                }
diff --git a/js/samples/04.ai-apps/i.teamsChefBot-streaming/src/index.ts b/js/samples/04.ai-apps/i.teamsChefBot-streaming/src/index.ts
index 6c05beb2a..7ef8ff0f4 100644
--- a/js/samples/04.ai-apps/i.teamsChefBot-streaming/src/index.ts
+++ b/js/samples/04.ai-apps/i.teamsChefBot-streaming/src/index.ts
@@ -94,7 +94,7 @@ const model = new OpenAIModel({
 
    // Azure OpenAI Support
    // azureApiKey: process.env.AZURE_OPENAI_KEY!,
-   // azureDefaultDeployment: 'gpt-3.5-turbo',
+   // azureDefaultDeployment: 'gpt-4o',
    // azureEndpoint: process.env.AZURE_OPENAI_ENDPOINT!,
    // azureApiVersion: '2023-03-15-preview',
 
@@ -141,8 +141,9 @@ const storage = new MemoryStorage();
 const app = new Application({
    storage,
    ai: {
-       planner
-   }
+       planner,
+       enable_feedback_loop: true
+   }
 });
 
 // Register your data source with planner
@@ -173,6 +174,10 @@ app.ai.action(AI.FlaggedOutputActionName, async (context: TurnContext, state: Ap
    return AI.StopCommandName;
 });
 
+app.feedbackLoop(async (context, state, feedbackLoopData) => {
+   console.log('Feedback loop triggered');
+});
+
 // Listen for incoming server requests.
 server.post('/api/messages', async (req, res) => {
    // Route received a request to adapter for processing
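Reviewer note: for reference, a sketch of the final streamed activity these changes produce when both flags are enabled. Field values are taken from the updated spec assertions; 'abc123' stands in for the channel-assigned stream ID.

```typescript
// Final chunk emitted by StreamingResponse.endStream() after setFeedbackLoop(true)
// and setGeneratedByAILabel(true). Note that feedbackLoopEnabled lands in
// channelData, while the streaminfo entity mirrors only the pre-flag metadata.
const finalChunk = {
    type: 'message',
    text: 'firstsecond',
    channelData: { streamType: 'final', streamId: 'abc123', feedbackLoopEnabled: true },
    entities: [
        { type: 'streaminfo', properties: { streamType: 'final', streamId: 'abc123' } },
        {
            type: 'https://schema.org/Message',
            '@type': 'Message',
            '@context': 'https://schema.org',
            '@id': '',
            additionalType: ['AIGeneratedContent']
        }
    ]
};
```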