diff --git a/js/packages/teams-ai/src/StreamingResponse.spec.ts b/js/packages/teams-ai/src/StreamingResponse.spec.ts
index c4279eb06..f0994b210 100644
--- a/js/packages/teams-ai/src/StreamingResponse.spec.ts
+++ b/js/packages/teams-ai/src/StreamingResponse.spec.ts
@@ -2,6 +2,7 @@ import assert from 'assert';
 import { TestAdapter } from 'botbuilder';
 import { CardFactory } from 'botbuilder-core';
 import { StreamingResponse } from './StreamingResponse';
+import { Citation } from './prompts/Message';
 
 describe('StreamingResponse', function () {
     describe('constructor()', () => {
@@ -163,6 +164,34 @@ describe('StreamingResponse', function () {
             });
         });
 
+        it('should send a final message with text and citations', async () => {
+            const adapter = new TestAdapter();
+            await adapter.sendTextToBot('test', async (context) => {
+                const response = new StreamingResponse(context);
+                response.queueTextChunk('first', [
+                    { content: 'test-content', url: 'https://example.com', title: 'test', filepath: 'test' } as Citation
+                ]);
+                response.queueTextChunk('second');
+                await response.waitForQueue();
+                await response.endStream();
+                assert(response.updatesSent == 2, 'updatesSent should be 2');
+                assert(response.citations == undefined, 'no citations matched');
+
+                // Validate sent activities
+                const activities = adapter.activeQueue;
+                assert.equal(activities.length, 3, 'should have sent 3 activities');
+                assert.equal(activities[0].channelData.streamSequence, 1, 'first activity streamSequence should be 1');
+                assert.equal(activities[1].channelData.streamSequence, 2, 'second activity streamSequence should be 2');
+                assert.equal(activities[2].type, 'message', 'final activity type should be "message"');
+                assert.equal(activities[2].text, 'firstsecond', 'final activity text should be "firstsecond"');
+                assert.deepEqual(
+                    activities[2].channelData,
+                    { streamType: 'final', streamId: response.streamId, feedbackLoopEnabled: false },
+                    'final activity channelData should match'
+                );
+            });
+        });
+
     it('should send a final message with powered by AI features', async () => {
         const adapter = new TestAdapter();
         await adapter.sendTextToBot('test', async (context) => {
@@ -209,7 +238,9 @@ describe('StreamingResponse', function () {
                             '@type': 'Message',
                             '@context': 'https://schema.org',
                             '@id': '',
-                            additionalType: ['AIGeneratedContent']
+                            additionalType: ['AIGeneratedContent'],
+                            citation: [],
+                            usageInfo: undefined
                         }
                     ],
                     'final activity entities obj should match'
diff --git a/js/packages/teams-ai/src/StreamingResponse.ts b/js/packages/teams-ai/src/StreamingResponse.ts
index 420efb2ee..a51a3e9bf 100644
--- a/js/packages/teams-ai/src/StreamingResponse.ts
+++ b/js/packages/teams-ai/src/StreamingResponse.ts
@@ -7,7 +7,9 @@
  */
 
 import { Activity, Attachment, TurnContext, Entity } from 'botbuilder-core';
-import { AIEntity } from './types';
+import { AIEntity, ClientCitation, SensitivityUsageInfo } from './types';
+import { Citation } from './prompts/Message';
+import { Utilities } from './Utilities';
 
 /**
  * A helper class for streaming responses to the client.
@@ -35,6 +37,8 @@ export class StreamingResponse {
     // Powered by AI feature flags
     private _enableFeedbackLoop = false;
     private _enableGeneratedByAILabel = false;
+    private _citations?: ClientCitation[] = [];
+    private _sensitivityLabel?: SensitivityUsageInfo;
 
     /**
      * Creates a new StreamingResponse instance.
@@ -55,6 +59,13 @@ export class StreamingResponse {
         return this._streamId;
     }
 
+    /**
+     * Gets the citations of the current response.
+     */
+    public get citations(): ClientCitation[] | undefined {
+        return this._citations;
+    }
+
     /**
      * Gets the number of updates sent for the stream.
      * @returns {number} - The number of updates sent for the stream.
@@ -89,8 +100,9 @@ export class StreamingResponse {
      * The text we be sent as quickly as possible to the client. Chunks may be combined before
      * delivery to the client.
      * @param {string} text Partial text of the message to send.
+     * @param {Citation[]} citations Citations to be included in the message.
      */
-    public queueTextChunk(text: string): void {
+    public queueTextChunk(text: string, citations?: Citation[]): void {
         if (this._ended) {
             throw new Error('The stream has already ended.');
         }
@@ -98,6 +110,34 @@ export class StreamingResponse {
         // Update full message text
         this._message += text;
 
+        if (citations && citations.length > 0) {
+            if (!this._citations) {
+                this._citations = [];
+            }
+            let currPos = this._citations.length;
+
+            for (const citation of citations) {
+                const clientCitation: ClientCitation = {
+                    '@type': 'Claim',
+                    position: `${currPos + 1}`,
+                    appearance: {
+                        '@type': 'DigitalDocument',
+                        name: citation.title || `Document #${currPos + 1}`,
+                        abstract: Utilities.snippet(citation.content, 477)
+                    }
+                };
+                currPos++;
+                this._citations.push(clientCitation);
+            }
+
+            // If there are citations, modify the content so that the sources are numbers instead of [doc1], [doc2], etc.
+            this._message =
+                this._citations.length == 0 ? this._message : Utilities.formatCitationsResponse(this._message);
+
+            // If there are citations, filter out the citations unused in content.
+            this._citations = Utilities.getUsedCitations(this._message, this._citations) ?? undefined;
+        }
+
         // Queue the next chunk
         this.queueNextChunk();
     }
@@ -127,6 +167,14 @@ export class StreamingResponse {
         this._attachments = attachments;
     }
 
+    /**
+     * Sets the sensitivity label to attach to the final chunk.
+     * @param sensitivityLabel The sensitivity label.
+     */
+    public setSensitivityLabel(sensitivityLabel: SensitivityUsageInfo): void {
+        this._sensitivityLabel = sensitivityLabel;
+    }
+
     /**
      * Sets the Feedback Loop in Teams that allows a user to
     * give thumbs up or down to a response.
@@ -279,7 +327,9 @@ export class StreamingResponse {
                 '@type': 'Message',
                 '@context': 'https://schema.org',
                 '@id': '',
-                additionalType: ['AIGeneratedContent']
+                additionalType: ['AIGeneratedContent'],
+                citation: this._citations && this._citations.length > 0 ? this._citations : [],
+                usageInfo: this._sensitivityLabel
             } as AIEntity);
         }
     }
diff --git a/js/packages/teams-ai/src/models/OpenAIModel.spec.ts b/js/packages/teams-ai/src/models/OpenAIModel.spec.ts
index 390e2d9a0..6b7198f76 100644
--- a/js/packages/teams-ai/src/models/OpenAIModel.spec.ts
+++ b/js/packages/teams-ai/src/models/OpenAIModel.spec.ts
@@ -98,21 +98,23 @@ describe('OpenAIModel', () => {
         });
 
         const mockResponse = {
-            choices: [{
-                message: {
-                    role: 'assistant',
-                    content: 'Test response',
-                    context: {
-                        citations: [
-                            {
-                                content: 'Citation content',
-                                title: 'Citation title',
-                                url: 'https://citation.url'
-                            }
-                        ]
+            choices: [
+                {
+                    message: {
+                        role: 'assistant',
+                        content: 'Test response',
+                        context: {
+                            citations: [
+                                {
+                                    content: 'Citation content',
+                                    title: 'Citation title',
+                                    url: 'https://citation.url'
+                                }
+                            ]
+                        }
                     }
                 }
-            }]
+            ]
         };
 
         // Mock the API call
diff --git a/js/packages/teams-ai/src/moderators/AzureContentSafetyModerator.spec.ts b/js/packages/teams-ai/src/moderators/AzureContentSafetyModerator.spec.ts
index 07715458f..4627175a6 100644
--- a/js/packages/teams-ai/src/moderators/AzureContentSafetyModerator.spec.ts
+++ b/js/packages/teams-ai/src/moderators/AzureContentSafetyModerator.spec.ts
@@ -119,10 +119,12 @@ describe('AzureContentSafetyModerator', () => {
                     status: '200',
                     statusText: 'OK',
                     data: {
-                        categoriesAnalysis: [{
-                            category: 'Hate',
-                            severity: 1
-                        }]
+                        categoriesAnalysis: [
+                            {
+                                category: 'Hate',
+                                severity: 1
+                            }
+                        ]
                     }
                 })
             );
@@ -170,10 +172,12 @@ describe('AzureContentSafetyModerator', () => {
                     status: '200',
                     statusText: 'OK',
                     data: {
-                        categoriesAnalysis: [{
-                            category: 'Hate',
-                            severity: 7
-                        }]
+                        categoriesAnalysis: [
+                            {
+                                category: 'Hate',
+                                severity: 7
+                            }
+                        ]
                     }
                 })
             );
diff --git a/js/packages/teams-ai/src/moderators/AzureContentSafetyModerator.ts b/js/packages/teams-ai/src/moderators/AzureContentSafetyModerator.ts
index 8abc68908..87afd28ff 100644
--- a/js/packages/teams-ai/src/moderators/AzureContentSafetyModerator.ts
+++ b/js/packages/teams-ai/src/moderators/AzureContentSafetyModerator.ts
@@ -45,7 +45,7 @@ export interface AzureOpenAIModeratorOptions extends OpenAIModeratorOptions {
     /**
      * @deprecated
      * use `haltOnBlocklistHit`
-     * 
+     *
      * When set to true, further analyses of harmful content will not be performed in cases where blocklists are hit.
      * When set to false, all analyses of harmful content will be performed, whether or not blocklists are hit.
      * Default value is false.
@@ -160,9 +160,11 @@ export class AzureContentSafetyModerator<TState extends TurnState = TurnState> e
 
         const predicate = (category: AzureOpenAIModeratorCategory) => {
             return (c: ContentSafetyHarmCategory) => {
-                return c.category === category &&
+                return (
+                    c.category === category &&
                     c.severity > 0 &&
                     c.severity <= this._azureContentSafetyCategories[category].severity
+                );
             };
         };
 
diff --git a/js/packages/teams-ai/src/planners/LLMClient.ts b/js/packages/teams-ai/src/planners/LLMClient.ts
index 6fdf5b350..50652c0da 100644
--- a/js/packages/teams-ai/src/planners/LLMClient.ts
+++ b/js/packages/teams-ai/src/planners/LLMClient.ts
@@ -327,8 +327,10 @@ export class LLMClient<TContent = any> {
 
             // Send chunk to client
             const text = chunk.delta?.content ?? '';
+            const citations = chunk.delta?.context?.citations ?? undefined;
+
             if (text.length > 0) {
-                streamer.queueTextChunk(text);
+                streamer.queueTextChunk(text, citations);
             }
         };
 
diff --git a/js/packages/teams-ai/src/types/AIEntity.ts b/js/packages/teams-ai/src/types/AIEntity.ts
index 3b982635a..3aabea204 100644
--- a/js/packages/teams-ai/src/types/AIEntity.ts
+++ b/js/packages/teams-ai/src/types/AIEntity.ts
@@ -7,6 +7,7 @@
  */
 
 import { ClientCitation } from './ClientCitation';
+import { SensitivityUsageInfo } from './SensitivityUsageInfo';
 
 export interface AIEntity {
     /**
@@ -38,4 +39,9 @@ export interface AIEntity {
      * Optional; if citations object is included, the sent activity will include the citations, referenced in the activity text.
      */
     citation?: ClientCitation[];
+
+    /**
+     * Optional; if usage_info object is included, the sent activity will include the sensitivity usage information.
+     */
+    usageInfo?: SensitivityUsageInfo;
 }
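Reviewer note: the snippet below is a minimal usage sketch of the new surface in this patch (queueTextChunk with citations, the citations getter, and optionally setSensitivityLabel); it is illustrative only and not part of the diff. The streamWithCitations helper name, the citation values, and the '[doc1]' marker are placeholders, and the relative import paths assume the file lives alongside StreamingResponse.ts in src/, with a TurnContext supplied by the adapter as in the spec above.

import { TurnContext } from 'botbuilder-core';
import { StreamingResponse } from './StreamingResponse';
import { Citation } from './prompts/Message';

// Streams two chunks, attaching a citation to the first, then sends the final message.
// Citations queued with a chunk are converted to ClientCitation entries; when the
// generated-by-AI label is enabled they are attached to the final activity's AI entity.
async function streamWithCitations(context: TurnContext): Promise<void> {
    const response = new StreamingResponse(context);

    response.queueTextChunk('The sky is blue [doc1].', [
        { content: 'Why the sky is blue.', url: 'https://example.com', title: 'Sky', filepath: 'sky.md' } as Citation
    ]);
    response.queueTextChunk(' More text with no citations.');

    // A sensitivity label could also be attached here via response.setSensitivityLabel(...)
    // before ending the stream; the SensitivityUsageInfo shape is left out of this sketch.

    await response.waitForQueue();
    await response.endStream();

    // Only citations actually referenced in the streamed text are kept.
    console.log(response.citations?.length ?? 0);
}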