[C#] fix: content safety public preview deprecation (#2138)
## Linked issues

closes: #minor

## Details
Deprecated the public preview version of the Azure Content Safety moderator.

#### Change details
* Updated `Azure.AI.ContentSafety` from `1.0.0-beta.1` to `1.0.0` (see the sketch below)
* Added a chat moderation sample
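
For reference, a minimal sketch of the GA (`v1.0.0`) call pattern this commit moves to — the endpoint, key, and `severityThreshold` values are placeholders, not values from this repository:

```csharp
using System;
using System.Linq;
using System.Threading.Tasks;
using Azure;
using Azure.AI.ContentSafety;

class ContentSafetyV1Example
{
    static async Task Main()
    {
        var client = new ContentSafetyClient(
            new Uri("https://<your-resource>.cognitiveservices.azure.com/"),
            new AzureKeyCredential("<api-key>"));

        Response<AnalyzeTextResult> response =
            await client.AnalyzeTextAsync(new AnalyzeTextOptions("some user input"));

        // Placeholder threshold mirroring the moderator's SeverityLevel option.
        const int severityThreshold = 1;

        // v1.0.0 returns a single CategoriesAnalysis collection instead of the
        // preview SDK's per-category HateResult/SexualResult/... properties.
        bool flagged = response.Value.BlocklistsMatch.Count > 0
            || response.Value.CategoriesAnalysis.Any(ca => (ca.Severity ?? 0) >= severityThreshold);

        foreach (TextCategoriesAnalysis analysis in response.Value.CategoriesAnalysis)
        {
            Console.WriteLine($"{analysis.Category}: {analysis.Severity}");
        }

        Console.WriteLine($"Flagged: {flagged}");
    }
}
```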

## Attestation Checklist

- [x] My code follows the style guidelines of this project
- I have checked for/fixed spelling, linting, and other errors
- I have commented my code for clarity
- I have made corresponding changes to the documentation (updating the doc strings in the code is sufficient)
- My changes generate no new warnings
- I have added tests that validate my changes and provide sufficient test coverage. I have tested with:
  - Local testing
  - E2E testing in Teams
- New and existing unit tests pass locally with my changes
singhk97 authored Oct 23, 2024
1 parent a56fec2 commit b006d89
Showing 29 changed files with 1,270 additions and 23 deletions.
@@ -97,7 +97,8 @@ public async Task Test_ReviewPrompt_Flagged(ModerationType moderate)
};

var clientMock = new Mock<ContentSafetyClient>(new Uri(endpoint), new AzureKeyCredential(apiKey));
- AnalyzeTextResult analyzeTextResult = ContentSafetyModelFactory.AnalyzeTextResult(hateResult: ContentSafetyModelFactory.TextAnalyzeSeverityResult(TextCategory.Hate, 2));
+ var analyses = new List<TextCategoriesAnalysis>() { ContentSafetyModelFactory.TextCategoriesAnalysis(TextCategory.Hate, 2) };
+ AnalyzeTextResult analyzeTextResult = ContentSafetyModelFactory.AnalyzeTextResult(null, analyses);
Response? response = null;
clientMock.Setup(client => client.AnalyzeTextAsync(It.IsAny<AnalyzeTextOptions>(), It.IsAny<CancellationToken>())).ReturnsAsync(Response.FromValue(analyzeTextResult, response));

@@ -173,7 +174,8 @@ public async Task Test_ReviewPrompt_NotFlagged(ModerationType moderate)
};

var clientMock = new Mock<ContentSafetyClient>(new Uri(endpoint), new AzureKeyCredential(apiKey));
- AnalyzeTextResult analyzeTextResult = ContentSafetyModelFactory.AnalyzeTextResult(hateResult: ContentSafetyModelFactory.TextAnalyzeSeverityResult(TextCategory.Hate, 0));
+ var analyses = new List<TextCategoriesAnalysis>() { ContentSafetyModelFactory.TextCategoriesAnalysis(TextCategory.Hate, 0) };
+ AnalyzeTextResult analyzeTextResult = ContentSafetyModelFactory.AnalyzeTextResult(null, analyses);
Response? response = null;
clientMock.Setup(client => client.AnalyzeTextAsync(It.IsAny<AnalyzeTextOptions>(), It.IsAny<CancellationToken>())).ReturnsAsync(Response.FromValue(analyzeTextResult, response));

@@ -237,7 +239,8 @@ public async Task Test_ReviewPlan_Flagged(ModerationType moderate)
});

var clientMock = new Mock<ContentSafetyClient>(new Uri(endpoint), new AzureKeyCredential(apiKey));
- AnalyzeTextResult analyzeTextResult = ContentSafetyModelFactory.AnalyzeTextResult(hateResult: ContentSafetyModelFactory.TextAnalyzeSeverityResult(TextCategory.Hate, 2));
+ var analyses = new List<TextCategoriesAnalysis>() { ContentSafetyModelFactory.TextCategoriesAnalysis(TextCategory.Hate, 2) };
+ AnalyzeTextResult analyzeTextResult = ContentSafetyModelFactory.AnalyzeTextResult(null, analyses);
Response? response = null;
clientMock.Setup(client => client.AnalyzeTextAsync(It.IsAny<AnalyzeTextOptions>(), It.IsAny<CancellationToken>())).ReturnsAsync(Response.FromValue(analyzeTextResult, response));

@@ -298,7 +301,8 @@ public async Task Test_ReviewPlan_NotFlagged(ModerationType moderate)
});

var clientMock = new Mock<ContentSafetyClient>(new Uri(endpoint), new AzureKeyCredential(apiKey));
- AnalyzeTextResult analyzeTextResult = ContentSafetyModelFactory.AnalyzeTextResult(hateResult: ContentSafetyModelFactory.TextAnalyzeSeverityResult(TextCategory.Hate, 0));
+ var analyses = new List<TextCategoriesAnalysis>() { ContentSafetyModelFactory.TextCategoriesAnalysis(TextCategory.Hate, 0) };
+ AnalyzeTextResult analyzeTextResult = ContentSafetyModelFactory.AnalyzeTextResult(null, analyses);
Response? response = null;
clientMock.Setup(client => client.AnalyzeTextAsync(It.IsAny<AnalyzeTextOptions>(), It.IsAny<CancellationToken>())).ReturnsAsync(Response.FromValue(analyzeTextResult, response));

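
The test changes above swap the preview factory method `TextAnalyzeSeverityResult` for `TextCategoriesAnalysis`; the `null` first argument to `ContentSafetyModelFactory.AnalyzeTextResult` is the blocklist-match collection. A self-contained sketch of that mocking pattern (endpoint and key are placeholders):

```csharp
using System;
using System.Collections.Generic;
using System.Threading;
using Azure;
using Azure.AI.ContentSafety;
using Moq;

class ModeratorTestSketch
{
    static void Main()
    {
        var clientMock = new Mock<ContentSafetyClient>(
            new Uri("https://example.cognitiveservices.azure.com/"),
            new AzureKeyCredential("test-key"));

        // v1.0.0 model factory: first argument is the blocklist-match collection
        // (null here), second is the per-category analysis list.
        var analyses = new List<TextCategoriesAnalysis>
        {
            ContentSafetyModelFactory.TextCategoriesAnalysis(TextCategory.Hate, 2)
        };
        AnalyzeTextResult analyzeTextResult = ContentSafetyModelFactory.AnalyzeTextResult(null, analyses);

        // The raw Response is not needed for these assertions, so it stays null.
        Response? rawResponse = null;
        clientMock
            .Setup(client => client.AnalyzeTextAsync(It.IsAny<AnalyzeTextOptions>(), It.IsAny<CancellationToken>()))
            .ReturnsAsync(Response.FromValue(analyzeTextResult, rawResponse));
    }
}
```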
@@ -96,11 +96,8 @@ public async Task<Plan> ReviewOutputAsync(ITurnContext turnContext, TState turnS
{
Response<AnalyzeTextResult> response = await _client.AnalyzeTextAsync(analyzeTextOptions);

- bool flagged = response.Value.BlocklistsMatchResults.Count > 0
- || _ShouldBeFlagged(response.Value.HateResult)
- || _ShouldBeFlagged(response.Value.SelfHarmResult)
- || _ShouldBeFlagged(response.Value.SexualResult)
- || _ShouldBeFlagged(response.Value.ViolenceResult);
+ bool flagged = response.Value.BlocklistsMatch.Count > 0
+ || response.Value.CategoriesAnalysis.Any((ca) => _ShouldBeFlagged(ca));
if (flagged)
{
string actionName = isModelInput ? AIConstants.FlaggedInputActionName : AIConstants.FlaggedOutputActionName;
@@ -138,17 +135,54 @@ public async Task<Plan> ReviewOutputAsync(ITurnContext turnContext, TState turnS
return null;
}

- private bool _ShouldBeFlagged(TextAnalyzeSeverityResult result)
+ private bool _ShouldBeFlagged(TextCategoriesAnalysis result)
{
return result != null && result.Severity >= _options.SeverityLevel;
}

private ModerationResult BuildModerationResult(AnalyzeTextResult result)
{
- bool hate = _ShouldBeFlagged(result.HateResult);
- bool selfHarm = _ShouldBeFlagged(result.SelfHarmResult);
- bool sexual = _ShouldBeFlagged(result.SexualResult);
- bool violence = _ShouldBeFlagged(result.ViolenceResult);
+ bool hate = false;
+ int hateSeverity = 0;
+ bool selfHarm = false;
+ int selfHarmSeverity = 0;
+ bool sexual = false;
+ int sexualSeverity = 0;
+ bool violence = false;
+ int violenceSeverity = 0;
+
+ foreach (TextCategoriesAnalysis textAnalysis in result.CategoriesAnalysis)
+ {
+ if (textAnalysis.Severity < _options.SeverityLevel)
+ {
+ continue;
+ }
+
+ int severity = textAnalysis.Severity ?? 0;
+ if (textAnalysis.Category == TextCategory.Hate)
+ {
+ hate = true;
+ hateSeverity = severity;
+ }
+
+ if (textAnalysis.Category == TextCategory.Violence)
+ {
+ violence = true;
+ violenceSeverity = severity;
+ }
+
+ if (textAnalysis.Category == TextCategory.SelfHarm)
+ {
+ selfHarm = true;
+ selfHarmSeverity = severity;
+ }
+
+ if (textAnalysis.Category == TextCategory.Sexual)
+ {
+ sexual = true;
+ sexualSeverity = severity;
+ }
+ }

return new()
{
@@ -166,13 +200,13 @@ private ModerationResult BuildModerationResult(AnalyzeTextResult result)
CategoryScores = new()
{
// Normalize the scores to be between 0 and 1 (highest severity is 6)
- Hate = (result.HateResult?.Severity ?? 0) / 6.0,
- HateThreatening = (result.HateResult?.Severity ?? 0) / 6.0,
- SelfHarm = (result.SelfHarmResult?.Severity ?? 0) / 6.0,
- Sexual = (result.SexualResult?.Severity ?? 0) / 6.0,
- SexualMinors = (result.SexualResult?.Severity ?? 0) / 6.0,
- Violence = (result.ViolenceResult?.Severity ?? 0) / 6.0,
- ViolenceGraphic = (result.ViolenceResult?.Severity ?? 0) / 6.0
+ Hate = hateSeverity / 6.0,
+ HateThreatening = hateSeverity / 6.0,
+ SelfHarm = selfHarmSeverity / 6.0,
+ Sexual = sexualSeverity / 6.0,
+ SexualMinors = sexualSeverity / 6.0,
+ Violence = violenceSeverity / 6.0,
+ ViolenceGraphic = violenceSeverity / 6.0
}
};
}
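
As the comment in the hunk above notes, severities are normalized into the 0–1 range used by `CategoryScores`. A small illustration with hypothetical values:

```csharp
using System;

// Hypothetical values illustrating the normalization in BuildModerationResult above:
// a nullable int severity from CategoriesAnalysis is defaulted to 0 and scaled into [0, 1].
int? reportedSeverity = 2;                     // e.g. TextCategory.Hate reported at severity 2
double score = (reportedSeverity ?? 0) / 6.0;  // ≈ 0.33; the code treats 6 as the highest severity
Console.WriteLine(score);
```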
@@ -36,10 +36,18 @@ public class AzureContentSafetyModeratorOptions
public IList<string>? BlocklistNames { get; set; }

/// <summary>
- /// When set to true, further analyses of harmful content will not be performed in cases where blocklists are hit. When set to false, all analyses of harmful content will be performed, whether or not blocklists are hit.
+ /// When set to true, further analyses of harmful content will not be performed in cases where blocklists are hit.
+ /// When set to false, all analyses of harmful content will be performed, whether or not blocklists are hit.
/// </summary>
+ [Obsolete("use HaltOnBlockListHit")]
public bool? BreakByBlocklists { get; set; }

+ /// <summary>
+ /// When set to true, further analyses of harmful content will not be performed in cases where blocklists are hit.
+ /// When set to false, all analyses of harmful content will be performed, whether or not blocklists are hit.
+ /// </summary>
+ public bool? HaltOnBlockListHit { get; set; }
+
/// <summary>
/// Create an instance of the AzureContentSafetyModeratorOptions class.
/// </summary>
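
Consumers opt into the replacement flag via the new `HaltOnBlockListHit` property. A hedged usage sketch (the constructor signature shown is an assumption; check the options class in this repo for the exact shape):

```csharp
using System.Collections.Generic;
using Microsoft.Teams.AI.AI.Moderator;   // assumed namespace; adjust to this repo's actual one

// The constructor arguments (API key, endpoint, moderation type) are assumptions for
// illustration; only BlocklistNames, the now-[Obsolete] BreakByBlocklists, and
// HaltOnBlockListHit come from this diff.
var options = new AzureContentSafetyModeratorOptions("<api-key>", "<endpoint>", ModerationType.Both)
{
    BlocklistNames = new List<string> { "my-blocklist" },
    HaltOnBlockListHit = true   // replaces BreakByBlocklists, which now emits an obsolete warning
};
```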
@@ -36,7 +36,7 @@

<ItemGroup>
<PackageReference Include="AdaptiveCards" Version="3.1.0" />
- <PackageReference Include="Azure.AI.ContentSafety" Version="1.0.0-beta.1" />
+ <PackageReference Include="Azure.AI.ContentSafety" Version="1.0.0" />
<PackageReference Include="Azure.AI.OpenAI" Version="2.1.0-beta.1" />
<PackageReference Include="JsonSchema.Net" Version="5.5.1" />
<PackageReference Include="Microsoft.Bcl.AsyncInterfaces" Version="8.0.0" />