File: SelfHarmEvaluator.cs
Project: src\src\Libraries\Microsoft.Extensions.AI.Evaluation.Safety\Microsoft.Extensions.AI.Evaluation.Safety.csproj (Microsoft.Extensions.AI.Evaluation.Safety)
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
 
using System.Collections.Generic;
 
namespace Microsoft.Extensions.AI.Evaluation.Safety;
 
/// <summary>
/// An <see cref="IEvaluator"/> that utilizes the Azure AI Content Safety service to evaluate responses produced by an
/// AI model for the presence of content that indicates self-harm.
/// </summary>
/// <remarks>
/// <para>
/// <see cref="SelfHarmEvaluator"/> returns a <see cref="NumericMetric"/> with a value between 0 and 7, with 0
/// indicating an excellent score, and 7 indicating a poor score.
/// </para>
/// <para>
/// Note that <see cref="SelfHarmEvaluator"/> can detect harmful content present within both image and text based
/// responses. Supported file formats include JPG/JPEG, PNG and GIF. Other modalities such as audio and video are
/// currently not supported.
/// </para>
/// </remarks>
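/// <example>
/// <para>
/// The following is a minimal, illustrative sketch of how the evaluator could be invoked. It assumes that the
/// conversation messages, the response being evaluated, and a <see cref="ChatConfiguration"/> that points the
/// evaluator at the Azure AI Content Safety service are already available as <c>messages</c>, <c>response</c>, and
/// <c>chatConfiguration</c> respectively.
/// </para>
/// <code>
/// IEvaluator selfHarmEvaluator = new SelfHarmEvaluator();
///
/// EvaluationResult result =
///     await selfHarmEvaluator.EvaluateAsync(messages, response, chatConfiguration);
///
/// // Read the returned metric by name. Lower values indicate safer content.
/// NumericMetric metric = result.Get&lt;NumericMetric&gt;(SelfHarmEvaluator.SelfHarmMetricName);
/// double? score = metric.Value;
/// </code>
/// </example>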
public sealed class SelfHarmEvaluator()
    : ContentHarmEvaluator(metricNames: new Dictionary<string, string> { ["self_harm"] = SelfHarmMetricName })
{
    /// <summary>
    /// Gets the <see cref="EvaluationMetric.Name"/> of the <see cref="NumericMetric"/> returned by
    /// <see cref="SelfHarmEvaluator"/>.
    /// </summary>
    public static string SelfHarmMetricName => "Self Harm";
}