@@ -21,6 +21,7 @@
     _xpia,
     _coherence,
 )
+from azure.ai.evaluation._evaluators._eci._eci import ECIEvaluator
 from azure.ai.evaluation._evaluate import _evaluate
 from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
 from azure.ai.evaluation._model_configurations import AzureAIProject, EvaluationResult
@@ -30,8 +31,9 @@
     AdversarialScenario,
     AdversarialScenarioJailbreak,
     IndirectAttackSimulator,
-    DirectAttackSimulator,
+    DirectAttackSimulator,
 )
+from azure.ai.evaluation.simulator._adversarial_scenario import _UnstableAdversarialScenario
 from azure.ai.evaluation.simulator._utils import JsonLineList
 from azure.ai.evaluation._common.utils import validate_azure_ai_project
 from azure.ai.evaluation._model_configurations import AzureOpenAIModelConfiguration, OpenAIModelConfiguration
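The second added import pulls `_UnstableAdversarialScenario` out of a private simulator module; the diff only ever uses it as `_UnstableAdversarialScenario.ECI`, so it is presumably an enum living alongside the public scenario enums. A hypothetical sketch of that shape (the member value is an assumption, not taken from the source):

```python
from enum import Enum

# Hypothetical reconstruction, inferred only from how this diff uses the class;
# the real definition lives in azure.ai.evaluation.simulator._adversarial_scenario.
class _UnstableAdversarialScenario(Enum):
    ECI = "adv_eci"  # member value assumed for illustration
```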
@@ -75,6 +77,7 @@ class _SafetyEvaluator(Enum):
     COHERENCE = "coherence"
     INDIRECT_ATTACK = "indirect_attack"
     DIRECT_ATTACK = "direct_attack"
+    ECI = "eci"
 
 
 @experimental
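The new `ECI` member behaves like its siblings: string-valued, so it round-trips through the value shown in the diff. A self-contained mirror of the enum after this hunk:

```python
from enum import Enum

# Mirror of _SafetyEvaluator as it stands after this change, for illustration.
class _SafetyEvaluator(Enum):
    COHERENCE = "coherence"
    INDIRECT_ATTACK = "indirect_attack"
    DIRECT_ATTACK = "direct_attack"
    ECI = "eci"

assert _SafetyEvaluator("eci") is _SafetyEvaluator.ECI
assert _SafetyEvaluator.ECI.value == "eci"
```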
@@ -148,7 +151,7 @@ async def _simulate(
         max_simulation_results: int = 3,
         conversation_turns: List[List[Union[str, Dict[str, Any]]]] = [],
         tasks: List[str] = [],
-        adversarial_scenario: Optional[Union[AdversarialScenario, AdversarialScenarioJailbreak]] = None,
+        adversarial_scenario: Optional[Union[AdversarialScenario, AdversarialScenarioJailbreak, _UnstableAdversarialScenario]] = None,
         source_text: Optional[str] = None,
         direct_attack: bool = False,
     ) -> Dict[str, str]:
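Widening the `adversarial_scenario` union means downstream consumers that accept only the public enums now need either an `isinstance` guard or a `# type: ignore`; both appear later in this diff. A self-contained sketch of the pattern, using stand-in enums:

```python
from enum import Enum
from typing import Optional, Union

# Stand-ins for the real scenario enums, for illustration only.
class AdversarialScenario(Enum):
    ADVERSARIAL_QA = "adv_qa"

class _UnstableAdversarialScenario(Enum):
    ECI = "eci"  # value assumed

def simulate(scenario: Optional[Union[AdversarialScenario, _UnstableAdversarialScenario]] = None) -> str:
    # Consumers that only understand the public enum sit behind a guard.
    if isinstance(scenario, AdversarialScenario):
        return f"public scenario: {scenario.value}"
    return f"internal scenario: {scenario.value if scenario else None}"

print(simulate(_UnstableAdversarialScenario.ECI))  # internal scenario: eci
```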
@@ -231,7 +234,7 @@ async def callback(
             )
 
         # if DirectAttack, run DirectAttackSimulator
-        elif direct_attack:
+        elif direct_attack and isinstance(adversarial_scenario, AdversarialScenario):
             self.logger.info(
                 f"Running DirectAttackSimulator with inputs: adversarial_scenario={adversarial_scenario}, max_conversation_turns={max_conversation_turns}, max_simulation_results={max_simulation_results}"
             )
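The added `isinstance` check does double duty: it keeps the internal ECI scenario from reaching `DirectAttackSimulator`, and it narrows the union for type checkers. A toy mirror of the branch selection:

```python
from enum import Enum

class AdversarialScenario(Enum):
    ADVERSARIAL_QA = "adv_qa"

class _UnstableAdversarialScenario(Enum):
    ECI = "eci"  # value assumed

def pick_simulator(direct_attack: bool, scenario: Enum) -> str:
    # Mirrors the guard in this hunk: DirectAttack only runs for the public enum.
    if direct_attack and isinstance(scenario, AdversarialScenario):
        return "DirectAttackSimulator"
    return "AdversarialSimulator"

print(pick_simulator(True, AdversarialScenario.ADVERSARIAL_QA))  # DirectAttackSimulator
print(pick_simulator(True, _UnstableAdversarialScenario.ECI))    # AdversarialSimulator
```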
@@ -267,7 +270,7 @@ async def callback(
             )
             simulator = AdversarialSimulator(azure_ai_project=self.azure_ai_project, credential=self.credential)
             simulator_outputs = await simulator(
-                scenario=adversarial_scenario,
+                scenario=adversarial_scenario,  # type: ignore
                 max_conversation_turns=max_conversation_turns,
                 max_simulation_results=max_simulation_results,
                 conversation_turns=conversation_turns,
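`AdversarialSimulator.__call__` presumably still declares only the public scenario enums, so the widened union trips static checking at this call site; the `# type: ignore` silences just that line. Casts and ignores are static-only, so runtime behavior is unchanged either way; a toy demonstration under that assumption:

```python
from enum import Enum
from typing import Union, cast

class PublicScenario(Enum):
    A = "a"

class InternalScenario(Enum):
    B = "b"

def simulator(scenario: PublicScenario) -> str:
    # The annotation is not enforced at runtime; any enum value flows through.
    return str(scenario)

arg: Union[PublicScenario, InternalScenario] = InternalScenario.B
# Equivalent, slightly more explicit alternative to `# type: ignore`:
print(simulator(cast(PublicScenario, arg)))  # InternalScenario.B
```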
@@ -340,7 +343,7 @@ def _get_scenario(
         evaluators: List[_SafetyEvaluator],
         num_turns: int = 3,
         scenario: Optional[Union[AdversarialScenario, AdversarialScenarioJailbreak]] = None,
-    ) -> Optional[Union[AdversarialScenario, AdversarialScenarioJailbreak]]:
+    ) -> Optional[Union[AdversarialScenario, AdversarialScenarioJailbreak, _UnstableAdversarialScenario]]:
         """
         Returns the Simulation scenario based on the provided list of SafetyEvaluator.
 
@@ -362,6 +365,8 @@ def _get_scenario(
                     if num_turns > 1
                     else AdversarialScenario.ADVERSARIAL_QA
                 )
+            if evaluator == _SafetyEvaluator.ECI:
+                return _UnstableAdversarialScenario.ECI
             if evaluator in [
                 _SafetyEvaluator.GROUNDEDNESS,
                 _SafetyEvaluator.RELEVANCE,
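The new branch maps a request for the ECI evaluator straight to the internal ECI scenario, and its placement matters: it runs per-evaluator before the quality-evaluator grouping below it. A toy mirror of the dispatch order:

```python
from enum import Enum
from typing import List, Optional

class _SafetyEvaluator(Enum):
    ECI = "eci"
    GROUNDEDNESS = "groundedness"

class _UnstableAdversarialScenario(Enum):
    ECI = "eci"  # value assumed

def get_scenario(evaluators: List[_SafetyEvaluator]) -> Optional[Enum]:
    for evaluator in evaluators:
        # ECI is checked before the quality-evaluator grouping, so it wins.
        if evaluator == _SafetyEvaluator.ECI:
            return _UnstableAdversarialScenario.ECI
        if evaluator in [_SafetyEvaluator.GROUNDEDNESS]:
            return None
    return None

print(get_scenario([_SafetyEvaluator.ECI]))  # _UnstableAdversarialScenario.ECI
```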
@@ -439,6 +444,10 @@ def _get_evaluators(
                 evaluators_dict["content_safety"] = _content_safety.ContentSafetyEvaluator(
                     azure_ai_project=self.azure_ai_project, credential=self.credential
                 )
+            elif evaluator == _SafetyEvaluator.ECI:
+                evaluators_dict["eci"] = ECIEvaluator(
+                    azure_ai_project=self.azure_ai_project, credential=self.credential
+                )
             else:
                 msg = (
                     f"Invalid evaluator: {evaluator}. Supported evaluators are: {_SafetyEvaluator.__members__.values()}"