// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.ai.generativelanguage.v1beta;

import "google/api/field_behavior.proto";

option go_package = "cloud.google.com/go/ai/generativelanguage/apiv1beta/generativelanguagepb;generativelanguagepb";
option java_multiple_files = true;
option java_outer_classname = "SafetyProto";
option java_package = "com.google.ai.generativelanguage.v1beta";

// The category of a rating.
//
// These categories cover various kinds of harms that developers
// may wish to adjust.
enum HarmCategory {
  // Category is unspecified.
  HARM_CATEGORY_UNSPECIFIED = 0;

  // Negative or harmful comments targeting identity and/or protected
  // attributes.
  HARM_CATEGORY_DEROGATORY = 1;

  // Content that is rude, disrespectful, or profane.
  HARM_CATEGORY_TOXICITY = 2;

  // Describes scenarios depicting violence against an individual or group,
  // or general descriptions of gore.
  HARM_CATEGORY_VIOLENCE = 3;

  // Contains references to sexual acts or other lewd content.
  HARM_CATEGORY_SEXUAL = 4;

  // Promotes unchecked medical advice.
  HARM_CATEGORY_MEDICAL = 5;

  // Dangerous content that promotes, facilitates, or encourages harmful
  // acts.
  HARM_CATEGORY_DANGEROUS = 6;

  // Harassment content.
  HARM_CATEGORY_HARASSMENT = 7;

  // Hate speech and content.
  HARM_CATEGORY_HATE_SPEECH = 8;

  // Sexually explicit content.
  HARM_CATEGORY_SEXUALLY_EXPLICIT = 9;

  // Dangerous content.
  HARM_CATEGORY_DANGEROUS_CONTENT = 10;
}
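
// Illustrative note (not part of the generated API): under the standard
// proto3 JSON mapping, enum values are serialized as their names, so the
// harassment category appears on the wire as the string below rather than
// its tag number 7:
//
//   "HARM_CATEGORY_HARASSMENT"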

// Content filtering metadata associated with processing a single request.
//
// ContentFilter contains a reason and an optional supporting string. The
// reason may be unspecified.
message ContentFilter {
  // A list of reasons why content may have been blocked.
  enum BlockedReason {
    // A blocked reason was not specified.
    BLOCKED_REASON_UNSPECIFIED = 0;

    // Content was blocked by safety settings.
    SAFETY = 1;

    // Content was blocked, but the reason is uncategorized.
    OTHER = 2;
  }

  // The reason content was blocked during request processing.
  BlockedReason reason = 1;

  // A string that describes the filtering behavior in more detail.
  optional string message = 2;
}
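
// Illustrative example (assumes the standard proto3 JSON mapping; the message
// text is made up for illustration): a ContentFilter reporting that content
// was blocked by safety settings might look like:
//
//   {
//     "reason": "SAFETY",
//     "message": "Prompt blocked by safety settings."
//   }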

// Safety feedback for an entire request.
//
// This field is populated if content in the input and/or response is blocked
// due to safety settings. SafetyFeedback may not exist for every
// HarmCategory. Each SafetyFeedback will return the safety settings used by
// the request as well as the lowest HarmProbability that should be allowed
// in order to return a result.
message SafetyFeedback {
  // Safety rating evaluated from content.
  SafetyRating rating = 1;

  // Safety settings applied to the request.
  SafetySetting setting = 2;
}
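
// Illustrative example (standard proto3 JSON mapping; values chosen only to
// show the shape): feedback for a request where dangerous content was rated
// MEDIUM while the applied setting only allows content below MEDIUM:
//
//   {
//     "rating":  { "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
//                  "probability": "MEDIUM" },
//     "setting": { "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
//                  "threshold": "BLOCK_MEDIUM_AND_ABOVE" }
//   }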

// Safety rating for a piece of content.
//
// The safety rating contains the category of harm and the harm probability
// level in that category for a piece of content. Content is classified for
// safety across a number of harm categories and the probability of the harm
// classification is included here.
message SafetyRating {
  // The probability that a piece of content is harmful.
  //
  // The classification system gives the probability of the content being
  // unsafe. This does not indicate the severity of harm for a piece of
  // content.
  enum HarmProbability {
    // Probability is unspecified.
    HARM_PROBABILITY_UNSPECIFIED = 0;

    // Content has a negligible chance of being unsafe.
    NEGLIGIBLE = 1;

    // Content has a low chance of being unsafe.
    LOW = 2;

    // Content has a medium chance of being unsafe.
    MEDIUM = 3;

    // Content has a high chance of being unsafe.
    HIGH = 4;
  }

  // Required. The category for this rating.
  HarmCategory category = 3 [(google.api.field_behavior) = REQUIRED];

  // Required. The probability of harm for this content.
  HarmProbability probability = 4 [(google.api.field_behavior) = REQUIRED];

  // Was this content blocked because of this rating?
  bool blocked = 5;
}
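
// Illustrative example (standard proto3 JSON mapping; values chosen only to
// show the shape): a rating that flagged harassment content and caused it to
// be blocked:
//
//   {
//     "category": "HARM_CATEGORY_HARASSMENT",
//     "probability": "HIGH",
//     "blocked": true
//   }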

// Safety setting, affecting the safety-blocking behavior.
//
// Passing a safety setting for a category changes the allowed probability
// that content is blocked.
message SafetySetting {
  // Block at and beyond a specified harm probability.
  enum HarmBlockThreshold {
    // Threshold is unspecified.
    HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0;

    // Content with NEGLIGIBLE will be allowed.
    BLOCK_LOW_AND_ABOVE = 1;

    // Content with NEGLIGIBLE and LOW will be allowed.
    BLOCK_MEDIUM_AND_ABOVE = 2;

    // Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed.
    BLOCK_ONLY_HIGH = 3;

    // All content will be allowed.
    BLOCK_NONE = 4;
  }

  // Required. The category for this setting.
  HarmCategory category = 3 [(google.api.field_behavior) = REQUIRED];

  // Required. Controls the probability threshold at which harm is blocked.
  HarmBlockThreshold threshold = 4 [(google.api.field_behavior) = REQUIRED];
}