Create moderation
Classifies if text is potentially harmful.

Created by adam186, 490 days ago. Viewed 3991 times.
Submitted by hugo697 — TypeScript (fetch-only).
One script reply has been approved by the moderators (Verified, 137 days ago).
1
type Openai = {
2
  api_key: string;
3
  organization_id: string;
4
};
5
/**
6
 * Create moderation
7
 * Classifies if text is potentially harmful.
8
 */
9
export async function main(
10
  auth: Openai,
11
  body: {
12
    input: string | string[];
13
    model?: string | ("text-moderation-latest" | "text-moderation-stable");
14
    [k: string]: unknown;
15
  }
16
) {
17
  const url = new URL(`https://api.openai.com/v1/moderations`);
18

19
  const response = await fetch(url, {
20
    method: "POST",
21
    headers: {
22
      "OpenAI-Organization": auth.organization_id,
23
      "Content-Type": "application/json",
24
      Authorization: "Bearer " + auth.api_key,
25
    },
26
    body: JSON.stringify(body),
27
  });
28
  if (!response.ok) {
29
    const text = await response.text();
30
    throw new Error(`${response.status} ${text}`);
31
  }
32
  return await response.json();
33
}
34

Other submissions