//native
// Credentials for the AI21 Studio API.
// `apiKey` is sent as a Bearer token in the Authorization header (see main below).
type Ai21 = {
  apiKey: string;
};
/**
 * Studio Chat Complete
 * This is the endpoint for the [Jamba Instruct model](https://docs.ai21.com/docs/jamba-models).
 * This is a foundation model that supports both single-turn (question answering,
 * text completion) and multi-turn (chat style) interactions.
 * You can optionally stream results if you want to get the response as each
 * token is generated, rather than waiting for the entire response.
 *
 * @param auth - AI21 credentials; `apiKey` is sent as a Bearer token.
 * @param body - Chat-completion request payload, serialized verbatim as JSON.
 * @returns The parsed JSON response from the AI21 API.
 * @throws Error when the HTTP status is not 2xx; the message contains the
 *         status code and the raw response body text.
 */
export async function main(
  auth: Ai21,
  body: {
    model:
      | "jamba-instruct"
      | "jamba-instruct-preview"
      | "jamba-1.5-mini"
      | "jamba-1.5-large";
    // FIX: parenthesize the union before `[]`. Previously the array suffix
    // bound only to the last ("system") variant, so `messages` was typed as a
    // single message OR an array of system-only messages — a mixed-role
    // conversation array failed to type-check.
    messages: (
      | { role?: "user"; content: string }
      | {
          role?: "assistant";
          content?: string;
          tool_calls?: {
            id: string;
            type?: "function";
            function: { name: string; arguments: string };
          }[];
        }
      | { role?: "tool"; content: string; tool_call_id: string }
      | { role?: "system"; content: string }
    )[];
    tools?: {
      type: "function";
      function: {
        name: string;
        description?: string;
        // NOTE(review): `{}` means "any non-nullish value" in TS; kept as-is
        // for backward compatibility with existing callers.
        parameters?: { type?: "object"; properties: {}; required?: string[] };
      };
    }[];
    n?: number;
    max_tokens?: number;
    temperature?: number;
    top_p?: number;
    stop?: string | string[];
    stream?: boolean; // was `false | true`, which is exactly `boolean`
    mock_response?: {
      response_delay_seconds?: number;
      stream_response_delay_between_deltas_seconds?: number;
    };
    documents?: { id?: string; content: string; metadata?: {} }[];
    response_format?: { type: "text" | "json_object" };
  },
) {
  const url = new URL(`https://api.ai21.com/studio/v1/chat/completions`);
  const response = await fetch(url, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: "Bearer " + auth.apiKey,
    },
    body: JSON.stringify(body),
  });
  if (!response.ok) {
    // Surface API failures with the status code and raw body for debugging.
    const text = await response.text();
    throw new Error(`${response.status} ${text}`);
  }
  return await response.json();
}
// Submitted by hugo697 32 days ago