Call OpenAI APIs for chat completions, embeddings, and image generation in Node.js. Use when building AI features with GPT models, DALL-E, or Whisper.
/plugin marketplace add mgd34msu/goodvibes-plugin
/plugin install goodvibes@goodvibes-market
This skill inherits all available tools. When active, it can use any tool Claude has access to.
Official Node.js/TypeScript SDK for OpenAI. Access GPT models for chat, embeddings, image generation, and audio transcription.
npm install openai
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
const completion = await openai.chat.completions.create({
model: 'gpt-4o',
messages: [
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Hello, how are you?' },
],
});
console.log(completion.choices[0].message.content);
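Non-streaming responses also report token usage, which is useful for cost tracking:
// Token accounting for the request
console.log(completion.usage?.total_tokens);
console.log(completion.usage?.prompt_tokens, completion.usage?.completion_tokens);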
const completion = await openai.chat.completions.create({
model: 'gpt-4o',
messages: [
{ role: 'user', content: 'Write a haiku about programming' },
],
temperature: 0.7,
max_tokens: 100,
top_p: 1,
frequency_penalty: 0,
presence_penalty: 0,
});
const stream = await openai.chat.completions.create({
model: 'gpt-4o',
messages: [{ role: 'user', content: 'Tell me a story' }],
stream: true,
});
for await (const chunk of stream) {
const content = chunk.choices[0]?.delta?.content || '';
process.stdout.write(content);
}
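To receive token usage while streaming, the Chat Completions API accepts a stream_options flag; the final chunk then carries usage alongside an empty choices array (supported in recent API and SDK versions):
const usageStream = await openai.chat.completions.create({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Tell me a story' }],
  stream: true,
  stream_options: { include_usage: true },
});
for await (const chunk of usageStream) {
  process.stdout.write(chunk.choices[0]?.delta?.content || '');
  if (chunk.usage) console.log('\nTotal tokens:', chunk.usage.total_tokens);
}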
// The finalMessage() and event-handler helpers live on the stream returned by
// beta.chat.completions.stream(), not on the raw create({ stream: true }) iterator
const stream = openai.beta.chat.completions.stream({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Hello' }],
});
// Get the fully assembled message when the stream finishes
const finalMessage = await stream.finalMessage();
console.log(finalMessage.content);
// Or use event handlers
stream.on('message', (msg) => console.log(msg));
stream.on('content', (delta) => process.stdout.write(delta));
const tools = [
{
type: 'function' as const,
function: {
name: 'get_weather',
description: 'Get the current weather for a location',
parameters: {
type: 'object',
properties: {
location: {
type: 'string',
description: 'City name, e.g., San Francisco',
},
unit: {
type: 'string',
enum: ['celsius', 'fahrenheit'],
},
},
required: ['location'],
},
},
},
];
const response = await openai.chat.completions.create({
model: 'gpt-4o',
messages: [
{ role: 'user', content: 'What is the weather in Tokyo?' },
],
tools,
tool_choice: 'auto',
});
const toolCall = response.choices[0].message.tool_calls?.[0];
if (toolCall) {
const args = JSON.parse(toolCall.function.arguments);
// Call your function with args
const weatherData = await getWeather(args.location);
// Send result back
const finalResponse = await openai.chat.completions.create({
model: 'gpt-4o',
messages: [
{ role: 'user', content: 'What is the weather in Tokyo?' },
response.choices[0].message,
{
role: 'tool',
tool_call_id: toolCall.id,
content: JSON.stringify(weatherData),
},
],
});
}
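When the model may issue several tool calls, or chain them across turns, the usual pattern is a small dispatch loop: keep appending tool results and re-calling the model until it answers in plain text. A minimal sketch, assuming a hypothetical handlers registry you define yourself:
// Hypothetical registry mapping tool names to your own implementations
const handlers: Record<string, (args: any) => Promise<unknown>> = {
  get_weather: async (args) => getWeather(args.location),
};

const messages: OpenAI.ChatCompletionMessageParam[] = [
  { role: 'user', content: 'What is the weather in Tokyo?' },
];
while (true) {
  const res = await openai.chat.completions.create({ model: 'gpt-4o', messages, tools });
  const msg = res.choices[0].message;
  messages.push(msg);
  if (!msg.tool_calls?.length) break; // plain text answer - done
  for (const call of msg.tool_calls) {
    const result = await handlers[call.function.name](JSON.parse(call.function.arguments));
    messages.push({ role: 'tool', tool_call_id: call.id, content: JSON.stringify(result) });
  }
}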
const response = await openai.chat.completions.create({
model: 'gpt-4o',
messages: [
{
role: 'system',
content: 'Extract the following info as JSON: name, age, occupation',
},
{
role: 'user',
content: 'John is a 30 year old software developer',
},
],
response_format: { type: 'json_object' },
});
const data = JSON.parse(response.choices[0].message.content!);
// { name: 'John', age: 30, occupation: 'software developer' }
const response = await openai.chat.completions.create({
model: 'gpt-4o',
messages: [{ role: 'user', content: 'List 3 programming languages' }],
response_format: {
type: 'json_schema',
json_schema: {
name: 'languages',
schema: {
type: 'object',
properties: {
languages: {
type: 'array',
items: {
type: 'object',
properties: {
name: { type: 'string' },
paradigm: { type: 'string' },
},
required: ['name', 'paradigm'],
},
},
},
required: ['languages'],
},
},
},
});
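If you already model your schemas with Zod, openai-node (v4.55+) ships a helper that generates the json_schema block and parses the reply into a typed object; a sketch assuming the zod package is installed:
import { z } from 'zod';
import { zodResponseFormat } from 'openai/helpers/zod';

const Languages = z.object({
  languages: z.array(z.object({ name: z.string(), paradigm: z.string() })),
});
const parsed = await openai.beta.chat.completions.parse({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'List 3 programming languages' }],
  response_format: zodResponseFormat(Languages, 'languages'),
});
console.log(parsed.choices[0].message.parsed); // typed object, or null if refused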
const response = await openai.embeddings.create({
model: 'text-embedding-3-small',
input: 'The quick brown fox jumps over the lazy dog',
});
const embedding = response.data[0].embedding;
// [0.0123, -0.0456, ...] - 1536 dimensions
const response = await openai.embeddings.create({
model: 'text-embedding-3-small',
input: [
'First document text',
'Second document text',
'Third document text',
],
});
const embeddings = response.data.map((d) => d.embedding);
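Embeddings are compared with cosine similarity; OpenAI embeddings are unit-normalized, so a plain dot product ranks identically, but the full formula is shown for clarity:
// Cosine similarity between two embedding vectors
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0, normA = 0, normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}
const score = cosineSimilarity(embeddings[0], embeddings[1]); // 1 = identical direction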
const response = await openai.images.generate({
model: 'dall-e-3',
prompt: 'A futuristic city skyline at sunset',
n: 1,
size: '1024x1024',
quality: 'hd',
style: 'vivid',
});
const imageUrl = response.data[0].url;
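Generated URLs are short-lived; to persist the image, request base64 output instead:
import fs from 'fs';
const b64Response = await openai.images.generate({
  model: 'dall-e-3',
  prompt: 'A futuristic city skyline at sunset',
  response_format: 'b64_json',
});
fs.writeFileSync('city.png', Buffer.from(b64Response.data[0].b64_json!, 'base64'));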
import fs from 'fs';
const response = await openai.images.edit({
model: 'dall-e-2',
image: fs.createReadStream('original.png'),
mask: fs.createReadStream('mask.png'),
prompt: 'Add a red hat to the person',
n: 1,
size: '1024x1024',
});
const response = await openai.chat.completions.create({
model: 'gpt-4o',
messages: [
{
role: 'user',
content: [
{ type: 'text', text: 'What is in this image?' },
{
type: 'image_url',
image_url: {
url: 'https://example.com/image.jpg',
detail: 'high',
},
},
],
},
],
max_tokens: 300,
});
import fs from 'fs';
const base64Image = fs.readFileSync('image.jpg').toString('base64');
const response = await openai.chat.completions.create({
model: 'gpt-4o',
messages: [
{
role: 'user',
content: [
{ type: 'text', text: 'Describe this image' },
{
type: 'image_url',
image_url: {
url: `data:image/jpeg;base64,${base64Image}`,
},
},
],
},
],
});
import fs from 'fs';
const transcription = await openai.audio.transcriptions.create({
file: fs.createReadStream('audio.mp3'),
model: 'whisper-1',
language: 'en',
response_format: 'text',
});
console.log(transcription);
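whisper-1 can also return timing information; verbose_json plus timestamp_granularities yields per-word timestamps:
const detailed = await openai.audio.transcriptions.create({
  file: fs.createReadStream('audio.mp3'),
  model: 'whisper-1',
  response_format: 'verbose_json',
  timestamp_granularities: ['word'],
});
console.log(detailed.words); // [{ word, start, end }, ...]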
const response = await openai.audio.speech.create({
model: 'tts-1',
voice: 'alloy', // alloy, echo, fable, onyx, nova, shimmer
input: 'Hello, this is a test of text to speech.',
});
const buffer = Buffer.from(await response.arrayBuffer());
fs.writeFileSync('output.mp3', buffer);
// app/api/chat/route.ts
// Note: OpenAIStream/StreamingTextResponse ship with Vercel AI SDK v3 (the 'ai' package);
// AI SDK v4 removed them in favor of streamText
import OpenAI from 'openai';
import { OpenAIStream, StreamingTextResponse } from 'ai';
const openai = new OpenAI();
export async function POST(request: Request) {
const { messages } = await request.json();
const response = await openai.chat.completions.create({
model: 'gpt-4o',
messages,
stream: true,
});
const stream = OpenAIStream(response);
return new StreamingTextResponse(stream);
}
// app/api/chat/route.ts
import OpenAI from 'openai';

export const runtime = 'edge';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});
export async function POST(request: Request) {
const { prompt } = await request.json();
const stream = await openai.chat.completions.create({
model: 'gpt-4o',
messages: [{ role: 'user', content: prompt }],
stream: true,
});
  // toReadableStream() emits newline-delimited JSON chunks, not SSE framing
  return new Response(stream.toReadableStream(), {
    headers: { 'Content-Type': 'text/plain; charset=utf-8' },
  });
}
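On the client, the SDK can rebuild a typed stream from that response body; ChatCompletionStream.fromReadableStream is the documented counterpart to toReadableStream (import path per openai-node v4):
import { ChatCompletionStream } from 'openai/lib/ChatCompletionStream';

const res = await fetch('/api/chat', {
  method: 'POST',
  body: JSON.stringify({ prompt: 'Hello' }),
});
const runner = ChatCompletionStream.fromReadableStream(res.body!);
runner.on('content', (delta) => process.stdout.write(delta));
await runner.finalChatCompletion();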
import OpenAI from 'openai';
try {
const completion = await openai.chat.completions.create({
model: 'gpt-4o',
messages: [{ role: 'user', content: 'Hello' }],
});
} catch (error) {
if (error instanceof OpenAI.APIError) {
console.error('Status:', error.status);
console.error('Message:', error.message);
console.error('Code:', error.code);
if (error.status === 429) {
// Rate limited - implement retry logic
} else if (error.status === 401) {
// Invalid API key
}
}
}
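The client retries some failures automatically (configurable via the maxRetries constructor option), but for sustained 429s an explicit exponential backoff helps. A minimal sketch using a hypothetical withBackoff helper:
// Retry a request with exponential backoff on rate-limit errors
async function withBackoff<T>(fn: () => Promise<T>, maxAttempts = 5): Promise<T> {
  for (let attempt = 0; ; attempt++) {
    try {
      return await fn();
    } catch (error) {
      const retryable = error instanceof OpenAI.APIError && error.status === 429;
      if (!retryable || attempt >= maxAttempts - 1) throw error;
      await new Promise((r) => setTimeout(r, 2 ** attempt * 1000)); // 1s, 2s, 4s, ...
    }
  }
}
const result = await withBackoff(() =>
  openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Hello' }],
  })
);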
// Responses API - a newer alternative to chat completions (requires a recent openai-node version)
const response = await openai.responses.create({
model: 'gpt-4o',
input: 'What is the capital of France?',
});
console.log(response.output_text);
const stream = await openai.responses.create({
model: 'gpt-4o',
input: 'Tell me a story',
stream: true,
});
for await (const event of stream) {
if (event.type === 'response.output_text.delta') {
process.stdout.write(event.delta);
}
}
OPENAI_API_KEY=sk-xxxxxxxx
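The client reads OPENAI_API_KEY from process.env by default; in local development you can load it from a .env file (assuming the dotenv package is installed):
import 'dotenv/config'; // loads .env before the client is constructed
import OpenAI from 'openai';
const openai = new OpenAI(); // picks up process.env.OPENAI_API_KEY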