From aidotnet-moyucode
Automates headless Chrome with Puppeteer for PDF generation from HTML, screenshots, web scraping, and form submission. Useful for browser testing and data extraction.
npx claudepluginhub joshuarweaver/cascade-data-analytics --plugin aidotnet-moyucode

This skill uses the workspace's default tool permissions.
Headless Chrome/Chromium automation for PDF generation, screenshots, web scraping, and testing.
Conducts multi-round deep research on GitHub repos via API and web searches, generating markdown reports with executive summaries, timelines, metrics, and Mermaid diagrams.
Dynamically discovers and combines enabled skills into cohesive, unexpectedly delightful experiences such as interactive HTML or themed artifacts. Activates on "surprise me", inspiration, or boredom cues.
Generates images from structured JSON prompts via Python script execution. Supports reference images and aspect ratios for characters, scenes, products, visuals.
Headless Chrome/Chromium automation for PDF generation, screenshots, web scraping, and testing.
npm install puppeteer
import puppeteer from 'puppeteer';
/**
 * Renders an HTML string to a PDF file using headless Chrome.
 *
 * @param html       Complete HTML document to render.
 * @param outputPath Filesystem path where the PDF is written.
 * @throws Re-throws any Puppeteer error; the browser is always closed.
 */
async function generatePDF(html: string, outputPath: string): Promise<void> {
  const browser = await puppeteer.launch();
  try {
    const page = await browser.newPage();
    // networkidle0: wait until there are no network connections for 500ms,
    // so externally referenced fonts/images finish loading before printing.
    await page.setContent(html, { waitUntil: 'networkidle0' });
    await page.pdf({
      path: outputPath,
      format: 'A4',
      margin: { top: '20mm', right: '20mm', bottom: '20mm', left: '20mm' },
      printBackground: true, // CSS backgrounds are omitted by default in print
    });
  } finally {
    // Close even on failure — otherwise the Chromium process leaks.
    await browser.close();
  }
}
// Example: render a minimal invoice document to disk.
const invoiceHtml = `
<html>
<head><style>body { font-family: Arial; }</style></head>
<body><h1>Invoice #001</h1><p>Total: $100.00</p></body>
</html>
`;
// Top-level await — this file must be loaded as an ES module.
await generatePDF(invoiceHtml, 'invoice.pdf');
/**
 * Captures a full-page PNG screenshot of a URL at 1920x1080 viewport width.
 *
 * @param url        Page to load.
 * @param outputPath Filesystem path where the PNG is written.
 * @throws Re-throws any navigation/capture error; the browser is always closed.
 */
async function takeScreenshot(url: string, outputPath: string): Promise<void> {
  const browser = await puppeteer.launch();
  try {
    const page = await browser.newPage();
    await page.setViewport({ width: 1920, height: 1080 });
    // networkidle2: at most 2 in-flight connections for 500ms — a looser
    // settle condition that tolerates long-polling/analytics requests.
    await page.goto(url, { waitUntil: 'networkidle2' });
    await page.screenshot({
      path: outputPath,
      fullPage: true, // capture the entire scrollable page, not just the viewport
      type: 'png',
    });
  } finally {
    // Close even on failure — otherwise the Chromium process leaks.
    await browser.close();
  }
}
/**
 * Scrapes `.product` entries from a page, returning title/price text pairs.
 * Fields are `undefined` when the expected child element is missing.
 *
 * @param url Page to scrape.
 * @returns Array of { title, price } extracted from the DOM.
 * @throws Re-throws any navigation error; the browser is always closed.
 */
async function scrapeData(url: string) {
  const browser = await puppeteer.launch();
  try {
    const page = await browser.newPage();
    // domcontentloaded is enough here: we only read server-rendered markup.
    await page.goto(url, { waitUntil: 'domcontentloaded' });
    // evaluate() runs in the page context — only serializable data returns.
    return await page.evaluate(() => {
      const items = document.querySelectorAll('.product');
      return Array.from(items).map(item => ({
        title: item.querySelector('h2')?.textContent?.trim(),
        price: item.querySelector('.price')?.textContent?.trim(),
      }));
    });
  } finally {
    // Close even on failure — otherwise the Chromium process leaks.
    await browser.close();
  }
}
/**
 * Fills and submits a form, waiting for the resulting navigation.
 *
 * @param url      Page containing the form.
 * @param formData Map of CSS selector -> text to type into that field.
 * @throws Re-throws any navigation/input error; the browser is always closed.
 */
async function submitForm(url: string, formData: Record<string, string>): Promise<void> {
  // headless: false opens a visible browser window — presumably for
  // debugging/demo purposes; switch to the default for unattended runs.
  const browser = await puppeteer.launch({ headless: false });
  try {
    const page = await browser.newPage();
    await page.goto(url);
    // Fill form fields
    for (const [selector, value] of Object.entries(formData)) {
      await page.type(selector, value);
    }
    // Start waiting for navigation BEFORE clicking: clicking first races
    // against fast navigations and can hang waitForNavigation forever.
    await Promise.all([
      page.waitForNavigation(),
      page.click('button[type="submit"]'),
    ]);
  } finally {
    // Close even on failure — otherwise the Chromium process leaks.
    await browser.close();
  }
}
/**
 * Subset of Puppeteer's page.pdf() options used in the examples above.
 * All fields are optional; omitted fields use Puppeteer's defaults.
 */
interface PDFOptions {
  path?: string;                 // omit to get the PDF as a Buffer instead of a file
  scale?: number;                // 0.1 - 2, default 1
  displayHeaderFooter?: boolean;
  headerTemplate?: string;       // HTML template for the print header
  footerTemplate?: string;       // HTML template for the print footer
  printBackground?: boolean;     // include CSS backgrounds (default false)
  landscape?: boolean;
  pageRanges?: string;           // e.g. '1-5, 8, 11-13'
  format?: 'Letter' | 'Legal' | 'A4' | 'A3';
  width?: string;                // e.g. '8.5in' — overrides format
  height?: string;               // e.g. '11in' — overrides format
  // CSS length strings, e.g. '20mm'. (The original shorthand
  // `{ top, right, bottom, left }` is not valid TypeScript in a type literal.)
  margin?: { top?: string; right?: string; bottom?: string; left?: string };
}
browser, pdf, screenshot, automation, scraping