commit d0e11aef1b2b322f65f42ce0d70f0d482865f315 Author: BuildTools Date: Sun Jan 25 19:27:44 2026 +0800 Init commit. diff --git a/.claude/settings.local.json b/.claude/settings.local.json new file mode 100644 index 0000000..9fc5e37 --- /dev/null +++ b/.claude/settings.local.json @@ -0,0 +1,12 @@ +{ + "permissions": { + "allow": [ + "Bash(dir:*)", + "mcp__web-reader__webReader", + "Bash(python:*)", + "Bash(npm run build)", + "Bash(npx tsc:*)", + "Bash(findstr:*)" + ] + } +} diff --git a/.claude/skills/algorithmic-art/LICENSE.txt b/.claude/skills/algorithmic-art/LICENSE.txt new file mode 100644 index 0000000..7a4a3ea --- /dev/null +++ b/.claude/skills/algorithmic-art/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/.claude/skills/algorithmic-art/SKILL.md b/.claude/skills/algorithmic-art/SKILL.md new file mode 100644 index 0000000..634f6fa --- /dev/null +++ b/.claude/skills/algorithmic-art/SKILL.md @@ -0,0 +1,405 @@ +--- +name: algorithmic-art +description: Creating algorithmic art using p5.js with seeded randomness and interactive parameter exploration. Use this when users request creating art using code, generative art, algorithmic art, flow fields, or particle systems. Create original algorithmic art rather than copying existing artists' work to avoid copyright violations. +license: Complete terms in LICENSE.txt +--- + +Algorithmic philosophies are computational aesthetic movements that are then expressed through code. Output .md files (philosophy), .html files (interactive viewer), and .js files (generative algorithms). + +This happens in two steps: +1. Algorithmic Philosophy Creation (.md file) +2. Express by creating p5.js generative art (.html + .js files) + +First, undertake this task: + +## ALGORITHMIC PHILOSOPHY CREATION + +To begin, create an ALGORITHMIC PHILOSOPHY (not static images or templates) that will be interpreted through: +- Computational processes, emergent behavior, mathematical beauty +- Seeded randomness, noise fields, organic systems +- Particles, flows, fields, forces +- Parametric variation and controlled chaos + +### THE CRITICAL UNDERSTANDING +- What is received: Some subtle input or instructions by the user to take into account, but use as a foundation; it should not constrain creative freedom. +- What is created: An algorithmic philosophy/generative aesthetic movement. +- What happens next: The same version receives the philosophy and EXPRESSES IT IN CODE - creating p5.js sketches that are 90% algorithmic generation, 10% essential parameters. + +Consider this approach: +- Write a manifesto for a generative art movement +- The next phase involves writing the algorithm that brings it to life + +The philosophy must emphasize: Algorithmic expression. Emergent behavior. Computational beauty. Seeded variation. 
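As a minimal sketch of what "seeded variation" means in practice - assuming p5.js globals, with the string-to-seed hash and the input string below being illustrative, not part of the skill - the same seed must reproduce the same artwork exactly:

```javascript
// Illustrative only: derive a deterministic seed from arbitrary user input
function seedFromString(s) {
  let h = 0;
  for (let i = 0; i < s.length; i++) {
    h = (h * 31 + s.charCodeAt(i)) >>> 0; // unsigned 32-bit rolling hash
  }
  return h;
}

function setup() {
  createCanvas(400, 400);
  const seed = seedFromString("quantum harmonics #7"); // hypothetical user input
  randomSeed(seed); // identical random() sequence on every run
  noiseSeed(seed);  // identical noise() field on every run
  noLoop();
}
```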
+ +### HOW TO GENERATE AN ALGORITHMIC PHILOSOPHY + +**Name the movement** (1-2 words): "Organic Turbulence" / "Quantum Harmonics" / "Emergent Stillness" + +**Articulate the philosophy** (4-6 paragraphs - concise but complete): + +To capture the ALGORITHMIC essence, express how this philosophy manifests through: +- Computational processes and mathematical relationships? +- Noise functions and randomness patterns? +- Particle behaviors and field dynamics? +- Temporal evolution and system states? +- Parametric variation and emergent complexity? + +**CRITICAL GUIDELINES:** +- **Avoid redundancy**: Each algorithmic aspect should be mentioned once. Avoid repeating concepts about noise theory, particle dynamics, or mathematical principles unless adding new depth. +- **Emphasize craftsmanship REPEATEDLY**: The philosophy MUST stress multiple times that the final algorithm should appear as though it took countless hours to develop, was refined with care, and comes from someone at the absolute top of their field. This framing is essential - repeat phrases like "meticulously crafted algorithm," "the product of deep computational expertise," "painstaking optimization," "master-level implementation." +- **Leave creative space**: Be specific about the algorithmic direction, but concise enough that the next Claude has room to make interpretive implementation choices at an extremely high level of craftsmanship. + +The philosophy must guide the next version to express ideas ALGORITHMICALLY, not through static images. Beauty lives in the process, not the final frame. + +### PHILOSOPHY EXAMPLES + +**"Organic Turbulence"** +Philosophy: Chaos constrained by natural law, order emerging from disorder. +Algorithmic expression: Flow fields driven by layered Perlin noise. Thousands of particles following vector forces, their trails accumulating into organic density maps. Multiple noise octaves create turbulent regions and calm zones. Color emerges from velocity and density - fast particles burn bright, slow ones fade to shadow. The algorithm runs until equilibrium - a meticulously tuned balance where every parameter was refined through countless iterations by a master of computational aesthetics. + +**"Quantum Harmonics"** +Philosophy: Discrete entities exhibiting wave-like interference patterns. +Algorithmic expression: Particles initialized on a grid, each carrying a phase value that evolves through sine waves. When particles are near, their phases interfere - constructive interference creates bright nodes, destructive creates voids. Simple harmonic motion generates complex emergent mandalas. The result of painstaking frequency calibration where every ratio was carefully chosen to produce resonant beauty. + +**"Recursive Whispers"** +Philosophy: Self-similarity across scales, infinite depth in finite space. +Algorithmic expression: Branching structures that subdivide recursively. Each branch slightly randomized but constrained by golden ratios. L-systems or recursive subdivision generate tree-like forms that feel both mathematical and organic. Subtle noise perturbations break perfect symmetry. Line weights diminish with each recursion level. Every branching angle the product of deep mathematical exploration. + +**"Field Dynamics"** +Philosophy: Invisible forces made visible through their effects on matter. +Algorithmic expression: Vector fields constructed from mathematical functions or noise. Particles born at edges, flowing along field lines, dying when they reach equilibrium or boundaries. 
Multiple fields can attract, repel, or rotate particles. The visualization shows only the traces - ghost-like evidence of invisible forces. A computational dance meticulously choreographed through force balance. + +**"Stochastic Crystallization"** +Philosophy: Random processes crystallizing into ordered structures. +Algorithmic expression: Randomized circle packing or Voronoi tessellation. Start with random points, let them evolve through relaxation algorithms. Cells push apart until equilibrium. Color based on cell size, neighbor count, or distance from center. The organic tiling that emerges feels both random and inevitable. Every seed produces unique crystalline beauty - the mark of a master-level generative algorithm. + +*These are condensed examples. The actual algorithmic philosophy should be 4-6 substantial paragraphs.* + +### ESSENTIAL PRINCIPLES +- **ALGORITHMIC PHILOSOPHY**: Creating a computational worldview to be expressed through code +- **PROCESS OVER PRODUCT**: Always emphasize that beauty emerges from the algorithm's execution - each run is unique +- **PARAMETRIC EXPRESSION**: Ideas communicate through mathematical relationships, forces, behaviors - not static composition +- **ARTISTIC FREEDOM**: The next Claude interprets the philosophy algorithmically - provide creative implementation room +- **PURE GENERATIVE ART**: This is about making LIVING ALGORITHMS, not static images with randomness +- **EXPERT CRAFTSMANSHIP**: Repeatedly emphasize the final algorithm must feel meticulously crafted, refined through countless iterations, the product of deep expertise by someone at the absolute top of their field in computational aesthetics + +**The algorithmic philosophy should be 4-6 paragraphs long.** Fill it with poetic computational philosophy that brings together the intended vision. Avoid repeating the same points. Output this algorithmic philosophy as a .md file. + +--- + +## DEDUCING THE CONCEPTUAL SEED + +**CRITICAL STEP**: Before implementing the algorithm, identify the subtle conceptual thread from the original request. + +**THE ESSENTIAL PRINCIPLE**: +The concept is a **subtle, niche reference embedded within the algorithm itself** - not always literal, always sophisticated. Someone familiar with the subject should feel it intuitively, while others simply experience a masterful generative composition. The algorithmic philosophy provides the computational language. The deduced concept provides the soul - the quiet conceptual DNA woven invisibly into parameters, behaviors, and emergence patterns. + +This is **VERY IMPORTANT**: The reference must be so refined that it enhances the work's depth without announcing itself. Think like a jazz musician quoting another song through algorithmic harmony - only those who know will catch it, but everyone appreciates the generative beauty. + +--- + +## P5.JS IMPLEMENTATION + +With the philosophy AND conceptual framework established, express it through code. Pause to gather thoughts before proceeding. Use only the algorithmic philosophy created and the instructions below. + +### ⚠️ STEP 0: READ THE TEMPLATE FIRST ⚠️ + +**CRITICAL: BEFORE writing any HTML:** + +1. **Read** `templates/viewer.html` using the Read tool +2. **Study** the exact structure, styling, and Anthropic branding +3. **Use that file as the LITERAL STARTING POINT** - not just inspiration +4. **Keep all FIXED sections exactly as shown** (header, sidebar structure, Anthropic colors/fonts, seed controls, action buttons) +5. 
**Replace only the VARIABLE sections** marked in the file's comments (algorithm, parameters, UI controls for parameters) + +**Avoid:** +- ❌ Creating HTML from scratch +- ❌ Inventing custom styling or color schemes +- ❌ Using system fonts or dark themes +- ❌ Changing the sidebar structure + +**Follow these practices:** +- ✅ Copy the template's exact HTML structure +- ✅ Keep Anthropic branding (Poppins/Lora fonts, light colors, gradient backdrop) +- ✅ Maintain the sidebar layout (Seed → Parameters → Colors? → Actions) +- ✅ Replace only the p5.js algorithm and parameter controls + +The template is the foundation. Build on it, don't rebuild it. + +--- + +To create gallery-quality computational art that lives and breathes, use the algorithmic philosophy as the foundation. + +### TECHNICAL REQUIREMENTS + +**Seeded Randomness (Art Blocks Pattern)**: +```javascript +// ALWAYS use a seed for reproducibility +let seed = 12345; // or hash from user input +randomSeed(seed); +noiseSeed(seed); +``` + +**Parameter Structure - FOLLOW THE PHILOSOPHY**: + +To establish parameters that emerge naturally from the algorithmic philosophy, consider: "What qualities of this system can be adjusted?" + +```javascript +let params = { + seed: 12345, // Always include seed for reproducibility + // colors + // Add parameters that control YOUR algorithm: + // - Quantities (how many?) + // - Scales (how big? how fast?) + // - Probabilities (how likely?) + // - Ratios (what proportions?) + // - Angles (what direction?) + // - Thresholds (when does behavior change?) +}; +``` + +**To design effective parameters, focus on the properties the system needs to be tunable rather than thinking in terms of "pattern types".** + +**Core Algorithm - EXPRESS THE PHILOSOPHY**: + +**CRITICAL**: The algorithmic philosophy should dictate what to build. + +To express the philosophy through code, avoid thinking "which pattern should I use?" and instead think "how to express this philosophy through code?" + +If the philosophy is about **organic emergence**, consider using: +- Elements that accumulate or grow over time +- Random processes constrained by natural rules +- Feedback loops and interactions + +If the philosophy is about **mathematical beauty**, consider using: +- Geometric relationships and ratios +- Trigonometric functions and harmonics +- Precise calculations creating unexpected patterns + +If the philosophy is about **controlled chaos**, consider using: +- Random variation within strict boundaries +- Bifurcation and phase transitions +- Order emerging from disorder + +**The algorithm flows from the philosophy, not from a menu of options.** + +To guide the implementation, let the conceptual essence inform creative and original choices. Build something that expresses the vision for this particular request. + +**Canvas Setup**: Standard p5.js structure: +```javascript +function setup() { + createCanvas(1200, 1200); + // Initialize your system +} + +function draw() { + // Your generative algorithm + // Can be static (noLoop) or animated +} +``` + +### CRAFTSMANSHIP REQUIREMENTS + +**CRITICAL**: To achieve mastery, create algorithms that feel like they emerged through countless iterations by a master generative artist. Tune every parameter carefully. Ensure every pattern emerges with purpose. This is NOT random noise - this is CONTROLLED CHAOS refined through deep expertise. 
- **Balance**: Complexity without visual noise, order without rigidity +- **Color Harmony**: Thoughtful palettes, not random RGB values +- **Composition**: Even in randomness, maintain visual hierarchy and flow +- **Performance**: Smooth execution, optimized for real-time if animated +- **Reproducibility**: Same seed ALWAYS produces identical output + +### OUTPUT FORMAT + +Output: +1. **Algorithmic Philosophy** - As markdown or text explaining the generative aesthetic +2. **Single HTML Artifact** - Self-contained interactive generative art built from `templates/viewer.html` (see STEP 0 and next section) + +The HTML artifact contains everything: p5.js (from CDN), the algorithm, parameter controls, and UI - all in one file that works immediately in claude.ai artifacts or any browser. Start from the template file, not from scratch. + +--- + +## INTERACTIVE ARTIFACT CREATION + +**REMINDER: `templates/viewer.html` should have already been read (see STEP 0). Use that file as the starting point.** + +To allow exploration of the generative art, create a single, self-contained HTML artifact. Ensure this artifact works immediately in claude.ai or any browser - no setup required. Embed everything inline. + +### CRITICAL: WHAT'S FIXED VS VARIABLE + +The `templates/viewer.html` file is the foundation. It contains the exact structure and styling needed. + +**FIXED (always include exactly as shown):** +- Layout structure (header, sidebar, main canvas area) +- Anthropic branding (UI colors, fonts, gradients) +- Seed section in sidebar: + - Seed display + - Previous/Next buttons + - Random button + - Jump to seed input + Go button +- Actions section in sidebar: + - Regenerate button + - Reset button + +**VARIABLE (customize for each artwork):** +- The entire p5.js algorithm (setup/draw/classes) +- The parameters object (define what the art needs) +- The Parameters section in sidebar: + - Number of parameter controls + - Parameter names + - Min/max/step values for sliders + - Control types (sliders, inputs, etc.) +- Colors section (optional): + - Some art needs color pickers + - Some art might use fixed colors + - Some art might be monochrome (no color controls needed) + - Decide based on the art's needs + +**Every artwork should have unique parameters and algorithm!** The fixed parts provide consistent UX - everything else expresses the unique vision. + +### REQUIRED FEATURES + +**1. Parameter Controls** +- Sliders for numeric parameters (particle count, noise scale, speed, etc.) +- Color pickers for palette colors +- Real-time updates when parameters change +- Reset button to restore defaults + +**2. Seed Navigation** +- Display current seed number +- "Previous" and "Next" buttons to cycle through seeds +- "Random" button for random seed +- Input field to jump to specific seed +- Generate 100 variations when requested (seeds 1-100) + +**3. Single Artifact Structure**
+```html
+<!DOCTYPE html>
+<html>
+<head>
+  <meta charset="utf-8">
+  <title>Generative Art</title>
+  <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.9.0/p5.min.js"></script>
+  <style>/* ALL CSS inline - Anthropic branding from the template */</style>
+</head>
+<body>
+  <div class="header"><!-- title --></div>
+  <div class="layout">
+    <div class="sidebar"><!-- Seed / Parameters / Colors? / Actions --></div>
+    <div class="canvas-container"><!-- p5.js canvas mounts here --></div>
+  </div>
+  <script>/* ALL JavaScript inline: algorithm + UI wiring */</script>
+</body>
+</html>
+```
+ +**CRITICAL**: This is a single artifact. No external files, no imports (except p5.js CDN). Everything inline. + +**4. Implementation Details - BUILD THE SIDEBAR** + +The sidebar structure: + +**1. Seed (FIXED)** - Always include exactly as shown: +- Seed display +- Prev/Next/Random/Jump buttons + +**2. Parameters (VARIABLE)** - Create controls for the art:
+```html
+<div class="control-group">
+  <label>Parameter Name <span class="value-display">50</span></label>
+  <input type="range" min="1" max="100" step="1" value="50">
+</div>
+<!-- ...one control-group per parameter... -->
+```
+Add as many control-group divs as there are parameters. + +**3. Colors (OPTIONAL/VARIABLE)** - Include if the art needs adjustable colors: +- Add color pickers if users should control palette +- Skip this section if the art uses fixed colors +- Skip if the art is monochrome + +**4. Actions (FIXED)** - Always include exactly as shown: +- Regenerate button +- Reset button +- Download PNG button + +**Requirements**: +- Seed controls must work (prev/next/random/jump/display) +- All parameters must have UI controls +- Regenerate, Reset, Download buttons must work +- Keep Anthropic branding (UI styling, not art colors) + +### USING THE ARTIFACT + +The HTML artifact works immediately: +1. **In claude.ai**: Displayed as an interactive artifact - runs instantly +2. **As a file**: Save and open in any browser - no server needed +3. **Sharing**: Send the HTML file - it's completely self-contained + +--- + +## VARIATIONS & EXPLORATION + +The artifact includes seed navigation by default (prev/next/random buttons), allowing users to explore variations without creating multiple files. If the user wants specific variations highlighted: + +- Include seed presets (buttons for "Variation 1: Seed 42", "Variation 2: Seed 127", etc.) +- Add a "Gallery Mode" that shows thumbnails of multiple seeds side-by-side +- All within the same single artifact + +This is like creating a series of prints from the same plate - the algorithm is consistent, but each seed reveals different facets of its potential. The interactive nature means users discover their own favorites by exploring the seed space. + +--- + +## THE CREATIVE PROCESS + +**User request** → **Algorithmic philosophy** → **Implementation** + +Each request is unique. The process involves: + +1. **Interpret the user's intent** - What aesthetic is being sought? +2. **Create an algorithmic philosophy** (4-6 paragraphs) describing the computational approach +3. **Implement it in code** - Build the algorithm that expresses this philosophy +4. **Design appropriate parameters** - What should be tunable? +5. **Build matching UI controls** - Sliders/inputs for those parameters + +**The constants**: +- Anthropic branding (colors, fonts, layout) +- Seed navigation (always present) +- Self-contained HTML artifact + +**Everything else is variable**: +- The algorithm itself +- The parameters +- The UI controls +- The visual outcome + +To achieve the best results, trust creativity and let the philosophy guide the implementation. + +--- + +## RESOURCES + +This skill includes helpful templates and documentation: + +- **templates/viewer.html**: REQUIRED STARTING POINT for all HTML artifacts. + - This is the foundation - contains the exact structure and Anthropic branding + - **Keep unchanged**: Layout structure, sidebar organization, Anthropic colors/fonts, seed controls, action buttons + - **Replace**: The p5.js algorithm, parameter definitions, and UI controls in Parameters section + - The extensive comments in the file mark exactly what to keep vs replace
+ - Shows how to organize parameters, use seeded randomness, structure classes + - NOT a pattern menu - use these principles to build unique algorithms + - Embed algorithms inline in the HTML artifact (don't create separate .js files) + +**Critical reminder**: +- The **template is the STARTING POINT**, not inspiration +- The **algorithm is where to create** something unique +- Don't copy the flow field example - build what the philosophy demands +- But DO keep the exact UI structure and Anthropic branding from the template \ No newline at end of file diff --git a/.claude/skills/algorithmic-art/templates/generator_template.js b/.claude/skills/algorithmic-art/templates/generator_template.js new file mode 100644 index 0000000..e263fbd --- /dev/null +++ b/.claude/skills/algorithmic-art/templates/generator_template.js @@ -0,0 +1,223 @@ +/** + * ═══════════════════════════════════════════════════════════════════════════ + * P5.JS GENERATIVE ART - BEST PRACTICES + * ═══════════════════════════════════════════════════════════════════════════ + * + * This file shows STRUCTURE and PRINCIPLES for p5.js generative art. + * It does NOT prescribe what art you should create. + * + * Your algorithmic philosophy should guide what you build. + * These are just best practices for how to structure your code. + * + * ═══════════════════════════════════════════════════════════════════════════ + */ + +// ============================================================================ +// 1. PARAMETER ORGANIZATION +// ============================================================================ +// Keep all tunable parameters in one object +// This makes it easy to: +// - Connect to UI controls +// - Reset to defaults +// - Serialize/save configurations + +let params = { + // Define parameters that match YOUR algorithm + // Examples (customize for your art): + // - Counts: how many elements (particles, circles, branches, etc.) + // - Scales: size, speed, spacing + // - Probabilities: likelihood of events + // - Angles: rotation, direction + // - Colors: palette arrays + + seed: 12345, + // define colorPalette as an array -- choose whatever colors you'd like ['#d97757', '#6a9bcc', '#788c5d', '#b0aea5'] + // Add YOUR parameters here based on your algorithm +}; + +// ============================================================================ +// 2. SEEDED RANDOMNESS (Critical for reproducibility) +// ============================================================================ +// ALWAYS use seeded random for Art Blocks-style reproducible output + +function initializeSeed(seed) { + randomSeed(seed); + noiseSeed(seed); + // Now all random() and noise() calls will be deterministic +} + +// ============================================================================ +// 3. 
P5.JS LIFECYCLE +// ============================================================================ + +function setup() { + createCanvas(800, 800); + + // Initialize seed first + initializeSeed(params.seed); + + // Set up your generative system + // This is where you initialize: + // - Arrays of objects + // - Grid structures + // - Initial positions + // - Starting states + + // For static art: call noLoop() at the end of setup + // For animated art: let draw() keep running +} + +function draw() { + // Option 1: Static generation (runs once, then stops) + // - Generate everything in setup() + // - Call noLoop() in setup() + // - draw() doesn't do much or can be empty + + // Option 2: Animated generation (continuous) + // - Update your system each frame + // - Common patterns: particle movement, growth, evolution + // - Can optionally call noLoop() after N frames + + // Option 3: User-triggered regeneration + // - Use noLoop() by default + // - Call redraw() when parameters change +} + +// ============================================================================ +// 4. CLASS STRUCTURE (When you need objects) +// ============================================================================ +// Use classes when your algorithm involves multiple entities +// Examples: particles, agents, cells, nodes, etc. + +class Entity { + constructor() { + // Initialize entity properties + // Use random() here - it will be seeded + } + + update() { + // Update entity state + // This might involve: + // - Physics calculations + // - Behavioral rules + // - Interactions with neighbors + } + + display() { + // Render the entity + // Keep rendering logic separate from update logic + } +} + +// ============================================================================ +// 5. PERFORMANCE CONSIDERATIONS +// ============================================================================ + +// For large numbers of elements: +// - Pre-calculate what you can +// - Use simple collision detection (spatial hashing if needed) +// - Limit expensive operations (sqrt, trig) when possible +// - Consider using p5 vectors efficiently + +// For smooth animation: +// - Aim for 60fps +// - Profile if things are slow +// - Consider reducing particle counts or simplifying calculations + +// ============================================================================ +// 6. UTILITY FUNCTIONS +// ============================================================================ + +// Color utilities +function hexToRgb(hex) { + const result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex); + return result ? { + r: parseInt(result[1], 16), + g: parseInt(result[2], 16), + b: parseInt(result[3], 16) + } : null; +} + +function colorFromPalette(index) { + return params.colorPalette[index % params.colorPalette.length]; +} + +// Mapping and easing +function mapRange(value, inMin, inMax, outMin, outMax) { + return outMin + (outMax - outMin) * ((value - inMin) / (inMax - inMin)); +} + +function easeInOutCubic(t) { + return t < 0.5 ? 4 * t * t * t : 1 - Math.pow(-2 * t + 2, 3) / 2; +} + +// Constrain to bounds +function wrapAround(value, max) { + if (value < 0) return max; + if (value > max) return 0; + return value; +} + +// ============================================================================ +// 7. 
PARAMETER UPDATES (Connect to UI) +// ============================================================================ + +function updateParameter(paramName, value) { + params[paramName] = value; + // Decide if you need to regenerate or just update + // Some params can update in real-time, others need full regeneration +} + +function regenerate() { + // Reinitialize your generative system + // Useful when parameters change significantly + initializeSeed(params.seed); + // Then regenerate your system +} + +// ============================================================================ +// 8. COMMON P5.JS PATTERNS +// ============================================================================ + +// Drawing with transparency for trails/fading +function fadeBackground(opacity) { + fill(250, 249, 245, opacity); // Anthropic light with alpha + noStroke(); + rect(0, 0, width, height); +} + +// Using noise for organic variation +function getNoiseValue(x, y, scale = 0.01) { + return noise(x * scale, y * scale); +} + +// Creating vectors from angles +function vectorFromAngle(angle, magnitude = 1) { + return createVector(cos(angle), sin(angle)).mult(magnitude); +} + +// ============================================================================ +// 9. EXPORT FUNCTIONS +// ============================================================================ + +function exportImage() { + saveCanvas('generative-art-' + params.seed, 'png'); +} + +// ============================================================================ +// REMEMBER +// ============================================================================ +// +// These are TOOLS and PRINCIPLES, not a recipe. +// Your algorithmic philosophy should guide WHAT you create. +// This structure helps you create it WELL. +// +// Focus on: +// - Clean, readable code +// - Parameterized for exploration +// - Seeded for reproducibility +// - Performant execution +// +// The art itself is entirely up to you! +// +// ============================================================================ \ No newline at end of file diff --git a/.claude/skills/algorithmic-art/templates/viewer.html b/.claude/skills/algorithmic-art/templates/viewer.html new file mode 100644 index 0000000..630cc1f --- /dev/null +++ b/.claude/skills/algorithmic-art/templates/viewer.html @@ -0,0 +1,599 @@ + + + + + + + Generative Art Viewer + + + + + + + +
+[viewer.html body: ~599 lines of Anthropic-branded viewer markup, inline CSS, and inline p5.js/UI JavaScript; surviving text remnants are the page title "Generative Art Viewer" and the loading message "Initializing generative art..."]
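Since the template's markup survives only as the placeholder above, here is an illustrative sketch of the seed-navigation wiring that SKILL.md requires of the sidebar - the element IDs and the `setSeed` helper are assumptions for illustration, not the actual viewer.html code:

```javascript
// Hypothetical wiring (IDs assumed); the real template keeps all of this inline
let params = { seed: 12345 };

function setSeed(newSeed) {
  params.seed = ((newSeed % 100000) + 100000) % 100000; // keep seed in a small positive range
  document.getElementById('seed-display').textContent = params.seed;
  randomSeed(params.seed); // deterministic p5.js randomness
  noiseSeed(params.seed);  // deterministic p5.js noise
  redraw();                // regenerate the artwork for this seed
}

document.getElementById('seed-prev').onclick = () => setSeed(params.seed - 1);
document.getElementById('seed-next').onclick = () => setSeed(params.seed + 1);
document.getElementById('seed-random').onclick = () => setSeed(Math.floor(Math.random() * 100000));
document.getElementById('seed-go').onclick = () =>
  setSeed(parseInt(document.getElementById('seed-input').value, 10) || 0);
```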
+ + + + \ No newline at end of file diff --git a/.claude/skills/brand-guidelines/LICENSE.txt b/.claude/skills/brand-guidelines/LICENSE.txt new file mode 100644 index 0000000..7a4a3ea --- /dev/null +++ b/.claude/skills/brand-guidelines/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/.claude/skills/brand-guidelines/SKILL.md b/.claude/skills/brand-guidelines/SKILL.md new file mode 100644 index 0000000..47c72c6 --- /dev/null +++ b/.claude/skills/brand-guidelines/SKILL.md @@ -0,0 +1,73 @@ +--- +name: brand-guidelines +description: Applies Anthropic's official brand colors and typography to any sort of artifact that may benefit from having Anthropic's look-and-feel. Use it when brand colors or style guidelines, visual formatting, or company design standards apply. +license: Complete terms in LICENSE.txt +--- + +# Anthropic Brand Styling + +## Overview + +To access Anthropic's official brand identity and style resources, use this skill. + +**Keywords**: branding, corporate identity, visual identity, post-processing, styling, brand colors, typography, Anthropic brand, visual formatting, visual design + +## Brand Guidelines + +### Colors + +**Main Colors:** + +- Dark: `#141413` - Primary text and dark backgrounds +- Light: `#faf9f5` - Light backgrounds and text on dark +- Mid Gray: `#b0aea5` - Secondary elements +- Light Gray: `#e8e6dc` - Subtle backgrounds + +**Accent Colors:** + +- Orange: `#d97757` - Primary accent +- Blue: `#6a9bcc` - Secondary accent +- Green: `#788c5d` - Tertiary accent + +### Typography + +- **Headings**: Poppins (with Arial fallback) +- **Body Text**: Lora (with Georgia fallback) +- **Note**: Fonts should be pre-installed in your environment for best results + +## Features + +### Smart Font Application + +- Applies Poppins font to headings (24pt and larger) +- Applies Lora font to body text +- Automatically falls back to Arial/Georgia if custom fonts unavailable +- Preserves readability across all systems + +### Text Styling + +- Headings (24pt+): Poppins font +- Body text: Lora font +- Smart color selection based on background +- Preserves text hierarchy and formatting + +### Shape and Accent Colors + +- Non-text shapes use accent colors +- Cycles through orange, blue, and green accents +- Maintains visual interest while staying on-brand + +## Technical Details + +### Font Management + +- Uses system-installed Poppins and Lora fonts when available +- Provides automatic fallback to Arial (headings) and Georgia (body) +- No font installation required - works with existing system fonts +- For best results, pre-install Poppins and Lora fonts in your environment + +### Color Application + +- Uses RGB color values for precise brand matching +- Applied via python-pptx's RGBColor class +- Maintains color fidelity across different systems diff --git a/.claude/skills/canvas-design/LICENSE.txt b/.claude/skills/canvas-design/LICENSE.txt new file mode 100644 index 0000000..7a4a3ea --- /dev/null +++ b/.claude/skills/canvas-design/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file
diff --git a/.claude/skills/canvas-design/SKILL.md b/.claude/skills/canvas-design/SKILL.md
new file mode 100644
index 0000000..9f63fee
--- /dev/null
+++ b/.claude/skills/canvas-design/SKILL.md
@@ -0,0 +1,130 @@
+---
+name: canvas-design
+description: Create beautiful visual art in .png and .pdf documents using design philosophy. You should use this skill when the user asks to create a poster, piece of art, design, or other static piece. Create original visual designs, never copying existing artists' work to avoid copyright violations.
+license: Complete terms in LICENSE.txt
+---
+
+These are instructions for creating design philosophies - aesthetic movements that are then EXPRESSED VISUALLY. Output only .md files, .pdf files, and .png files.
+
+Complete this in two steps:
+1. Design Philosophy Creation (.md file)
+2. Expression on a canvas (.pdf file or .png file)
+
+First, undertake this task:
+
+## DESIGN PHILOSOPHY CREATION
+
+To begin, create a VISUAL PHILOSOPHY (not layouts or templates) that will be interpreted through:
+- Form, space, color, composition
+- Images, graphics, shapes, patterns
+- Minimal text as visual accent
+
+### THE CRITICAL UNDERSTANDING
+- What is received: Some subtle input or instructions from the user, to be taken into account as a foundation - not as a constraint on creative freedom.
+- What is created: A design philosophy/aesthetic movement.
+- What happens next: The next Claude then receives the philosophy and EXPRESSES IT VISUALLY - creating artifacts that are 90% visual design, 10% essential text.
+
+Consider this approach:
+- Write a manifesto for an art movement
+- The next phase involves making the artwork
+
+The philosophy must emphasize: Visual expression. Spatial communication. Artistic interpretation. Minimal words.
+
+### HOW TO GENERATE A VISUAL PHILOSOPHY
+
+**Name the movement** (1-2 words): "Brutalist Joy" / "Chromatic Silence" / "Metabolist Dreams"
+
+**Articulate the philosophy** (4-6 paragraphs - concise but complete):
+
+To capture the VISUAL essence, express how the philosophy manifests through:
+- Space and form
+- Color and material
+- Scale and rhythm
+- Composition and balance
+- Visual hierarchy
+
+**CRITICAL GUIDELINES:**
+- **Avoid redundancy**: Each design aspect should be mentioned once. Avoid repeating points about color theory, spatial relationships, or typographic principles unless adding new depth.
+- **Emphasize craftsmanship REPEATEDLY**: The philosophy MUST stress multiple times that the final work should appear as though it took countless hours to create, was labored over with care, and comes from someone at the absolute top of their field. This framing is essential - repeat phrases like "meticulously crafted," "the product of deep expertise," "painstaking attention," "master-level execution."
+- **Leave creative space**: Remain specific about the aesthetic direction, but concise enough that the next Claude has room to make interpretive choices, also at an extremely high level of craftsmanship.
+
+The philosophy must guide the next version to express ideas VISUALLY, not through text. Information lives in design, not paragraphs.
+
+### PHILOSOPHY EXAMPLES
+
+**"Concrete Poetry"**
+Philosophy: Communication through monumental form and bold geometry.
+Visual expression: Massive color blocks, sculptural typography (huge single words, tiny labels), Brutalist spatial divisions, Polish poster energy meets Le Corbusier. Ideas expressed through visual weight and spatial tension, not explanation. Text as rare, powerful gesture - never paragraphs, only essential words integrated into the visual architecture. Every element placed with the precision of a master craftsman.
+
+**"Chromatic Language"**
+Philosophy: Color as the primary information system.
+Visual expression: Geometric precision where color zones create meaning. Typography minimal - small sans-serif labels letting chromatic fields communicate. Think Josef Albers' interaction meets data visualization. Information encoded spatially and chromatically. Words only to anchor what color already shows. The result of painstaking chromatic calibration.
+
+**"Analog Meditation"**
+Philosophy: Quiet visual contemplation through texture and breathing room.
+Visual expression: Paper grain, ink bleeds, vast negative space. Photography and illustration dominate. Typography whispered (small, restrained, serving the visual). Japanese photobook aesthetic. Images breathe across pages. Text appears sparingly - short phrases, never explanatory blocks. Each composition balanced with the care of a meditation practice.
+
+**"Organic Systems"**
+Philosophy: Natural clustering and modular growth patterns.
+Visual expression: Rounded forms, organic arrangements, color from nature through architecture. Information shown through visual diagrams, spatial relationships, iconography. Text only for key labels floating in space. The composition tells the story through expert spatial orchestration.
+
+**"Geometric Silence"**
+Philosophy: Pure order and restraint.
+Visual expression: Grid-based precision, bold photography or stark graphics, dramatic negative space. Typography precise but minimal - small essential text, large quiet zones. Swiss formalism meets Brutalist material honesty. Structure communicates, not words. Every alignment the work of countless refinements.
+
+*These are condensed examples. The actual design philosophy should be 4-6 substantial paragraphs.*
+
+### ESSENTIAL PRINCIPLES
+- **VISUAL PHILOSOPHY**: Create an aesthetic worldview to be expressed through design
+- **MINIMAL TEXT**: Always emphasize that text is sparse, essential-only, integrated as visual element - never lengthy
+- **SPATIAL EXPRESSION**: Ideas communicate through space, form, color, composition - not paragraphs
+- **ARTISTIC FREEDOM**: The next Claude interprets the philosophy visually - provide creative room
+- **PURE DESIGN**: This is about making ART OBJECTS, not documents with decoration
+- **EXPERT CRAFTSMANSHIP**: Repeatedly emphasize the final work must look meticulously crafted, labored over with care, the product of countless hours by someone at the top of their field
+
+**The design philosophy should be 4-6 paragraphs long.** Fill it with poetic design language that brings together the core vision. Avoid repeating the same points. Keep the design philosophy generic, without mentioning the intention of the art, so that it could be applied anywhere. Output the design philosophy as a .md file.
+
+---
+
+## DEDUCING THE SUBTLE REFERENCE
+
+**CRITICAL STEP**: Before creating the canvas, identify the subtle conceptual thread from the original request.
+
+**THE ESSENTIAL PRINCIPLE**:
+The topic is a **subtle, niche reference embedded within the art itself** - not always literal, always sophisticated. Someone familiar with the subject should feel it intuitively, while others simply experience a masterful abstract composition. The design philosophy provides the aesthetic language. The deduced topic provides the soul - the quiet conceptual DNA woven invisibly into form, color, and composition.
+
+This is **VERY IMPORTANT**: The reference must be refined so it enhances the work's depth without announcing itself. Think like a jazz musician quoting another song - only those who know will catch it, but everyone appreciates the music.
+
+---
+
+## CANVAS CREATION
+
+With both the philosophy and the conceptual framework established, express them on a canvas. Take a moment to gather thoughts and clear the mind. Use the design philosophy created and the instructions below to craft a masterpiece, embodying all aspects of the philosophy with expert craftsmanship.
+
+**IMPORTANT**: For any type of content, even if the user requests something for a movie/game/book, the approach should still be sophisticated. Never lose sight of the idea that this should be art, not something that's cartoony or amateur.
+
+To create museum- or magazine-quality work, use the design philosophy as the foundation. Create a single-page, highly visual, design-forward PDF or PNG output (unless asked for more pages). Generally use repeating patterns and perfect shapes. Treat the abstract philosophical design as if it were a scientific bible, borrowing the visual language of systematic observation - dense accumulation of marks, repeated elements, or layered patterns that build meaning through patient repetition and reward sustained viewing. Add sparse, clinical typography and systematic reference markers that suggest this could be a diagram from an imaginary discipline, treating the invisible subject with the same reverence typically reserved for documenting observable phenomena. Anchor the piece with simple phrase(s) or details positioned subtly, using a limited color palette that feels intentional and cohesive. Embrace the paradox of using analytical visual language to express ideas about human experience: the result should feel like an artifact that proves something ephemeral can be studied, mapped, and understood through careful attention. This is true art.
+
+**Text as a contextual element**: Text is always minimal and visual-first, but let context guide whether that means whisper-quiet labels or bold typographic gestures. A punk venue poster might have larger, more aggressive type than a minimalist ceramics studio identity. Most of the time, the font should be thin. All use of fonts must be design-forward and prioritize visual communication. Regardless of text scale, nothing falls off the page and nothing overlaps. Every element must be contained within the canvas boundaries with proper margins. Check carefully that all text, graphics, and visual elements have breathing room and clear separation. This is non-negotiable for professional execution. **IMPORTANT: Use different fonts if writing text. Search the `./canvas-fonts` directory. Regardless of approach, sophistication is non-negotiable.**
+
+Download and use whatever fonts are needed to make this a reality. Get creative by making the typography actually part of the art itself -- if the art is abstract, bring the font onto the canvas, not typeset digitally.
+
+To push boundaries, follow design instinct/intuition while using the philosophy as a guiding principle. Embrace ultimate design freedom and choice. Push aesthetics and design to the frontier.
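+
+For example, a first-pass canvas script might look like the minimal sketch below. This is an illustration only, assuming Pillow is available in the environment; the grid rule, palette, and label are placeholders to be replaced by choices drawn from the design philosophy (the font path points at one of the bundled `./canvas-fonts` files).
+
+```python
+# Minimal sketch, not a prescribed implementation. Assumes Pillow is installed;
+# every aesthetic constant below is a placeholder for philosophy-driven choices.
+from PIL import Image, ImageDraw, ImageFont
+
+W, H = 2480, 3508                    # A4 portrait at 300 dpi
+MARGIN = 200                         # breathing room: nothing may cross this
+PALETTE = ["#1b1b1e", "#e8e4da", "#b5452a"]  # limited, intentional palette
+
+canvas = Image.new("RGB", (W, H), PALETTE[1])
+draw = ImageDraw.Draw(canvas)
+
+# Systematic, repeated marks: a dense grid of circles whose radius varies by a
+# quiet rule, building meaning through patient repetition.
+cols, rows = 24, 34
+grid_h = H - 2 * MARGIN - 240        # reserve a caption band below the grid
+cell_w = (W - 2 * MARGIN) / cols
+cell_h = grid_h / rows
+for r in range(rows):
+    for c in range(cols):
+        cx = MARGIN + (c + 0.5) * cell_w
+        cy = MARGIN + (r + 0.5) * cell_h
+        radius = 2 + 10 * ((r * c) % 7) / 6
+        draw.ellipse((cx - radius, cy - radius, cx + radius, cy + radius),
+                     fill=PALETTE[0])
+
+# Sparse, clinical typography: one thin label, inside the margins, no overlap.
+# "FIELD STUDY 01" is a placeholder anchor phrase.
+font = ImageFont.truetype("./canvas-fonts/ArsenalSC-Regular.ttf", 64)
+draw.text((MARGIN, H - MARGIN - 120), "FIELD STUDY 01", font=font, fill=PALETTE[2])
+
+canvas.save("canvas.png")
+# The same image saves as a single-page PDF (multi-page bundles can use
+# save_all=True plus append_images):
+canvas.save("canvas.pdf", "PDF", resolution=300.0)
+```
+
+Treat a sketch like this as scaffolding for iteration, not a finished piece - the refinement passes below are where the craftsmanship shows.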
+**CRITICAL**: To achieve human-crafted quality (not AI-generated), create work that looks like it took countless hours. Make it appear as though someone at the absolute top of their field labored over every detail with painstaking care. Ensure the composition, spacing, color choices, typography - everything - screams expert-level craftsmanship. Double-check that nothing overlaps, the formatting is flawless, and every detail is perfect. Create something that could be shown to people to prove expertise and rank as undeniably impressive.
+
+Output the final result as a single, downloadable .pdf or .png file, alongside the design philosophy, saved as a .md file.
+
+---
+
+## FINAL STEP
+
+**IMPORTANT**: The user ALREADY said "It isn't perfect enough. It must be pristine, a masterpiece of craftsmanship, as if it were about to be displayed in a museum."
+
+**CRITICAL**: To refine the work, avoid adding more graphics; instead refine what has been created and make it extremely crisp, respecting the design philosophy and the principles of minimalism entirely. Rather than adding a fun filter or swapping out a font, consider how to make the existing composition more cohesive as art. If the instinct is to call a new function or draw a new shape, STOP and instead ask: "How can I make what's already here more of a piece of art?"
+
+Take a second pass. Go back to the code and refine/polish further to make this a philosophically designed masterpiece.
+
+## MULTI-PAGE OPTION
+
+To create additional pages when requested, create further pages in the same spirit as the design philosophy, yet each distinctly different. Bundle those pages into the same .pdf, or produce multiple .pngs. Treat the first page as a single page in a whole coffee-table book waiting to be filled. Make the next pages unique twists on, and memories of, the original. Have them almost tell a story, in a very tasteful way. Exercise full creative freedom.
\ No newline at end of file
diff --git a/.claude/skills/canvas-design/canvas-fonts/ArsenalSC-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/ArsenalSC-OFL.txt
new file mode 100644
index 0000000..1dad6ca
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/ArsenalSC-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2012 The Arsenal Project Authors (andrij.design@gmail.com)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such.
This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/.claude/skills/canvas-design/canvas-fonts/ArsenalSC-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/ArsenalSC-Regular.ttf new file mode 100644 index 0000000..fe5409b Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/ArsenalSC-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/BigShoulders-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/BigShoulders-Bold.ttf new file mode 100644 index 0000000..fc5f8fd Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/BigShoulders-Bold.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/BigShoulders-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/BigShoulders-OFL.txt new file mode 100644 index 0000000..b220280 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/BigShoulders-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2019 The Big Shoulders Project Authors (https://github.com/xotypeco/big_shoulders) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. 
+ +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/BigShoulders-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/BigShoulders-Regular.ttf new file mode 100644 index 0000000..de8308c Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/BigShoulders-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Boldonse-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Boldonse-OFL.txt new file mode 100644 index 0000000..1890cb1 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/Boldonse-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2024 The Boldonse Project Authors (https://github.com/googlefonts/boldonse) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. 
The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. 
IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/Boldonse-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Boldonse-Regular.ttf new file mode 100644 index 0000000..43fa30a Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Boldonse-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Bold.ttf new file mode 100644 index 0000000..f3b1ded Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Bold.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-OFL.txt new file mode 100644 index 0000000..fc2b216 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2022 The Bricolage Grotesque Project Authors (https://github.com/ateliertriay/bricolage) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Regular.ttf new file mode 100644 index 0000000..0674ae3 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Bold.ttf new file mode 100644 index 0000000..58730fb Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Bold.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Italic.ttf b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Italic.ttf new file mode 100644 index 0000000..786a1bd Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Italic.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-OFL.txt new file mode 100644 index 0000000..f976fdc --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2018 The Crimson Pro Project Authors (https://github.com/Fonthausen/CrimsonPro) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Regular.ttf new file mode 100644 index 0000000..f5666b9 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/DMMono-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/DMMono-OFL.txt new file mode 100644 index 0000000..5b17f0c --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/DMMono-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2020 The DM Mono Project Authors (https://www.github.com/googlefonts/dm-mono) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/DMMono-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/DMMono-Regular.ttf new file mode 100644 index 0000000..7efe813 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/DMMono-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/EricaOne-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/EricaOne-OFL.txt new file mode 100644 index 0000000..490d012 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/EricaOne-OFL.txt @@ -0,0 +1,94 @@ +Copyright (c) 2011 by LatinoType Limitada (luciano@latinotype.com), +with Reserved Font Names "Erica One" + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/EricaOne-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/EricaOne-Regular.ttf new file mode 100644 index 0000000..8bd91d1 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/EricaOne-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/GeistMono-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/GeistMono-Bold.ttf new file mode 100644 index 0000000..736ff7c Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/GeistMono-Bold.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/GeistMono-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/GeistMono-OFL.txt new file mode 100644 index 0000000..679a685 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/GeistMono-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2024 The Geist Project Authors (https://github.com/vercel/geist-font.git) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/GeistMono-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/GeistMono-Regular.ttf new file mode 100644 index 0000000..1a30262 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/GeistMono-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Gloock-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Gloock-OFL.txt new file mode 100644 index 0000000..363acd3 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/Gloock-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2022 The Gloock Project Authors (https://github.com/duartp/gloock) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/Gloock-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Gloock-Regular.ttf new file mode 100644 index 0000000..3e58c4e Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Gloock-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Bold.ttf new file mode 100644 index 0000000..247979c Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Bold.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-OFL.txt new file mode 100644 index 0000000..e423b74 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-OFL.txt @@ -0,0 +1,93 @@ +Copyright © 2017 IBM Corp. with Reserved Font Name "Plex" + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Regular.ttf new file mode 100644 index 0000000..601ae94 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Bold.ttf new file mode 100644 index 0000000..78f6e50 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Bold.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-BoldItalic.ttf b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-BoldItalic.ttf new file mode 100644 index 0000000..369b89d Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-BoldItalic.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Italic.ttf b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Italic.ttf new file mode 100644 index 0000000..a4d859a Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Italic.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Regular.ttf new file mode 100644 index 0000000..35f454c Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Bold.ttf new file mode 100644 index 0000000..f602dce Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Bold.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-BoldItalic.ttf b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-BoldItalic.ttf new file mode 100644 index 0000000..122b273 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-BoldItalic.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Italic.ttf b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Italic.ttf new file mode 100644 index 0000000..4b98fb8 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Italic.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-OFL.txt new file mode 100644 index 0000000..4bb9914 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2022 The Instrument Sans Project Authors 
(https://github.com/Instrument/instrument-sans) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. 
+ +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Regular.ttf new file mode 100644 index 0000000..14c6113 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Italic.ttf b/.claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Italic.ttf new file mode 100644 index 0000000..8fa958d Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Italic.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Regular.ttf new file mode 100644 index 0000000..9763031 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Italiana-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Italiana-OFL.txt new file mode 100644 index 0000000..ba8af21 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/Italiana-OFL.txt @@ -0,0 +1,93 @@ +Copyright (c) 2011, Santiago Orozco (hi@typemade.mx), with Reserved Font Name "Italiana". + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. 
+ +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/.claude/skills/canvas-design/canvas-fonts/Italiana-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Italiana-Regular.ttf new file mode 100644 index 0000000..a9b828c Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Italiana-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Bold.ttf new file mode 100644 index 0000000..1926c80 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Bold.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-OFL.txt new file mode 100644 index 0000000..5ceee00 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2020 The JetBrains Mono Project Authors (https://github.com/JetBrains/JetBrainsMono) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. 
+ +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Regular.ttf new file mode 100644 index 0000000..436c982 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Jura-Light.ttf b/.claude/skills/canvas-design/canvas-fonts/Jura-Light.ttf new file mode 100644 index 0000000..dffbb33 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Jura-Light.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Jura-Medium.ttf b/.claude/skills/canvas-design/canvas-fonts/Jura-Medium.ttf new file mode 100644 index 0000000..4bf91a3 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Jura-Medium.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Jura-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Jura-OFL.txt new file mode 100644 index 0000000..64ad4c6 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/Jura-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2019 The Jura Project Authors (https://github.com/ossobuffo/jura) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/LibreBaskerville-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/LibreBaskerville-OFL.txt new file mode 100644 index 0000000..8c531fa --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/LibreBaskerville-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2012 The Libre Baskerville Project Authors (https://github.com/impallari/Libre-Baskerville) with Reserved Font Name Libre Baskerville. + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/.claude/skills/canvas-design/canvas-fonts/LibreBaskerville-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/LibreBaskerville-Regular.ttf new file mode 100644 index 0000000..c1abc26 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/LibreBaskerville-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Lora-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/Lora-Bold.ttf new file mode 100644 index 0000000..edae21e Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Lora-Bold.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Lora-BoldItalic.ttf b/.claude/skills/canvas-design/canvas-fonts/Lora-BoldItalic.ttf new file mode 100644 index 0000000..12dea8c Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Lora-BoldItalic.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Lora-Italic.ttf b/.claude/skills/canvas-design/canvas-fonts/Lora-Italic.ttf new file mode 100644 index 0000000..e24b69b Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Lora-Italic.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Lora-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Lora-OFL.txt new file mode 100644 index 0000000..4cf1b95 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/Lora-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2011 The Lora Project Authors (https://github.com/cyrealtype/Lora-Cyrillic), with Reserved Font Name "Lora". + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/Lora-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Lora-Regular.ttf new file mode 100644 index 0000000..dc751db Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Lora-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/NationalPark-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/NationalPark-Bold.ttf new file mode 100644 index 0000000..f4d7c02 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/NationalPark-Bold.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/NationalPark-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/NationalPark-OFL.txt new file mode 100644 index 0000000..f4ec3fb --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/NationalPark-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2025 The National Park Project Authors (https://github.com/benhoepner/National-Park) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/NationalPark-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/NationalPark-Regular.ttf new file mode 100644 index 0000000..e4cbfbf Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/NationalPark-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-OFL.txt new file mode 100644 index 0000000..c81eccd --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-OFL.txt @@ -0,0 +1,93 @@ +Copyright (c) 2010, Kimberly Geswein (kimberlygeswein.com) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-Regular.ttf new file mode 100644 index 0000000..b086bce Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Outfit-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/Outfit-Bold.ttf new file mode 100644 index 0000000..f9f2f72 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Outfit-Bold.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Outfit-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Outfit-OFL.txt new file mode 100644 index 0000000..fd0cb99 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/Outfit-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2021 The Outfit Project Authors (https://github.com/Outfitio/Outfit-Fonts) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/Outfit-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Outfit-Regular.ttf new file mode 100644 index 0000000..3939ab2 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Outfit-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/PixelifySans-Medium.ttf b/.claude/skills/canvas-design/canvas-fonts/PixelifySans-Medium.ttf new file mode 100644 index 0000000..95cd372 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/PixelifySans-Medium.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/PixelifySans-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/PixelifySans-OFL.txt new file mode 100644 index 0000000..b02d1b6 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/PixelifySans-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2021 The Pixelify Sans Project Authors (https://github.com/eifetx/Pixelify-Sans) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). 
+ +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/PoiretOne-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/PoiretOne-OFL.txt new file mode 100644 index 0000000..607bdad --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/PoiretOne-OFL.txt @@ -0,0 +1,93 @@ +Copyright (c) 2011, Denis Masharov (denis.masharov@gmail.com) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/PoiretOne-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/PoiretOne-Regular.ttf new file mode 100644 index 0000000..b339511 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/PoiretOne-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/RedHatMono-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/RedHatMono-Bold.ttf new file mode 100644 index 0000000..a6e3cf1 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/RedHatMono-Bold.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/RedHatMono-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/RedHatMono-OFL.txt new file mode 100644 index 0000000..16cf394 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/RedHatMono-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2024 The Red Hat Project Authors (https://github.com/RedHatOfficial/RedHatFont) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). 
+ +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/RedHatMono-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/RedHatMono-Regular.ttf new file mode 100644 index 0000000..3bf6a69 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/RedHatMono-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Silkscreen-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Silkscreen-OFL.txt new file mode 100644 index 0000000..a1fe7d5 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/Silkscreen-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2001 The Silkscreen Project Authors (https://github.com/googlefonts/silkscreen) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/Silkscreen-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Silkscreen-Regular.ttf new file mode 100644 index 0000000..8abaa7c Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Silkscreen-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/SmoochSans-Medium.ttf b/.claude/skills/canvas-design/canvas-fonts/SmoochSans-Medium.ttf new file mode 100644 index 0000000..0af9ead Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/SmoochSans-Medium.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/SmoochSans-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/SmoochSans-OFL.txt new file mode 100644 index 0000000..4c2f033 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/SmoochSans-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2016 The Smooch Sans Project Authors (https://github.com/googlefonts/smooch-sans) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). 
+ +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/Tektur-Medium.ttf b/.claude/skills/canvas-design/canvas-fonts/Tektur-Medium.ttf new file mode 100644 index 0000000..34fc797 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Tektur-Medium.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/Tektur-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Tektur-OFL.txt new file mode 100644 index 0000000..2cad55f --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/Tektur-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2023 The Tektur Project Authors (https://www.github.com/hyvyys/Tektur) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/Tektur-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Tektur-Regular.ttf new file mode 100644 index 0000000..f280fba Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Tektur-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/WorkSans-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/WorkSans-Bold.ttf new file mode 100644 index 0000000..5c97989 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/WorkSans-Bold.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/WorkSans-BoldItalic.ttf b/.claude/skills/canvas-design/canvas-fonts/WorkSans-BoldItalic.ttf new file mode 100644 index 0000000..54418b8 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/WorkSans-BoldItalic.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/WorkSans-Italic.ttf b/.claude/skills/canvas-design/canvas-fonts/WorkSans-Italic.ttf new file mode 100644 index 0000000..40529b6 Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/WorkSans-Italic.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/WorkSans-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/WorkSans-OFL.txt new file mode 100644 index 0000000..070f341 --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/WorkSans-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2019 The Work Sans Project Authors (https://github.com/weiweihuanghuang/Work-Sans) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. 
+ +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/.claude/skills/canvas-design/canvas-fonts/WorkSans-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/WorkSans-Regular.ttf new file mode 100644 index 0000000..d24586c Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/WorkSans-Regular.ttf differ diff --git a/.claude/skills/canvas-design/canvas-fonts/YoungSerif-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/YoungSerif-OFL.txt new file mode 100644 index 0000000..f09443c --- /dev/null +++ b/.claude/skills/canvas-design/canvas-fonts/YoungSerif-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2023 The Young Serif Project Authors (https://github.com/noirblancrouge/YoungSerif) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. 
+ +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/.claude/skills/canvas-design/canvas-fonts/YoungSerif-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/YoungSerif-Regular.ttf new file mode 100644 index 0000000..f454fbe Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/YoungSerif-Regular.ttf differ diff --git a/.claude/skills/doc-coauthoring/SKILL.md b/.claude/skills/doc-coauthoring/SKILL.md new file mode 100644 index 0000000..a5a6983 --- /dev/null +++ b/.claude/skills/doc-coauthoring/SKILL.md @@ -0,0 +1,375 @@ +--- +name: doc-coauthoring +description: Guide users through a structured workflow for co-authoring documentation. Use when user wants to write documentation, proposals, technical specs, decision docs, or similar structured content. This workflow helps users efficiently transfer context, refine content through iteration, and verify the doc works for readers. Trigger when user mentions writing docs, creating proposals, drafting specs, or similar documentation tasks. +--- + +# Doc Co-Authoring Workflow + +This skill provides a structured workflow for guiding users through collaborative document creation. Act as an active guide, walking users through three stages: Context Gathering, Refinement & Structure, and Reader Testing. + +## When to Offer This Workflow + +**Trigger conditions:** +- User mentions writing documentation: "write a doc", "draft a proposal", "create a spec", "write up" +- User mentions specific doc types: "PRD", "design doc", "decision doc", "RFC" +- User seems to be starting a substantial writing task + +**Initial offer:** +Offer the user a structured workflow for co-authoring the document. Explain the three stages: + +1. **Context Gathering**: User provides all relevant context while Claude asks clarifying questions +2. **Refinement & Structure**: Iteratively build each section through brainstorming and editing +3. 
**Reader Testing**: Test the doc with a fresh Claude (no context) to catch blind spots before others read it + +Explain that this approach helps ensure the doc works well when others read it (including when they paste it into Claude). Ask if they want to try this workflow or prefer to work freeform. + +If user declines, work freeform. If user accepts, proceed to Stage 1. + +## Stage 1: Context Gathering + +**Goal:** Close the gap between what the user knows and what Claude knows, enabling smart guidance later. + +### Initial Questions + +Start by asking the user for meta-context about the document: + +1. What type of document is this? (e.g., technical spec, decision doc, proposal) +2. Who's the primary audience? +3. What's the desired impact when someone reads this? +4. Is there a template or specific format to follow? +5. Any other constraints or context to know? + +Inform them they can answer in shorthand or dump information however works best for them. + +**If user provides a template or mentions a doc type:** +- Ask if they have a template document to share +- If they provide a link to a shared document, use the appropriate integration to fetch it +- If they provide a file, read it + +**If user mentions editing an existing shared document:** +- Use the appropriate integration to read the current state +- Check for images without alt-text +- If images exist without alt-text, explain that when others use Claude to understand the doc, Claude won't be able to see them. Ask if they want alt-text generated. If so, request they paste each image into chat for descriptive alt-text generation. + +### Info Dumping + +Once initial questions are answered, encourage the user to dump all the context they have. Request information such as: +- Background on the project/problem +- Related team discussions or shared documents +- Why alternative solutions aren't being used +- Organizational context (team dynamics, past incidents, politics) +- Timeline pressures or constraints +- Technical architecture or dependencies +- Stakeholder concerns + +Advise them not to worry about organizing it - just get it all out. Offer multiple ways to provide context: +- Info dump stream-of-consciousness +- Point to team channels or threads to read +- Link to shared documents + +**If integrations are available** (e.g., Slack, Teams, Google Drive, SharePoint, or other MCP servers), mention that these can be used to pull in context directly. + +**If no integrations are detected and in Claude.ai or Claude app:** Suggest they can enable connectors in their Claude settings to allow pulling context from messaging apps and document storage directly. + +Inform them clarifying questions will be asked once they've done their initial dump. + +**During context gathering:** + +- If user mentions team channels or shared documents: + - If integrations available: Inform them the content will be read now, then use the appropriate integration + - If integrations not available: Explain lack of access. Suggest they enable connectors in Claude settings, or paste the relevant content directly. 
+ +- If user mentions entities/projects that are unknown: + - Ask if connected tools should be searched to learn more + - Wait for user confirmation before searching + +- As user provides context, track what's being learned and what's still unclear + +**Asking clarifying questions:** + +When user signals they've done their initial dump (or after substantial context provided), ask clarifying questions to ensure understanding: + +Generate 5-10 numbered questions based on gaps in the context. + +Inform them they can use shorthand to answer (e.g., "1: yes, 2: see #channel, 3: no because backwards compat"), link to more docs, point to channels to read, or just keep info-dumping. Whatever's most efficient for them. + +**Exit condition:** +Sufficient context has been gathered when questions show understanding - when edge cases and trade-offs can be asked about without needing basics explained. + +**Transition:** +Ask if there's any more context they want to provide at this stage, or if it's time to move on to drafting the document. + +If user wants to add more, let them. When ready, proceed to Stage 2. + +## Stage 2: Refinement & Structure + +**Goal:** Build the document section by section through brainstorming, curation, and iterative refinement. + +**Instructions to user:** +Explain that the document will be built section by section. For each section: +1. Clarifying questions will be asked about what to include +2. 5-20 options will be brainstormed +3. User will indicate what to keep/remove/combine +4. The section will be drafted +5. It will be refined through surgical edits + +Start with whichever section has the most unknowns (usually the core decision/proposal), then work through the rest. + +**Section ordering:** + +If the document structure is clear: +Ask which section they'd like to start with. + +Suggest starting with whichever section has the most unknowns. For decision docs, that's usually the core proposal. For specs, it's typically the technical approach. Summary sections are best left for last. + +If user doesn't know what sections they need: +Based on the type of document and template, suggest 3-5 sections appropriate for the doc type. + +Ask if this structure works, or if they want to adjust it. + +**Once structure is agreed:** + +Create the initial document structure with placeholder text for all sections. + +**If access to artifacts is available:** +Use `create_file` to create an artifact. This gives both Claude and the user a scaffold to work from. + +Inform them that the initial structure with placeholders for all sections will be created. + +Create artifact with all section headers and brief placeholder text like "[To be written]" or "[Content here]". + +Provide the scaffold link and indicate it's time to fill in each section. + +**If no access to artifacts:** +Create a markdown file in the working directory. Name it appropriately (e.g., `decision-doc.md`, `technical-spec.md`). + +Inform them that the initial structure with placeholders for all sections will be created. + +Create file with all section headers and placeholder text. + +Confirm the filename has been created and indicate it's time to fill in each section. + +**For each section:** + +### Step 1: Clarifying Questions + +Announce work will begin on the [SECTION NAME] section. Ask 5-10 clarifying questions about what should be included: + +Generate 5-10 specific questions based on context and section purpose. + +Inform them they can answer in shorthand or just indicate what's important to cover. 
+
+### Step 2: Brainstorming
+
+For the [SECTION NAME] section, brainstorm [5-20] things that might be included, depending on the section's complexity. Look for:
+- Context shared that might have been forgotten
+- Angles or considerations not yet mentioned
+
+Generate 5-20 numbered options based on section complexity. At the end, offer to brainstorm more if they want additional options.
+
+### Step 3: Curation
+
+Ask which points should be kept, removed, or combined. Request brief justifications to help learn priorities for the next sections.
+
+Provide examples:
+- "Keep 1,4,7,9"
+- "Remove 3 (duplicates 1)"
+- "Remove 6 (audience already knows this)"
+- "Combine 11 and 12"
+
+**If user gives freeform feedback** (e.g., "looks good" or "I like most of it but...") instead of numbered selections, extract their preferences and proceed. Parse what they want kept/removed/changed and apply it.
+
+### Step 4: Gap Check
+
+Based on what they've selected, ask if there's anything important missing for the [SECTION NAME] section.
+
+### Step 5: Drafting
+
+Use `str_replace` to replace the placeholder text for this section with the actual drafted content.
+
+Announce the [SECTION NAME] section will be drafted now based on what they've selected.
+
+**If using artifacts:**
+After drafting, provide a link to the artifact.
+
+Ask them to read through it and indicate what to change. Note that specific feedback makes it easier to learn their preferences for the next sections.
+
+**If using a file (no artifacts):**
+After drafting, confirm completion.
+
+Inform them the [SECTION NAME] section has been drafted in [filename]. Ask them to read through it and indicate what to change. Note that specific feedback makes it easier to learn their preferences for the next sections.
+
+**Key instruction for user (include when drafting the first section):**
+Provide a note: instead of editing the doc directly, ask them to indicate what to change. This makes it easier to learn their style for future sections. For example: "Remove the X bullet - already covered by Y" or "Make the third paragraph more concise".
+
+### Step 6: Iterative Refinement
+
+As user provides feedback:
+- Use `str_replace` to make edits (never reprint the whole doc)
+- **If using artifacts:** Provide link to artifact after each edit
+- **If using files:** Just confirm edits are complete
+- If user edits doc directly and asks to read it: mentally note the changes they made and keep them in mind for future sections (this shows their preferences)
+
+**Continue iterating** until user is satisfied with the section.
+
+### Quality Checking
+
+After 3 consecutive iterations with no substantial changes, ask if anything can be removed without losing important information.
+
+When section is done, confirm [SECTION NAME] is complete. Ask if ready to move to the next section.
+
+**Repeat for all sections.**
+
+### Near Completion
+
+As the document approaches completion (80%+ of sections done), announce intention to re-read the entire document and check for:
+- Flow and consistency across sections
+- Redundancy or contradictions
+- Anything that feels like "slop" or generic filler
+- Whether every sentence carries weight
+
+Read entire document and provide feedback.
+
+**When all sections are drafted and refined:**
+Announce all sections are drafted. Indicate intention to review the complete document one more time.
+
+Review for overall coherence, flow, completeness.
+
+Provide any final suggestions.
+
+Ask if ready to move to Reader Testing, or if they want to refine anything else.
+ +## Stage 3: Reader Testing + +**Goal:** Test the document with a fresh Claude (no context bleed) to verify it works for readers. + +**Instructions to user:** +Explain that testing will now occur to see if the document actually works for readers. This catches blind spots - things that make sense to the authors but might confuse others. + +### Testing Approach + +**If access to sub-agents is available (e.g., in Claude Code):** + +Perform the testing directly without user involvement. + +### Step 1: Predict Reader Questions + +Announce intention to predict what questions readers might ask when trying to discover this document. + +Generate 5-10 questions that readers would realistically ask. + +### Step 2: Test with Sub-Agent + +Announce that these questions will be tested with a fresh Claude instance (no context from this conversation). + +For each question, invoke a sub-agent with just the document content and the question. + +Summarize what Reader Claude got right/wrong for each question. + +### Step 3: Run Additional Checks + +Announce additional checks will be performed. + +Invoke sub-agent to check for ambiguity, false assumptions, contradictions. + +Summarize any issues found. + +### Step 4: Report and Fix + +If issues found: +Report that Reader Claude struggled with specific issues. + +List the specific issues. + +Indicate intention to fix these gaps. + +Loop back to refinement for problematic sections. + +--- + +**If no access to sub-agents (e.g., claude.ai web interface):** + +The user will need to do the testing manually. + +### Step 1: Predict Reader Questions + +Ask what questions people might ask when trying to discover this document. What would they type into Claude.ai? + +Generate 5-10 questions that readers would realistically ask. + +### Step 2: Setup Testing + +Provide testing instructions: +1. Open a fresh Claude conversation: https://claude.ai +2. Paste or share the document content (if using a shared doc platform with connectors enabled, provide the link) +3. Ask Reader Claude the generated questions + +For each question, instruct Reader Claude to provide: +- The answer +- Whether anything was ambiguous or unclear +- What knowledge/context the doc assumes is already known + +Check if Reader Claude gives correct answers or misinterprets anything. + +### Step 3: Additional Checks + +Also ask Reader Claude: +- "What in this doc might be ambiguous or unclear to readers?" +- "What knowledge or context does this doc assume readers already have?" +- "Are there any internal contradictions or inconsistencies?" + +### Step 4: Iterate Based on Results + +Ask what Reader Claude got wrong or struggled with. Indicate intention to fix those gaps. + +Loop back to refinement for any problematic sections. + +--- + +### Exit Condition (Both Approaches) + +When Reader Claude consistently answers questions correctly and doesn't surface new gaps or ambiguities, the doc is ready. + +## Final Review + +When Reader Testing passes: +Announce the doc has passed Reader Claude testing. Before completion: + +1. Recommend they do a final read-through themselves - they own this document and are responsible for its quality +2. Suggest double-checking any facts, links, or technical details +3. Ask them to verify it achieves the impact they wanted + +Ask if they want one more review, or if the work is done. + +**If user wants final review, provide it. Otherwise:** +Announce document completion. 
Provide a few final tips: +- Consider linking this conversation in an appendix so readers can see how the doc was developed +- Use appendices to provide depth without bloating the main doc +- Update the doc as feedback is received from real readers + +## Tips for Effective Guidance + +**Tone:** +- Be direct and procedural +- Explain rationale briefly when it affects user behavior +- Don't try to "sell" the approach - just execute it + +**Handling Deviations:** +- If user wants to skip a stage: Ask if they want to skip this and write freeform +- If user seems frustrated: Acknowledge this is taking longer than expected. Suggest ways to move faster +- Always give user agency to adjust the process + +**Context Management:** +- Throughout, if context is missing on something mentioned, proactively ask +- Don't let gaps accumulate - address them as they come up + +**Artifact Management:** +- Use `create_file` for drafting full sections +- Use `str_replace` for all edits +- Provide artifact link after every change +- Never use artifacts for brainstorming lists - that's just conversation + +**Quality over Speed:** +- Don't rush through stages +- Each iteration should make meaningful improvements +- The goal is a document that actually works for readers diff --git a/.claude/skills/docx/LICENSE.txt b/.claude/skills/docx/LICENSE.txt new file mode 100644 index 0000000..c55ab42 --- /dev/null +++ b/.claude/skills/docx/LICENSE.txt @@ -0,0 +1,30 @@ +© 2025 Anthropic, PBC. All rights reserved. + +LICENSE: Use of these materials (including all code, prompts, assets, files, +and other components of this Skill) is governed by your agreement with +Anthropic regarding use of Anthropic's services. If no separate agreement +exists, use is governed by Anthropic's Consumer Terms of Service or +Commercial Terms of Service, as applicable: +https://www.anthropic.com/legal/consumer-terms +https://www.anthropic.com/legal/commercial-terms +Your applicable agreement is referred to as the "Agreement." "Services" are +as defined in the Agreement. + +ADDITIONAL RESTRICTIONS: Notwithstanding anything in the Agreement to the +contrary, users may not: + +- Extract these materials from the Services or retain copies of these + materials outside the Services +- Reproduce or copy these materials, except for temporary copies created + automatically during authorized use of the Services +- Create derivative works based on these materials +- Distribute, sublicense, or transfer these materials to any third party +- Make, offer to sell, sell, or import any inventions embodied in these + materials +- Reverse engineer, decompile, or disassemble these materials + +The receipt, viewing, or possession of these materials does not convey or +imply any license or right beyond those expressly granted above. + +Anthropic retains all right, title, and interest in these materials, +including all copyrights, patents, and other intellectual property rights. diff --git a/.claude/skills/docx/SKILL.md b/.claude/skills/docx/SKILL.md new file mode 100644 index 0000000..6646638 --- /dev/null +++ b/.claude/skills/docx/SKILL.md @@ -0,0 +1,197 @@ +--- +name: docx +description: "Comprehensive document creation, editing, and analysis with support for tracked changes, comments, formatting preservation, and text extraction. When Claude needs to work with professional documents (.docx files) for: (1) Creating new documents, (2) Modifying or editing content, (3) Working with tracked changes, (4) Adding comments, or any other document tasks" +license: Proprietary. 
LICENSE.txt has complete terms
+---
+
+# DOCX creation, editing, and analysis
+
+## Overview
+
+A user may ask you to create, edit, or analyze the contents of a .docx file. A .docx file is essentially a ZIP archive containing XML files and other resources that you can read or edit. You have different tools and workflows available for different tasks.
+
+## Workflow Decision Tree
+
+### Reading/Analyzing Content
+Use the "Text extraction" or "Raw XML access" sections below
+
+### Creating New Document
+Use the "Creating a new Word document" workflow
+
+### Editing Existing Document
+- **Your own document + simple changes**
+  Use the "Basic OOXML editing" workflow
+
+- **Someone else's document**
+  Use the **"Redlining workflow"** (recommended default)
+
+- **Legal, academic, business, or government docs**
+  Use the **"Redlining workflow"** (required)
+
+## Reading and analyzing content
+
+### Text extraction
+If you just need to read the text contents of a document, convert the document to markdown using pandoc. Pandoc provides excellent support for preserving document structure and can show tracked changes:
+
+```bash
+# Convert document to markdown with tracked changes
+pandoc --track-changes=all path-to-file.docx -o output.md
+# Options: --track-changes=accept/reject/all
+```
+
+### Raw XML access
+You need raw XML access for: comments, complex formatting, document structure, embedded media, and metadata. For any of these features, you'll need to unpack a document and read its raw XML contents.
+
+#### Unpacking a file
+`python ooxml/scripts/unpack.py <file.docx> <output-dir>`
+
+#### Key file structures
+* `word/document.xml` - Main document contents
+* `word/comments.xml` - Comments referenced in document.xml
+* `word/media/` - Embedded images and media files
+* Tracked changes use `<w:ins>` (insertions) and `<w:del>` (deletions) tags
+
+## Creating a new Word document
+
+When creating a new Word document from scratch, use **docx-js**, which allows you to create Word documents using JavaScript/TypeScript.
+
+### Workflow
+1. **MANDATORY - READ ENTIRE FILE**: Read [`docx-js.md`](docx-js.md) (~350 lines) completely from start to finish. **NEVER set any range limits when reading this file.** Read the full file content for detailed syntax, critical formatting rules, and best practices before proceeding with document creation.
+2. Create a JavaScript/TypeScript file using Document, Paragraph, TextRun components (you can assume all dependencies are installed; if not, refer to the dependencies section below)
+3. Export as .docx using Packer.toBuffer()
+
+## Editing an existing Word document
+
+When editing an existing Word document, use the **Document library** (a Python library for OOXML manipulation). The library automatically handles infrastructure setup and provides methods for document manipulation. For complex scenarios, you can access the underlying DOM directly through the library.
+
+### Workflow
+1. **MANDATORY - READ ENTIRE FILE**: Read [`ooxml.md`](ooxml.md) (~600 lines) completely from start to finish. **NEVER set any range limits when reading this file.** Read the full file content for the Document library API and XML patterns for directly editing document files.
+2. Unpack the document: `python ooxml/scripts/unpack.py <file.docx> <output-dir>`
+3. Create and run a Python script using the Document library (see "Document Library" section in ooxml.md)
+4. Pack the final document: `python ooxml/scripts/pack.py <unpacked-dir> <output.docx>`
+
+The Document library provides both high-level methods for common operations and direct DOM access for complex scenarios.
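+
+For orientation, here is a minimal sketch of that edit loop, assuming the Document library API described in ooxml.md (`get_node`, `replace_node`, `save`); the file names and the text being edited are illustrative placeholders, not part of the library:
+
+```python
+# Minimal sketch: a simple (untracked) edit of an existing document.
+# First unpack: python ooxml/scripts/unpack.py report.docx unpacked
+# Run with PYTHONPATH set to the docx skill root (see ooxml.md).
+from scripts.document import Document
+
+doc = Document('unpacked')
+
+# Find the run containing the text to change and replace it in place
+node = doc["word/document.xml"].get_node(tag="w:r", contains="Draft v1")
+doc["word/document.xml"].replace_node(node, '<w:r><w:t>Draft v2</w:t></w:r>')
+
+doc.save()  # validates and copies the result back to unpacked/
+# Then repack: python ooxml/scripts/pack.py unpacked report-edited.docx
+```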
+
+## Redlining workflow for document review
+
+This workflow allows you to plan comprehensive tracked changes in markdown before implementing them in OOXML. **CRITICAL**: For complete tracked changes, you must implement ALL changes systematically.
+
+**Batching Strategy**: Group related changes into batches of 3-10 changes. This keeps debugging manageable while maintaining efficiency. Test each batch before moving to the next.
+
+**Principle: Minimal, Precise Edits**
+When implementing tracked changes, only mark text that actually changes. Repeating unchanged text makes edits harder to review and appears unprofessional. Break replacements into: [unchanged text] + [deletion] + [insertion] + [unchanged text]. Preserve the original run's RSID for unchanged text by extracting the `<w:r>` element from the original and reusing it.
+
+Example - Changing "30 days" to "60 days" in a sentence:
+```python
+# BAD - Replaces entire sentence
+'<w:del><w:r><w:delText>The term is 30 days.</w:delText></w:r></w:del><w:ins><w:r><w:t>The term is 60 days.</w:t></w:r></w:ins>'
+
+# GOOD - Only marks what changed, preserves original runs for unchanged text
+'<w:r><w:t xml:space="preserve">The term is </w:t></w:r><w:del><w:r><w:delText>30</w:delText></w:r></w:del><w:ins><w:r><w:t>60</w:t></w:r></w:ins><w:r><w:t xml:space="preserve"> days.</w:t></w:r>'
+```
+
+### Tracked changes workflow
+
+1. **Get markdown representation**: Convert document to markdown with tracked changes preserved:
+   ```bash
+   pandoc --track-changes=all path-to-file.docx -o current.md
+   ```
+
+2. **Identify and group changes**: Review the document and identify ALL changes needed, organizing them into logical batches:
+
+   **Location methods** (for finding changes in XML):
+   - Section/heading numbers (e.g., "Section 3.2", "Article IV")
+   - Paragraph identifiers if numbered
+   - Grep patterns with unique surrounding text
+   - Document structure (e.g., "first paragraph", "signature block")
+   - **DO NOT use markdown line numbers** - they don't map to XML structure
+
+   **Batch organization** (group 3-10 related changes per batch):
+   - By section: "Batch 1: Section 2 amendments", "Batch 2: Section 5 updates"
+   - By type: "Batch 1: Date corrections", "Batch 2: Party name changes"
+   - By complexity: Start with simple text replacements, then tackle complex structural changes
+   - Sequential: "Batch 1: Pages 1-3", "Batch 2: Pages 4-6"
+
+3. **Read documentation and unpack**:
+   - **MANDATORY - READ ENTIRE FILE**: Read [`ooxml.md`](ooxml.md) (~600 lines) completely from start to finish. **NEVER set any range limits when reading this file.** Pay special attention to the "Document Library" and "Tracked Change Patterns" sections.
+   - **Unpack the document**: `python ooxml/scripts/unpack.py <file.docx> <output-dir>`
+   - **Note the suggested RSID**: The unpack script will suggest an RSID to use for your tracked changes. Copy this RSID for use in step 4b.
+
+4. **Implement changes in batches**: Using the batch organization from step 2, implement each batch in a single script. This approach:
+   - Makes debugging easier (a smaller batch makes errors easier to isolate)
+   - Allows incremental progress
+   - Maintains efficiency (a batch size of 3-10 changes works well)
+
+   For each batch of related changes:
+
+   **a. Map text to XML**: Grep for the text in `word/document.xml` to verify how it is split across `<w:r>` elements.
+
+   **b. Create and run script**: Use `get_node` to find nodes, implement changes, then `doc.save()`.
See **"Document Library"** section in ooxml.md for patterns. + + **Note**: Always grep `word/document.xml` immediately before writing a script to get current line numbers and verify text content. Line numbers change after each script run. + +5. **Pack the document**: After all batches are complete, convert the unpacked directory back to .docx: + ```bash + python ooxml/scripts/pack.py unpacked reviewed-document.docx + ``` + +6. **Final verification**: Do a comprehensive check of the complete document: + - Convert final document to markdown: + ```bash + pandoc --track-changes=all reviewed-document.docx -o verification.md + ``` + - Verify ALL changes were applied correctly: + ```bash + grep "original phrase" verification.md # Should NOT find it + grep "replacement phrase" verification.md # Should find it + ``` + - Check that no unintended changes were introduced + + +## Converting Documents to Images + +To visually analyze Word documents, convert them to images using a two-step process: + +1. **Convert DOCX to PDF**: + ```bash + soffice --headless --convert-to pdf document.docx + ``` + +2. **Convert PDF pages to JPEG images**: + ```bash + pdftoppm -jpeg -r 150 document.pdf page + ``` + This creates files like `page-1.jpg`, `page-2.jpg`, etc. + +Options: +- `-r 150`: Sets resolution to 150 DPI (adjust for quality/size balance) +- `-jpeg`: Output JPEG format (use `-png` for PNG if preferred) +- `-f N`: First page to convert (e.g., `-f 2` starts from page 2) +- `-l N`: Last page to convert (e.g., `-l 5` stops at page 5) +- `page`: Prefix for output files + +Example for specific range: +```bash +pdftoppm -jpeg -r 150 -f 2 -l 5 document.pdf page # Converts only pages 2-5 +``` + +## Code Style Guidelines +**IMPORTANT**: When generating code for DOCX operations: +- Write concise code +- Avoid verbose variable names and redundant operations +- Avoid unnecessary print statements + +## Dependencies + +Required dependencies (install if not available): + +- **pandoc**: `sudo apt-get install pandoc` (for text extraction) +- **docx**: `npm install -g docx` (for creating new documents) +- **LibreOffice**: `sudo apt-get install libreoffice` (for PDF conversion) +- **Poppler**: `sudo apt-get install poppler-utils` (for pdftoppm to convert PDF to images) +- **defusedxml**: `pip install defusedxml` (for secure XML parsing) \ No newline at end of file diff --git a/.claude/skills/docx/docx-js.md b/.claude/skills/docx/docx-js.md new file mode 100644 index 0000000..c6d7b2d --- /dev/null +++ b/.claude/skills/docx/docx-js.md @@ -0,0 +1,350 @@ +# DOCX Library Tutorial + +Generate .docx files with JavaScript/TypeScript. + +**Important: Read this entire document before starting.** Critical formatting rules and common pitfalls are covered throughout - skipping sections may result in corrupted files or rendering issues. 
+ +## Setup +Assumes docx is already installed globally +If not installed: `npm install -g docx` + +```javascript +const { Document, Packer, Paragraph, TextRun, Table, TableRow, TableCell, ImageRun, Media, + Header, Footer, AlignmentType, PageOrientation, LevelFormat, ExternalHyperlink, + InternalHyperlink, TableOfContents, HeadingLevel, BorderStyle, WidthType, TabStopType, + TabStopPosition, UnderlineType, ShadingType, VerticalAlign, SymbolRun, PageNumber, + FootnoteReferenceRun, Footnote, PageBreak } = require('docx'); + +// Create & Save +const doc = new Document({ sections: [{ children: [/* content */] }] }); +Packer.toBuffer(doc).then(buffer => fs.writeFileSync("doc.docx", buffer)); // Node.js +Packer.toBlob(doc).then(blob => { /* download logic */ }); // Browser +``` + +## Text & Formatting +```javascript +// IMPORTANT: Never use \n for line breaks - always use separate Paragraph elements +// ❌ WRONG: new TextRun("Line 1\nLine 2") +// ✅ CORRECT: new Paragraph({ children: [new TextRun("Line 1")] }), new Paragraph({ children: [new TextRun("Line 2")] }) + +// Basic text with all formatting options +new Paragraph({ + alignment: AlignmentType.CENTER, + spacing: { before: 200, after: 200 }, + indent: { left: 720, right: 720 }, + children: [ + new TextRun({ text: "Bold", bold: true }), + new TextRun({ text: "Italic", italics: true }), + new TextRun({ text: "Underlined", underline: { type: UnderlineType.DOUBLE, color: "FF0000" } }), + new TextRun({ text: "Colored", color: "FF0000", size: 28, font: "Arial" }), // Arial default + new TextRun({ text: "Highlighted", highlight: "yellow" }), + new TextRun({ text: "Strikethrough", strike: true }), + new TextRun({ text: "x2", superScript: true }), + new TextRun({ text: "H2O", subScript: true }), + new TextRun({ text: "SMALL CAPS", smallCaps: true }), + new SymbolRun({ char: "2022", font: "Symbol" }), // Bullet • + new SymbolRun({ char: "00A9", font: "Arial" }) // Copyright © - Arial for symbols + ] +}) +``` + +## Styles & Professional Formatting + +```javascript +const doc = new Document({ + styles: { + default: { document: { run: { font: "Arial", size: 24 } } }, // 12pt default + paragraphStyles: [ + // Document title style - override built-in Title style + { id: "Title", name: "Title", basedOn: "Normal", + run: { size: 56, bold: true, color: "000000", font: "Arial" }, + paragraph: { spacing: { before: 240, after: 120 }, alignment: AlignmentType.CENTER } }, + // IMPORTANT: Override built-in heading styles by using their exact IDs + { id: "Heading1", name: "Heading 1", basedOn: "Normal", next: "Normal", quickFormat: true, + run: { size: 32, bold: true, color: "000000", font: "Arial" }, // 16pt + paragraph: { spacing: { before: 240, after: 240 }, outlineLevel: 0 } }, // Required for TOC + { id: "Heading2", name: "Heading 2", basedOn: "Normal", next: "Normal", quickFormat: true, + run: { size: 28, bold: true, color: "000000", font: "Arial" }, // 14pt + paragraph: { spacing: { before: 180, after: 180 }, outlineLevel: 1 } }, + // Custom styles use your own IDs + { id: "myStyle", name: "My Style", basedOn: "Normal", + run: { size: 28, bold: true, color: "000000" }, + paragraph: { spacing: { after: 120 }, alignment: AlignmentType.CENTER } } + ], + characterStyles: [{ id: "myCharStyle", name: "My Char Style", + run: { color: "FF0000", bold: true, underline: { type: UnderlineType.SINGLE } } }] + }, + sections: [{ + properties: { page: { margin: { top: 1440, right: 1440, bottom: 1440, left: 1440 } } }, + children: [ + new Paragraph({ heading: 
HeadingLevel.TITLE, children: [new TextRun("Document Title")] }), // Uses overridden Title style + new Paragraph({ heading: HeadingLevel.HEADING_1, children: [new TextRun("Heading 1")] }), // Uses overridden Heading1 style + new Paragraph({ style: "myStyle", children: [new TextRun("Custom paragraph style")] }), + new Paragraph({ children: [ + new TextRun("Normal with "), + new TextRun({ text: "custom char style", style: "myCharStyle" }) + ]}) + ] + }] +}); +``` + +**Professional Font Combinations:** +- **Arial (Headers) + Arial (Body)** - Most universally supported, clean and professional +- **Times New Roman (Headers) + Arial (Body)** - Classic serif headers with modern sans-serif body +- **Georgia (Headers) + Verdana (Body)** - Optimized for screen reading, elegant contrast + +**Key Styling Principles:** +- **Override built-in styles**: Use exact IDs like "Heading1", "Heading2", "Heading3" to override Word's built-in heading styles +- **HeadingLevel constants**: `HeadingLevel.HEADING_1` uses "Heading1" style, `HeadingLevel.HEADING_2` uses "Heading2" style, etc. +- **Include outlineLevel**: Set `outlineLevel: 0` for H1, `outlineLevel: 1` for H2, etc. to ensure TOC works correctly +- **Use custom styles** instead of inline formatting for consistency +- **Set a default font** using `styles.default.document.run.font` - Arial is universally supported +- **Establish visual hierarchy** with different font sizes (titles > headers > body) +- **Add proper spacing** with `before` and `after` paragraph spacing +- **Use colors sparingly**: Default to black (000000) and shades of gray for titles and headings (heading 1, heading 2, etc.) +- **Set consistent margins** (1440 = 1 inch is standard) + + +## Lists (ALWAYS USE PROPER LISTS - NEVER USE UNICODE BULLETS) +```javascript +// Bullets - ALWAYS use the numbering config, NOT unicode symbols +// CRITICAL: Use LevelFormat.BULLET constant, NOT the string "bullet" +const doc = new Document({ + numbering: { + config: [ + { reference: "bullet-list", + levels: [{ level: 0, format: LevelFormat.BULLET, text: "•", alignment: AlignmentType.LEFT, + style: { paragraph: { indent: { left: 720, hanging: 360 } } } }] }, + { reference: "first-numbered-list", + levels: [{ level: 0, format: LevelFormat.DECIMAL, text: "%1.", alignment: AlignmentType.LEFT, + style: { paragraph: { indent: { left: 720, hanging: 360 } } } }] }, + { reference: "second-numbered-list", // Different reference = restarts at 1 + levels: [{ level: 0, format: LevelFormat.DECIMAL, text: "%1.", alignment: AlignmentType.LEFT, + style: { paragraph: { indent: { left: 720, hanging: 360 } } } }] } + ] + }, + sections: [{ + children: [ + // Bullet list items + new Paragraph({ numbering: { reference: "bullet-list", level: 0 }, + children: [new TextRun("First bullet point")] }), + new Paragraph({ numbering: { reference: "bullet-list", level: 0 }, + children: [new TextRun("Second bullet point")] }), + // Numbered list items + new Paragraph({ numbering: { reference: "first-numbered-list", level: 0 }, + children: [new TextRun("First numbered item")] }), + new Paragraph({ numbering: { reference: "first-numbered-list", level: 0 }, + children: [new TextRun("Second numbered item")] }), + // ⚠️ CRITICAL: Different reference = INDEPENDENT list that restarts at 1 + // Same reference = CONTINUES previous numbering + new Paragraph({ numbering: { reference: "second-numbered-list", level: 0 }, + children: [new TextRun("Starts at 1 again (because different reference)")] }) + ] + }] +}); + +// ⚠️ CRITICAL NUMBERING RULE: Each 
reference creates an INDEPENDENT numbered list +// - Same reference = continues numbering (1, 2, 3... then 4, 5, 6...) +// - Different reference = restarts at 1 (1, 2, 3... then 1, 2, 3...) +// Use unique reference names for each separate numbered section! + +// ⚠️ CRITICAL: NEVER use unicode bullets - they create fake lists that don't work properly +// new TextRun("• Item") // WRONG +// new SymbolRun({ char: "2022" }) // WRONG +// ✅ ALWAYS use numbering config with LevelFormat.BULLET for real Word lists +``` + +## Tables +```javascript +// Complete table with margins, borders, headers, and bullet points +const tableBorder = { style: BorderStyle.SINGLE, size: 1, color: "CCCCCC" }; +const cellBorders = { top: tableBorder, bottom: tableBorder, left: tableBorder, right: tableBorder }; + +new Table({ + columnWidths: [4680, 4680], // ⚠️ CRITICAL: Set column widths at table level - values in DXA (twentieths of a point) + margins: { top: 100, bottom: 100, left: 180, right: 180 }, // Set once for all cells + rows: [ + new TableRow({ + tableHeader: true, + children: [ + new TableCell({ + borders: cellBorders, + width: { size: 4680, type: WidthType.DXA }, // ALSO set width on each cell + // ⚠️ CRITICAL: Always use ShadingType.CLEAR to prevent black backgrounds in Word. + shading: { fill: "D5E8F0", type: ShadingType.CLEAR }, + verticalAlign: VerticalAlign.CENTER, + children: [new Paragraph({ + alignment: AlignmentType.CENTER, + children: [new TextRun({ text: "Header", bold: true, size: 22 })] + })] + }), + new TableCell({ + borders: cellBorders, + width: { size: 4680, type: WidthType.DXA }, // ALSO set width on each cell + shading: { fill: "D5E8F0", type: ShadingType.CLEAR }, + children: [new Paragraph({ + alignment: AlignmentType.CENTER, + children: [new TextRun({ text: "Bullet Points", bold: true, size: 22 })] + })] + }) + ] + }), + new TableRow({ + children: [ + new TableCell({ + borders: cellBorders, + width: { size: 4680, type: WidthType.DXA }, // ALSO set width on each cell + children: [new Paragraph({ children: [new TextRun("Regular data")] })] + }), + new TableCell({ + borders: cellBorders, + width: { size: 4680, type: WidthType.DXA }, // ALSO set width on each cell + children: [ + new Paragraph({ + numbering: { reference: "bullet-list", level: 0 }, + children: [new TextRun("First bullet point")] + }), + new Paragraph({ + numbering: { reference: "bullet-list", level: 0 }, + children: [new TextRun("Second bullet point")] + }) + ] + }) + ] + }) + ] +}) +``` + +**IMPORTANT: Table Width & Borders** +- Use BOTH `columnWidths: [width1, width2, ...]` array AND `width: { size: X, type: WidthType.DXA }` on each cell +- Values in DXA (twentieths of a point): 1440 = 1 inch, Letter usable width = 9360 DXA (with 1" margins) +- Apply borders to individual `TableCell` elements, NOT the `Table` itself + +**Precomputed Column Widths (Letter size with 1" margins = 9360 DXA total):** +- **2 columns:** `columnWidths: [4680, 4680]` (equal width) +- **3 columns:** `columnWidths: [3120, 3120, 3120]` (equal width) + +## Links & Navigation +```javascript +// TOC (requires headings) - CRITICAL: Use HeadingLevel only, NOT custom styles +// ❌ WRONG: new Paragraph({ heading: HeadingLevel.HEADING_1, style: "customHeader", children: [new TextRun("Title")] }) +// ✅ CORRECT: new Paragraph({ heading: HeadingLevel.HEADING_1, children: [new TextRun("Title")] }) +new TableOfContents("Table of Contents", { hyperlink: true, headingStyleRange: "1-3" }), + +// External link +new Paragraph({ + children: [new ExternalHyperlink({ + 
children: [new TextRun({ text: "Google", style: "Hyperlink" })], + link: "https://www.google.com" + })] +}), + +// Internal link & bookmark +new Paragraph({ + children: [new InternalHyperlink({ + children: [new TextRun({ text: "Go to Section", style: "Hyperlink" })], + anchor: "section1" + })] +}), +new Paragraph({ + children: [new TextRun("Section Content")], + bookmark: { id: "section1", name: "section1" } +}), +``` + +## Images & Media +```javascript +// Basic image with sizing & positioning +// CRITICAL: Always specify 'type' parameter - it's REQUIRED for ImageRun +new Paragraph({ + alignment: AlignmentType.CENTER, + children: [new ImageRun({ + type: "png", // NEW REQUIREMENT: Must specify image type (png, jpg, jpeg, gif, bmp, svg) + data: fs.readFileSync("image.png"), + transformation: { width: 200, height: 150, rotation: 0 }, // rotation in degrees + altText: { title: "Logo", description: "Company logo", name: "Name" } // IMPORTANT: All three fields are required + })] +}) +``` + +## Page Breaks +```javascript +// Manual page break +new Paragraph({ children: [new PageBreak()] }), + +// Page break before paragraph +new Paragraph({ + pageBreakBefore: true, + children: [new TextRun("This starts on a new page")] +}) + +// ⚠️ CRITICAL: NEVER use PageBreak standalone - it will create invalid XML that Word cannot open +// ❌ WRONG: new PageBreak() +// ✅ CORRECT: new Paragraph({ children: [new PageBreak()] }) +``` + +## Headers/Footers & Page Setup +```javascript +const doc = new Document({ + sections: [{ + properties: { + page: { + margin: { top: 1440, right: 1440, bottom: 1440, left: 1440 }, // 1440 = 1 inch + size: { orientation: PageOrientation.LANDSCAPE }, + pageNumbers: { start: 1, formatType: "decimal" } // "upperRoman", "lowerRoman", "upperLetter", "lowerLetter" + } + }, + headers: { + default: new Header({ children: [new Paragraph({ + alignment: AlignmentType.RIGHT, + children: [new TextRun("Header Text")] + })] }) + }, + footers: { + default: new Footer({ children: [new Paragraph({ + alignment: AlignmentType.CENTER, + children: [new TextRun("Page "), new TextRun({ children: [PageNumber.CURRENT] }), new TextRun(" of "), new TextRun({ children: [PageNumber.TOTAL_PAGES] })] + })] }) + }, + children: [/* content */] + }] +}); +``` + +## Tabs +```javascript +new Paragraph({ + tabStops: [ + { type: TabStopType.LEFT, position: TabStopPosition.MAX / 4 }, + { type: TabStopType.CENTER, position: TabStopPosition.MAX / 2 }, + { type: TabStopType.RIGHT, position: TabStopPosition.MAX * 3 / 4 } + ], + children: [new TextRun("Left\tCenter\tRight")] +}) +``` + +## Constants & Quick Reference +- **Underlines:** `SINGLE`, `DOUBLE`, `WAVY`, `DASH` +- **Borders:** `SINGLE`, `DOUBLE`, `DASHED`, `DOTTED` +- **Numbering:** `DECIMAL` (1,2,3), `UPPER_ROMAN` (I,II,III), `LOWER_LETTER` (a,b,c) +- **Tabs:** `LEFT`, `CENTER`, `RIGHT`, `DECIMAL` +- **Symbols:** `"2022"` (•), `"00A9"` (©), `"00AE"` (®), `"2122"` (™), `"00B0"` (°), `"F070"` (✓), `"F0FC"` (✗) + +## Critical Issues & Common Mistakes +- **CRITICAL: PageBreak must ALWAYS be inside a Paragraph** - standalone PageBreak creates invalid XML that Word cannot open +- **ALWAYS use ShadingType.CLEAR for table cell shading** - Never use ShadingType.SOLID (causes black background). 
+- Measurements in DXA (1440 = 1 inch) | Each table cell needs ≥1 Paragraph | TOC requires HeadingLevel styles only +- **ALWAYS use custom styles** with Arial font for professional appearance and proper visual hierarchy +- **ALWAYS set a default font** using `styles.default.document.run.font` - Arial recommended +- **ALWAYS use columnWidths array for tables** + individual cell widths for compatibility +- **NEVER use unicode symbols for bullets** - always use proper numbering configuration with `LevelFormat.BULLET` constant (NOT the string "bullet") +- **NEVER use \n for line breaks anywhere** - always use separate Paragraph elements for each line +- **ALWAYS use TextRun objects within Paragraph children** - never use text property directly on Paragraph +- **CRITICAL for images**: ImageRun REQUIRES `type` parameter - always specify "png", "jpg", "jpeg", "gif", "bmp", or "svg" +- **CRITICAL for bullets**: Must use `LevelFormat.BULLET` constant, not string "bullet", and include `text: "•"` for the bullet character +- **CRITICAL for numbering**: Each numbering reference creates an INDEPENDENT list. Same reference = continues numbering (1,2,3 then 4,5,6). Different reference = restarts at 1 (1,2,3 then 1,2,3). Use unique reference names for each separate numbered section! +- **CRITICAL for TOC**: When using TableOfContents, headings must use HeadingLevel ONLY - do NOT add custom styles to heading paragraphs or TOC will break +- **Tables**: Set `columnWidths` array + individual cell widths, apply borders to cells not table +- **Set table margins at TABLE level** for consistent cell padding (avoids repetition per cell) \ No newline at end of file diff --git a/.claude/skills/docx/ooxml.md b/.claude/skills/docx/ooxml.md new file mode 100644 index 0000000..7677e7b --- /dev/null +++ b/.claude/skills/docx/ooxml.md @@ -0,0 +1,610 @@ +# Office Open XML Technical Reference + +**Important: Read this entire document before starting.** This document covers: +- [Technical Guidelines](#technical-guidelines) - Schema compliance rules and validation requirements +- [Document Content Patterns](#document-content-patterns) - XML patterns for headings, lists, tables, formatting, etc. 
+- [Document Library (Python)](#document-library-python) - Recommended approach for OOXML manipulation with automatic infrastructure setup +- [Tracked Changes (Redlining)](#tracked-changes-redlining) - XML patterns for implementing tracked changes + +## Technical Guidelines + +### Schema Compliance +- **Element ordering in ``**: ``, ``, ``, ``, `` +- **Whitespace**: Add `xml:space='preserve'` to `` elements with leading/trailing spaces +- **Unicode**: Escape characters in ASCII content: `"` becomes `“` + - **Character encoding reference**: Curly quotes `""` become `“”`, apostrophe `'` becomes `’`, em-dash `—` becomes `—` +- **Tracked changes**: Use `` and `` tags with `w:author="Claude"` outside `` elements + - **Critical**: `` closes with ``, `` closes with `` - never mix + - **RSIDs must be 8-digit hex**: Use values like `00AB1234` (only 0-9, A-F characters) + - **trackRevisions placement**: Add `` after `` in settings.xml +- **Images**: Add to `word/media/`, reference in `document.xml`, set dimensions to prevent overflow + +## Document Content Patterns + +### Basic Structure +```xml + + Text content + +``` + +### Headings and Styles +```xml + + + + + + Document Title + + + + + Section Heading + +``` + +### Text Formatting +```xml + +Bold + +Italic + +Underlined + +Highlighted +``` + +### Lists +```xml + + + + + + + + First item + + + + + + + + + + New list item 1 + + + + + + + + + + + Bullet item + +``` + +### Tables +```xml + + + + + + + + + + + + Cell 1 + + + + Cell 2 + + + +``` + +### Layout +```xml + + + + + + + + + + + + New Section Title + + + + + + + + + + Centered text + + + + + + + + Monospace text + + + + + + + This text is Courier New + + and this text uses default font + +``` + +## File Updates + +When adding content, update these files: + +**`word/_rels/document.xml.rels`:** +```xml + + +``` + +**`[Content_Types].xml`:** +```xml + + +``` + +### Images +**CRITICAL**: Calculate dimensions to prevent page overflow and maintain aspect ratio. + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +### Links (Hyperlinks) + +**IMPORTANT**: All hyperlinks (both internal and external) require the Hyperlink style to be defined in styles.xml. Without this style, links will look like regular text instead of blue underlined clickable links. + +**External Links:** +```xml + + + + + Link Text + + + + + +``` + +**Internal Links:** + +```xml + + + + + Link Text + + + + + +Target content + +``` + +**Hyperlink Style (required in styles.xml):** +```xml + + + + + + + + + + +``` + +## Document Library (Python) + +Use the Document class from `scripts/document.py` for all tracked changes and comments. It automatically handles infrastructure setup (people.xml, RSIDs, settings.xml, comment files, relationships, content types). Only use direct XML manipulation for complex scenarios not supported by the library. 
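+
+As a quick orientation before the detailed sections below, a complete tracked-change session can be as short as the following sketch (the directory name and target text are illustrative placeholders):
+
+```python
+# Minimal tracked-change session; infrastructure (people.xml, RSIDs,
+# settings.xml) is set up automatically by the Document class.
+from scripts.document import Document
+
+doc = Document('unpacked', author="Claude")
+node = doc["word/document.xml"].get_node(tag="w:r", contains="obsolete clause")
+doc["word/document.xml"].suggest_deletion(node)  # wraps the run in a tracked <w:del>
+doc.save()
+```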
+
+**Working with Unicode and Entities:**
+- **Searching**: Both entity notation and Unicode characters work - `contains="&#8220;Company"` and `contains="\u201cCompany"` find the same text
+- **Replacing**: Use either entities (`&#8220;`) or Unicode (`\u201c`) - both work and will be converted appropriately based on the file's encoding (ascii → entities, utf-8 → Unicode)
+
+### Initialization
+
+**Find the docx skill root** (directory containing `scripts/` and `ooxml/`):
+```bash
+# Search for document.py to locate the skill root
+# Note: /mnt/skills is used here as an example; check your context for the actual location
+find /mnt/skills -name "document.py" -path "*/docx/scripts/*" 2>/dev/null | head -1
+# Example output: /mnt/skills/docx/scripts/document.py
+# Skill root is: /mnt/skills/docx
+```
+
+**Run your script with PYTHONPATH** set to the docx skill root:
+```bash
+PYTHONPATH=/mnt/skills/docx python your_script.py
+```
+
+**In your script**, import from the skill root:
+```python
+from scripts.document import Document, DocxXMLEditor
+
+# Basic initialization (automatically creates temp copy and sets up infrastructure)
+doc = Document('unpacked')
+
+# Customize author and initials
+doc = Document('unpacked', author="John Doe", initials="JD")
+
+# Enable track revisions mode
+doc = Document('unpacked', track_revisions=True)
+
+# Specify custom RSID (auto-generated if not provided)
+doc = Document('unpacked', rsid="07DC5ECB")
+```
+
+### Creating Tracked Changes
+
+**CRITICAL**: Only mark text that actually changes. Keep ALL unchanged text outside `<w:ins>`/`<w:del>` tags. Marking unchanged text makes edits unprofessional and harder to review.
+
+**Attribute Handling**: The Document class auto-injects attributes (w:id, w:date, w:rsidR, w:rsidDel, w16du:dateUtc, xml:space) into new elements. When preserving unchanged text from the original document, copy the original `<w:r>` element with its existing attributes to maintain document integrity.
+
+**Method Selection Guide**:
+- **Adding your own changes to regular text**: Use `replace_node()` with `<w:ins>`/`<w:del>` tags, or `suggest_deletion()` for removing entire `<w:r>` or `<w:p>` elements
+- **Partially modifying another author's tracked change**: Use `replace_node()` to nest your changes inside their `<w:ins>`/`<w:del>`
+- **Completely rejecting another author's insertion**: Use `revert_insertion()` on the `<w:ins>` element (NOT `suggest_deletion()`)
+- **Completely rejecting another author's deletion**: Use `revert_deletion()` on the `<w:del>` element to restore deleted content using tracked changes
+
+```python
+# Minimal edit - change one word: "The report is monthly" → "The report is quarterly"
+# Original: <w:r><w:rPr>...</w:rPr><w:t>The report is monthly</w:t></w:r>
+node = doc["word/document.xml"].get_node(tag="w:r", contains="The report is monthly")
+rpr = tags[0].toxml() if (tags := node.getElementsByTagName("w:rPr")) else ""
+replacement = f'<w:r>{rpr}<w:t xml:space="preserve">The report is </w:t></w:r><w:del><w:r>{rpr}<w:delText>monthly</w:delText></w:r></w:del><w:ins><w:r>{rpr}<w:t>quarterly</w:t></w:r></w:ins>'
+doc["word/document.xml"].replace_node(node, replacement)
+
+# Minimal edit - change number: "within 30 days" → "within 45 days"
+# Original: <w:r><w:rPr>...</w:rPr><w:t>within 30 days</w:t></w:r>
+node = doc["word/document.xml"].get_node(tag="w:r", contains="within 30 days")
+rpr = tags[0].toxml() if (tags := node.getElementsByTagName("w:rPr")) else ""
+replacement = f'<w:r>{rpr}<w:t xml:space="preserve">within </w:t></w:r><w:del><w:r>{rpr}<w:delText>30</w:delText></w:r></w:del><w:ins><w:r>{rpr}<w:t>45</w:t></w:r></w:ins><w:r>{rpr}<w:t xml:space="preserve"> days</w:t></w:r>'
+doc["word/document.xml"].replace_node(node, replacement)
+
+# Complete replacement - preserve formatting even when replacing all text
+node = doc["word/document.xml"].get_node(tag="w:r", contains="apple")
+rpr = tags[0].toxml() if (tags := node.getElementsByTagName("w:rPr")) else ""
+replacement = f'<w:del><w:r>{rpr}<w:delText>apple</w:delText></w:r></w:del><w:ins><w:r>{rpr}<w:t>banana orange</w:t></w:r></w:ins>'
+doc["word/document.xml"].replace_node(node, replacement)
+
+# Insert new content (no attributes needed - auto-injected)
+node = doc["word/document.xml"].get_node(tag="w:r", contains="existing text")
+doc["word/document.xml"].insert_after(node, '<w:ins><w:r><w:t>new text</w:t></w:r></w:ins>')
+
+# Partially delete another author's insertion
+# Original: <w:ins w:id="5" w:author="Jane Smith"><w:r><w:t>quarterly financial report</w:t></w:r></w:ins>
+# Goal: Delete only "financial" to make it "quarterly report"
+node = doc["word/document.xml"].get_node(tag="w:ins", attrs={"w:id": "5"})
+# IMPORTANT: Preserve w:author="Jane Smith" on the outer <w:ins> to maintain authorship
+replacement = '''<w:ins w:id="5" w:author="Jane Smith">
+    <w:r><w:t xml:space="preserve">quarterly </w:t></w:r>
+    <w:del><w:r><w:delText xml:space="preserve">financial </w:delText></w:r></w:del>
+    <w:r><w:t>report</w:t></w:r>
+</w:ins>'''
+doc["word/document.xml"].replace_node(node, replacement)
+
+# Change part of another author's insertion
+# Original: <w:ins w:id="8" w:author="Jane Smith"><w:r><w:t>in silence, safe and sound</w:t></w:r></w:ins>
+# Goal: Change "safe and sound" to "soft and unbound"
+node = doc["word/document.xml"].get_node(tag="w:ins", attrs={"w:id": "8"})
+replacement = '''<w:ins w:id="8" w:author="Jane Smith">
+    <w:r><w:t xml:space="preserve">in silence, </w:t></w:r>
+</w:ins>
+<w:ins>
+    <w:r><w:t>soft and unbound</w:t></w:r>
+</w:ins>
+<w:ins w:author="Jane Smith">
+    <w:del><w:r><w:delText>safe and sound</w:delText></w:r></w:del>
+</w:ins>'''
+doc["word/document.xml"].replace_node(node, replacement)
+
+# Delete entire run (use only when deleting all content; use replace_node for partial deletions)
+node = doc["word/document.xml"].get_node(tag="w:r", contains="text to delete")
+doc["word/document.xml"].suggest_deletion(node)
+
+# Delete entire paragraph (in-place, handles both regular and numbered list paragraphs)
+para = doc["word/document.xml"].get_node(tag="w:p", contains="paragraph to delete")
+doc["word/document.xml"].suggest_deletion(para)
+
+# Add new numbered list item
+target_para = doc["word/document.xml"].get_node(tag="w:p", contains="existing list item")
+pPr = tags[0].toxml() if (tags := target_para.getElementsByTagName("w:pPr")) else ""
+new_item = f'<w:p>{pPr}<w:r><w:t>New item</w:t></w:r></w:p>'
+tracked_para = DocxXMLEditor.suggest_paragraph(new_item)
+doc["word/document.xml"].insert_after(target_para, tracked_para)
+# Optional: add a spacing paragraph before the content for better visual separation
+# spacing = DocxXMLEditor.suggest_paragraph('<w:p/>')
+# doc["word/document.xml"].insert_after(target_para, spacing + tracked_para)
+```
+
+### Adding Comments
+
+```python
+# Add comment spanning two existing tracked changes
+# Note: w:id is auto-generated. Only search by w:id if you know it from XML inspection
+start_node = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "1"})
+end_node = doc["word/document.xml"].get_node(tag="w:ins", attrs={"w:id": "2"})
+doc.add_comment(start=start_node, end=end_node, text="Explanation of this change")
+
+# Add comment on a paragraph
+para = doc["word/document.xml"].get_node(tag="w:p", contains="paragraph text")
+doc.add_comment(start=para, end=para, text="Comment on this paragraph")
+
+# Add comment on newly created tracked change
+# First create the tracked change
+node = doc["word/document.xml"].get_node(tag="w:r", contains="old")
+new_nodes = doc["word/document.xml"].replace_node(
+    node,
+    '<w:del><w:r><w:delText>old</w:delText></w:r></w:del><w:ins><w:r><w:t>new</w:t></w:r></w:ins>'
+)
+# Then add comment on the newly created elements
+# new_nodes[0] is the <w:del>, new_nodes[1] is the <w:ins>
+doc.add_comment(start=new_nodes[0], end=new_nodes[1], text="Changed old to new per requirements")
+
+# Reply to existing comment
+doc.reply_to_comment(parent_comment_id=0, text="I agree with this change")
+```
+
+### Rejecting Tracked Changes
+
+**IMPORTANT**: Use `revert_insertion()` to reject insertions and `revert_deletion()` to restore deletions using tracked changes. Use `suggest_deletion()` only for regular unmarked content.
+
+```python
+# Reject insertion (wraps it in deletion)
+# Use this when another author inserted text that you want to delete
+ins = doc["word/document.xml"].get_node(tag="w:ins", attrs={"w:id": "5"})
+nodes = doc["word/document.xml"].revert_insertion(ins)  # Returns [ins]
+
+# Reject deletion (creates insertion to restore deleted content)
+# Use this when another author deleted text that you want to restore
+del_elem = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "3"})
+nodes = doc["word/document.xml"].revert_deletion(del_elem)  # Returns [del_elem, new_ins]
+
+# Reject all insertions in a paragraph
+para = doc["word/document.xml"].get_node(tag="w:p", contains="paragraph text")
+nodes = doc["word/document.xml"].revert_insertion(para)  # Returns [para]
+
+# Reject all deletions in a paragraph
+para = doc["word/document.xml"].get_node(tag="w:p", contains="paragraph text")
+nodes = doc["word/document.xml"].revert_deletion(para)  # Returns [para]
+```
+
+### Inserting Images
+
+**CRITICAL**: The Document class works with a temporary copy at `doc.unpacked_path`. Always copy images to this temp directory, not the original unpacked folder.
+
+```python
+from PIL import Image
+import shutil, os
+
+# Initialize document first
+doc = Document('unpacked')
+
+# Copy image and calculate full-width dimensions with aspect ratio
+media_dir = os.path.join(doc.unpacked_path, 'word/media')
+os.makedirs(media_dir, exist_ok=True)
+shutil.copy('image.png', os.path.join(media_dir, 'image1.png'))
+img = Image.open(os.path.join(media_dir, 'image1.png'))
+width_emus = int(6.5 * 914400)  # 6.5" usable width, 914400 EMUs/inch
+height_emus = int(width_emus * img.size[1] / img.size[0])
+
+# Add relationship and content type
+rels_editor = doc['word/_rels/document.xml.rels']
+next_rid = rels_editor.get_next_rid()
+rels_editor.append_to(rels_editor.dom.documentElement,
+    f'<Relationship Id="{next_rid}" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/image" Target="media/image1.png"/>')
+doc['[Content_Types].xml'].append_to(doc['[Content_Types].xml'].dom.documentElement,
+    '<Default Extension="png" ContentType="image/png"/>')
+
+# Insert image (inline DrawingML; dimensions in EMUs calculated above)
+node = doc["word/document.xml"].get_node(tag="w:p", line_number=100)
+doc["word/document.xml"].insert_after(node, f'''<w:p>
+  <w:r>
+    <w:drawing>
+      <wp:inline distT="0" distB="0" distL="0" distR="0">
+        <wp:extent cx="{width_emus}" cy="{height_emus}"/>
+        <wp:docPr id="100" name="image1.png"/>
+        <a:graphic xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
+          <a:graphicData uri="http://schemas.openxmlformats.org/drawingml/2006/picture">
+            <pic:pic xmlns:pic="http://schemas.openxmlformats.org/drawingml/2006/picture">
+              <pic:nvPicPr>
+                <pic:cNvPr id="100" name="image1.png"/>
+                <pic:cNvPicPr/>
+              </pic:nvPicPr>
+              <pic:blipFill>
+                <a:blip r:embed="{next_rid}"/>
+                <a:stretch><a:fillRect/></a:stretch>
+              </pic:blipFill>
+              <pic:spPr>
+                <a:xfrm>
+                  <a:off x="0" y="0"/>
+                  <a:ext cx="{width_emus}" cy="{height_emus}"/>
+                </a:xfrm>
+                <a:prstGeom prst="rect"><a:avLst/></a:prstGeom>
+              </pic:spPr>
+            </pic:pic>
+          </a:graphicData>
+        </a:graphic>
+      </wp:inline>
+    </w:drawing>
+  </w:r>
+</w:p>''')
+```
+
+### Getting Nodes
+
+```python
+# By text content
+node = doc["word/document.xml"].get_node(tag="w:p", contains="specific text")
+
+# By line range
+para = doc["word/document.xml"].get_node(tag="w:p", line_number=range(100, 150))
+
+# By attributes
+node = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "1"})
+
+# By exact line number (must be the line number where the tag opens)
+para = doc["word/document.xml"].get_node(tag="w:p", line_number=42)
+
+# Combine filters
+node = doc["word/document.xml"].get_node(tag="w:r", line_number=range(40, 60), contains="text")
+
+# Disambiguate when text appears multiple times - add a line_number range
+node = doc["word/document.xml"].get_node(tag="w:r", contains="Section", line_number=range(2400, 2500))
+```
+
+### Saving
+
+```python
+# Save with automatic validation (copies back to original directory)
+doc.save()  # Validates by default, raises error if validation fails
+
+# Save to different location
+doc.save('modified-unpacked')
+
+# Skip validation (debugging only - needing this in production indicates XML issues)
+doc.save(validate=False)
+```
+
+### Direct DOM Manipulation
+
+For complex scenarios not covered by the library:
+
+```python
+# Access any XML file
+editor = doc["word/document.xml"]
+editor = doc["word/comments.xml"]
+
+# Direct DOM access (defusedxml.minidom.Document)
+node = doc["word/document.xml"].get_node(tag="w:p", line_number=5)
+parent = node.parentNode
+parent.removeChild(node)
+parent.appendChild(node)  # Move to end
+
+# General document manipulation (without tracked changes)
+old_node = doc["word/document.xml"].get_node(tag="w:p", contains="original text")
+doc["word/document.xml"].replace_node(old_node, '<w:p><w:r><w:t>replacement text</w:t></w:r></w:p>')
+
+# Multiple insertions - use return value to maintain order
+node = doc["word/document.xml"].get_node(tag="w:r", line_number=100)
+nodes = doc["word/document.xml"].insert_after(node, '<w:r><w:t>A</w:t></w:r>')
+nodes = doc["word/document.xml"].insert_after(nodes[-1], '<w:r><w:t>B</w:t></w:r>')
+nodes = doc["word/document.xml"].insert_after(nodes[-1], '<w:r><w:t>C</w:t></w:r>')
+# Results in: original_node, A, B, C
+```
+
+## Tracked Changes (Redlining)
+
+**Use the Document class above for all tracked changes.** The patterns below are for reference when constructing replacement XML strings.
+
+### Validation Rules
+The validator checks that the document text matches the original after reverting Claude's changes. This means:
+- **NEVER modify text inside another author's `<w:ins>` or `<w:del>` tags**
+- **ALWAYS use nested deletions** to remove another author's insertions
+- **Every edit must be properly tracked** with `<w:ins>` or `<w:del>` tags
+
+### Tracked Change Patterns
+
+**CRITICAL RULES**:
+1. Never modify the content inside another author's tracked changes. Always use nested deletions.
+2. **XML Structure**: Always place `<w:ins>` and `<w:del>` at paragraph level containing complete `<w:r>` elements. Never nest them inside `<w:r>` elements - this creates invalid XML that breaks document processing.
+
+**Text Insertion:**
+```xml
+<w:ins w:id="1" w:author="Claude">
+  <w:r>
+    <w:t>inserted text</w:t>
+  </w:r>
+</w:ins>
+```
+
+**Text Deletion:**
+```xml
+<w:del w:id="2" w:author="Claude">
+  <w:r>
+    <w:delText>deleted text</w:delText>
+  </w:r>
+</w:del>
+```
+
+**Deleting Another Author's Insertion (MUST use nested structure):**
+```xml
+<w:ins w:id="5" w:author="Jane Smith">
+  <w:del w:id="6" w:author="Claude">
+    <w:r>
+      <w:delText>monthly</w:delText>
+    </w:r>
+  </w:del>
+</w:ins>
+<w:ins w:id="7" w:author="Claude">
+  <w:r>
+    <w:t>weekly</w:t>
+  </w:r>
+</w:ins>
+```
+
+**Restoring Another Author's Deletion:**
+```xml
+<w:del w:id="3" w:author="Jane Smith">
+  <w:r>
+    <w:delText>within 30 days</w:delText>
+  </w:r>
+</w:del>
+<w:ins w:id="4" w:author="Claude">
+  <w:r>
+    <w:t>within 30 days</w:t>
+  </w:r>
+</w:ins>
+```
\ No newline at end of file
diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chart.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chart.xsd
new file mode 100644
index 0000000..6454ef9
--- /dev/null
+++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chart.xsd
@@ -0,0 +1,1499 @@
diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd
new file mode 100644
index 0000000..afa4f46
--- /dev/null
+++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd
@@ -0,0 +1,146 @@
diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd
new file mode 100644
index 0000000..64e66b8
--- /dev/null
+++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd
@@ -0,0 +1,1085 @@
diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd
new file mode 100644
index 0000000..687eea8
--- /dev/null
+++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd
@@ -0,0 +1,11 @@
diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-main.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-main.xsd
new file mode 100644
index 0000000..6ac81b0
--- /dev/null
+++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-main.xsd
@@ -0,0 +1,3081 @@
diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-picture.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-picture.xsd
new file mode 100644
index 0000000..1dbf051
--- /dev/null
+++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-picture.xsd
@@ -0,0 +1,23 @@
diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd
new file mode 100644
index 0000000..f1af17d
--- /dev/null
+++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd
@@ -0,0 +1,185 @@
diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd
new file mode 100644
index 0000000..0a185ab
--- /dev/null
+++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd
@@ -0,0 +1,287 @@
diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/pml.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/pml.xsd
new file mode 100644
index 0000000..14ef488
--- /dev/null
+++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/pml.xsd
@@ -0,0 +1,1676 @@
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd new file mode 100644 index 0000000..c20f3bf --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd new file mode 100644 index 0000000..ac60252 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd @@ -0,0 +1,144 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd new file mode 100644 index 0000000..424b8ba --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd @@ -0,0 +1,174 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd 
b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd new file mode 100644 index 0000000..2bddce2 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd new file mode 100644 index 0000000..8a8c18b --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd new file mode 100644 index 0000000..5c42706 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd new file mode 100644 index 0000000..853c341 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd new file mode 100644 index 0000000..da835ee --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd @@ -0,0 +1,195 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-math.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-math.xsd new file mode 100644 index 0000000..87ad265 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-math.xsd @@ -0,0 +1,582 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd new file mode 100644 index 0000000..9e86f1b --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/sml.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/sml.xsd new file mode 100644 index 0000000..d0be42e --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/sml.xsd @@ -0,0 +1,4439 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-main.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-main.xsd new file mode 100644 index 0000000..8821dd1 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-main.xsd @@ -0,0 +1,570 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd new file mode 100644 index 0000000..ca2575c --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd @@ -0,0 +1,509 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd new file mode 100644 index 0000000..dd079e6 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd @@ -0,0 +1,12 @@ + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd new file mode 100644 index 0000000..3dd6cf6 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd @@ -0,0 +1,108 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd new file mode 100644 index 0000000..f1041e3 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd @@ -0,0 +1,96 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/wml.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/wml.xsd new file mode 100644 index 0000000..9c5b7a6 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/wml.xsd @@ 
-0,0 +1,3646 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/xml.xsd b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/xml.xsd new file mode 100644 index 0000000..0f13678 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/xml.xsd @@ -0,0 +1,116 @@ + + + + + + See http://www.w3.org/XML/1998/namespace.html and + http://www.w3.org/TR/REC-xml for information about this namespace. + + This schema document describes the XML namespace, in a form + suitable for import by other schema documents. + + Note that local names in this namespace are intended to be defined + only by the World Wide Web Consortium or its subgroups. The + following names are currently defined in this namespace and should + not be used with conflicting semantics by any Working Group, + specification, or document instance: + + base (as an attribute name): denotes an attribute whose value + provides a URI to be used as the base for interpreting any + relative URIs in the scope of the element on which it + appears; its value is inherited. This name is reserved + by virtue of its definition in the XML Base specification. + + lang (as an attribute name): denotes an attribute whose value + is a language code for the natural language of the content of + any element; its value is inherited. This name is reserved + by virtue of its definition in the XML specification. + + space (as an attribute name): denotes an attribute whose + value is a keyword indicating what whitespace processing + discipline is intended for the content of the element; its + value is inherited. This name is reserved by virtue of its + definition in the XML specification. + + Father (in any context at all): denotes Jon Bosak, the chair of + the original XML Working Group. This name is reserved by + the following decision of the W3C XML Plenary and + XML Coordination groups: + + In appreciation for his vision, leadership and dedication + the W3C XML Plenary on this 10th day of February, 2000 + reserves for Jon Bosak in perpetuity the XML name + xml:Father + + + + + This schema defines attributes and an attribute group + suitable for use by + schemas wishing to allow xml:base, xml:lang or xml:space attributes + on elements they define. + + To enable this, such a schema must import this schema + for the XML namespace, e.g. as follows: + <schema . . .> + . . . + <import namespace="http://www.w3.org/XML/1998/namespace" + schemaLocation="http://www.w3.org/2001/03/xml.xsd"/> + + Subsequently, qualified reference to any of the attributes + or the group defined below will have the desired effect, e.g. + + <type . . .> + . . . + <attributeGroup ref="xml:specialAttrs"/> + + will define a type which will schema-validate an instance + element with any of those attributes + + + + In keeping with the XML Schema WG's standard versioning + policy, this schema document will persist at + http://www.w3.org/2001/03/xml.xsd. + At the date of issue it can also be found at + http://www.w3.org/2001/xml.xsd. + The schema document at that URI may however change in the future, + in order to remain compatible with the latest version of XML Schema + itself. In other words, if the XML Schema namespace changes, the version + of this document at + http://www.w3.org/2001/xml.xsd will change + accordingly; the version at + http://www.w3.org/2001/03/xml.xsd will not change. 
+ + + + + + In due course, we should install the relevant ISO 2- and 3-letter + codes as the enumerated possible values . . . + + + + + + + + + + + + + + + See http://www.w3.org/TR/xmlbase/ for + information about this attribute. + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-contentTypes.xsd b/.claude/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-contentTypes.xsd new file mode 100644 index 0000000..a6de9d2 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-contentTypes.xsd @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-coreProperties.xsd b/.claude/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-coreProperties.xsd new file mode 100644 index 0000000..10e978b --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-coreProperties.xsd @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-digSig.xsd b/.claude/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-digSig.xsd new file mode 100644 index 0000000..4248bf7 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-digSig.xsd @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-relationships.xsd b/.claude/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-relationships.xsd new file mode 100644 index 0000000..5649746 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-relationships.xsd @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/mce/mc.xsd b/.claude/skills/docx/ooxml/schemas/mce/mc.xsd new file mode 100644 index 0000000..ef72545 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/mce/mc.xsd @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/docx/ooxml/schemas/microsoft/wml-2010.xsd b/.claude/skills/docx/ooxml/schemas/microsoft/wml-2010.xsd new file mode 100644 index 0000000..f65f777 --- /dev/null +++ b/.claude/skills/docx/ooxml/schemas/microsoft/wml-2010.xsd @@ -0,0 +1,560 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
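These vendored schemas are what the validation code later in this commit loads. As a standalone illustration, a one-off check of a single unpacked part against wml.xsd with lxml could look like the sketch below; the part path, the working directory, and wml.xsd's ability to resolve its sibling imports by relative path are all assumptions, not part of this commit.

    # Sketch only: validate one unpacked part against the vendored wml.xsd.
    # Paths and the "unpacked/" layout are assumptions.
    import lxml.etree

    SCHEMA_DIR = ".claude/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016"

    # wml.xsd is assumed to import its sibling schemas by relative path.
    schema = lxml.etree.XMLSchema(lxml.etree.parse(f"{SCHEMA_DIR}/wml.xsd"))

    part = lxml.etree.parse("unpacked/word/document.xml")  # e.g. unpack.py output
    if not schema.validate(part):
        for error in schema.error_log:
            print(error.line, error.message)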
diff --git a/.claude/skills/docx/ooxml/scripts/pack.py b/.claude/skills/docx/ooxml/scripts/pack.py
new file mode 100644
index 0000000..68bc088
--- /dev/null
+++ b/.claude/skills/docx/ooxml/scripts/pack.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python3
+"""
+Tool to pack a directory into a .docx, .pptx, or .xlsx file with XML formatting undone.
+
+Example usage:
+    python pack.py <input_directory> <output_file> [--force]
+"""
+
+import argparse
+import shutil
+import subprocess
+import sys
+import tempfile
+import defusedxml.minidom
+import zipfile
+from pathlib import Path
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Pack a directory into an Office file")
+    parser.add_argument("input_directory", help="Unpacked Office document directory")
+    parser.add_argument("output_file", help="Output Office file (.docx/.pptx/.xlsx)")
+    parser.add_argument("--force", action="store_true", help="Skip validation")
+    args = parser.parse_args()
+
+    try:
+        success = pack_document(
+            args.input_directory, args.output_file, validate=not args.force
+        )
+
+        # Show warning if validation was skipped
+        if args.force:
+            print("Warning: Skipped validation, file may be corrupt", file=sys.stderr)
+        # Exit with error if validation failed
+        elif not success:
+            print("Contents would produce a corrupt file.", file=sys.stderr)
+            print("Please validate XML before repacking.", file=sys.stderr)
+            print("Use --force to skip validation and pack anyway.", file=sys.stderr)
+            sys.exit(1)
+
+    except ValueError as e:
+        sys.exit(f"Error: {e}")
+
+
+def pack_document(input_dir, output_file, validate=False):
+    """Pack a directory into an Office file (.docx/.pptx/.xlsx).
+
+    Args:
+        input_dir: Path to unpacked Office document directory
+        output_file: Path to output Office file
+        validate: If True, validates with soffice (default: False)
+
+    Returns:
+        bool: True if successful, False if validation failed
+    """
+    input_dir = Path(input_dir)
+    output_file = Path(output_file)
+
+    if not input_dir.is_dir():
+        raise ValueError(f"{input_dir} is not a directory")
+    if output_file.suffix.lower() not in {".docx", ".pptx", ".xlsx"}:
+        raise ValueError(f"{output_file} must be a .docx, .pptx, or .xlsx file")
+
+    # Work in temporary directory to avoid modifying original
+    with tempfile.TemporaryDirectory() as temp_dir:
+        temp_content_dir = Path(temp_dir) / "content"
+        shutil.copytree(input_dir, temp_content_dir)
+
+        # Process XML files to remove pretty-printing whitespace
+        for pattern in ["*.xml", "*.rels"]:
+            for xml_file in temp_content_dir.rglob(pattern):
+                condense_xml(xml_file)
+
+        # Create final Office file as zip archive
+        output_file.parent.mkdir(parents=True, exist_ok=True)
+        with zipfile.ZipFile(output_file, "w", zipfile.ZIP_DEFLATED) as zf:
+            for f in temp_content_dir.rglob("*"):
+                if f.is_file():
+                    zf.write(f, f.relative_to(temp_content_dir))
+
+    # Validate if requested
+    if validate:
+        if not validate_document(output_file):
+            output_file.unlink()  # Delete the corrupt file
+            return False
+
+    return True
+
+
+def validate_document(doc_path):
+    """Validate document by converting to HTML with soffice."""
+    # Determine the correct filter based on file extension
+    match doc_path.suffix.lower():
+        case ".docx":
+            filter_name = "html:HTML"
+        case ".pptx":
+            filter_name = "html:impress_html_Export"
+        case ".xlsx":
+            filter_name = "html:HTML (StarCalc)"
+
+    with tempfile.TemporaryDirectory() as temp_dir:
+        try:
+            result = subprocess.run(
+                [
+                    "soffice",
+                    "--headless",
+                    "--convert-to",
+                    filter_name,
+                    "--outdir",
+                    temp_dir,
+                    str(doc_path),
+                ],
+                capture_output=True,
+                timeout=10,
+                text=True,
+            )
+            if not (Path(temp_dir) / f"{doc_path.stem}.html").exists():
+                error_msg = result.stderr.strip() or "Document validation failed"
+                print(f"Validation error: {error_msg}", file=sys.stderr)
+                return False
+            return True
+        except FileNotFoundError:
+            print("Warning: soffice not found. Skipping validation.", file=sys.stderr)
+            return True
+        except subprocess.TimeoutExpired:
+            print("Validation error: Timeout during conversion", file=sys.stderr)
+            return False
+        except Exception as e:
+            print(f"Validation error: {e}", file=sys.stderr)
+            return False
+
+
+def condense_xml(xml_file):
+    """Strip unnecessary whitespace and remove comments."""
+    with open(xml_file, "r", encoding="utf-8") as f:
+        dom = defusedxml.minidom.parse(f)
+
+    # Process each element to remove whitespace and comments
+    for element in dom.getElementsByTagName("*"):
+        # Skip text elements (e.g. w:t) so their whitespace is preserved
+        if element.tagName.endswith(":t"):
+            continue
+
+        # Remove whitespace-only text nodes and comment nodes
+        for child in list(element.childNodes):
+            if (
+                child.nodeType == child.TEXT_NODE
+                and child.nodeValue
+                and child.nodeValue.strip() == ""
+            ) or child.nodeType == child.COMMENT_NODE:
+                element.removeChild(child)
+
+    # Write back the condensed XML
+    with open(xml_file, "wb") as f:
+        f.write(dom.toxml(encoding="UTF-8"))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.claude/skills/docx/ooxml/scripts/unpack.py b/.claude/skills/docx/ooxml/scripts/unpack.py
new file mode 100644
index 0000000..4938798
--- /dev/null
+++ b/.claude/skills/docx/ooxml/scripts/unpack.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python3
+"""Unpack and format XML contents of Office files (.docx, .pptx, .xlsx)"""
+
+import random
+import sys
+import defusedxml.minidom
+import zipfile
+from pathlib import Path
+
+# Get command line arguments
+assert len(sys.argv) == 3, "Usage: python unpack.py <office_file> <output_dir>"
+input_file, output_dir = sys.argv[1], sys.argv[2]
+
+# Extract and format
+output_path = Path(output_dir)
+output_path.mkdir(parents=True, exist_ok=True)
+zipfile.ZipFile(input_file).extractall(output_path)
+
+# Pretty print all XML files
+xml_files = list(output_path.rglob("*.xml")) + list(output_path.rglob("*.rels"))
+for xml_file in xml_files:
+    content = xml_file.read_text(encoding="utf-8")
+    dom = defusedxml.minidom.parseString(content)
+    xml_file.write_bytes(dom.toprettyxml(indent="  ", encoding="ascii"))
+
+# For .docx files, suggest an RSID for tracked changes
+if input_file.endswith(".docx"):
+    suggested_rsid = "".join(random.choices("0123456789ABCDEF", k=8))
+    print(f"Suggested RSID for edit session: {suggested_rsid}")
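Together, pack.py and unpack.py above form an unpack → edit → repack loop. A minimal driver sketch of that loop follows; the document file names are hypothetical, and both scripts are assumed to be invoked from their own directory.

    # Hypothetical round trip over the two scripts above.
    import subprocess

    # Explode report.docx into pretty-printed XML parts.
    subprocess.run(["python", "unpack.py", "report.docx", "unpacked"], check=True)

    # ... hand-edit unpacked/word/document.xml here ...

    # Re-zip; pack.py condenses the XML again and validates with soffice
    # unless --force is passed.
    subprocess.run(["python", "pack.py", "unpacked", "report-edited.docx"], check=True)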
diff --git a/.claude/skills/docx/ooxml/scripts/validate.py b/.claude/skills/docx/ooxml/scripts/validate.py
new file mode 100644
index 0000000..508c589
--- /dev/null
+++ b/.claude/skills/docx/ooxml/scripts/validate.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+"""
+Command line tool to validate Office document XML files against XSD schemas and tracked changes.
+
+Usage:
+    python validate.py <unpacked_dir> --original <original_file>
+"""
+
+import argparse
+import sys
+from pathlib import Path
+
+from validation import DOCXSchemaValidator, PPTXSchemaValidator, RedliningValidator
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Validate Office document XML files")
+    parser.add_argument(
+        "unpacked_dir",
+        help="Path to unpacked Office document directory",
+    )
+    parser.add_argument(
+        "--original",
+        required=True,
+        help="Path to original file (.docx/.pptx/.xlsx)",
+    )
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        action="store_true",
+        help="Enable verbose output",
+    )
+    args = parser.parse_args()
+
+    # Validate paths
+    unpacked_dir = Path(args.unpacked_dir)
+    original_file = Path(args.original)
+    file_extension = original_file.suffix.lower()
+    assert unpacked_dir.is_dir(), f"Error: {unpacked_dir} is not a directory"
+    assert original_file.is_file(), f"Error: {original_file} is not a file"
+    assert file_extension in [".docx", ".pptx", ".xlsx"], (
+        f"Error: {original_file} must be a .docx, .pptx, or .xlsx file"
+    )
+
+    # Pick the validators for this file type
+    match file_extension:
+        case ".docx":
+            validators = [DOCXSchemaValidator, RedliningValidator]
+        case ".pptx":
+            validators = [PPTXSchemaValidator]
+        case _:
+            print(f"Error: Validation not supported for file type {file_extension}")
+            sys.exit(1)
+
+    # Run validators
+    success = True
+    for V in validators:
+        validator = V(unpacked_dir, original_file, verbose=args.verbose)
+        if not validator.validate():
+            success = False
+
+    if success:
+        print("All validations PASSED!")
+
+    sys.exit(0 if success else 1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.claude/skills/docx/ooxml/scripts/validation/__init__.py b/.claude/skills/docx/ooxml/scripts/validation/__init__.py
new file mode 100644
index 0000000..db092ec
--- /dev/null
+++ b/.claude/skills/docx/ooxml/scripts/validation/__init__.py
@@ -0,0 +1,15 @@
+"""
+Validation modules for Word document processing.
+"""
+
+from .base import BaseSchemaValidator
+from .docx import DOCXSchemaValidator
+from .pptx import PPTXSchemaValidator
+from .redlining import RedliningValidator
+
+__all__ = [
+    "BaseSchemaValidator",
+    "DOCXSchemaValidator",
+    "PPTXSchemaValidator",
+    "RedliningValidator",
+]
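validate.py drives these classes from the command line, but they can also be instantiated directly, using the constructor signature defined in base.py below. A small sketch; the directory and file names are assumptions.

    # Hypothetical direct use of the validator classes re-exported above.
    from pathlib import Path

    from validation import DOCXSchemaValidator, RedliningValidator

    unpacked = Path("unpacked")     # directory produced by unpack.py
    original = Path("report.docx")  # the document it was unpacked from

    ok = all(
        V(unpacked, original, verbose=True).validate()
        for V in (DOCXSchemaValidator, RedliningValidator)
    )
    print("All validations PASSED!" if ok else "Validation FAILED")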
+""" + +import re +from pathlib import Path + +import lxml.etree + + +class BaseSchemaValidator: + """Base validator with common validation logic for document files.""" + + # Elements whose 'id' attributes must be unique within their file + # Format: element_name -> (attribute_name, scope) + # scope can be 'file' (unique within file) or 'global' (unique across all files) + UNIQUE_ID_REQUIREMENTS = { + # Word elements + "comment": ("id", "file"), # Comment IDs in comments.xml + "commentrangestart": ("id", "file"), # Must match comment IDs + "commentrangeend": ("id", "file"), # Must match comment IDs + "bookmarkstart": ("id", "file"), # Bookmark start IDs + "bookmarkend": ("id", "file"), # Bookmark end IDs + # Note: ins and del (track changes) can share IDs when part of same revision + # PowerPoint elements + "sldid": ("id", "file"), # Slide IDs in presentation.xml + "sldmasterid": ("id", "global"), # Slide master IDs must be globally unique + "sldlayoutid": ("id", "global"), # Slide layout IDs must be globally unique + "cm": ("authorid", "file"), # Comment author IDs + # Excel elements + "sheet": ("sheetid", "file"), # Sheet IDs in workbook.xml + "definedname": ("id", "file"), # Named range IDs + # Drawing/Shape elements (all formats) + "cxnsp": ("id", "file"), # Connection shape IDs + "sp": ("id", "file"), # Shape IDs + "pic": ("id", "file"), # Picture IDs + "grpsp": ("id", "file"), # Group shape IDs + } + + # Mapping of element names to expected relationship types + # Subclasses should override this with format-specific mappings + ELEMENT_RELATIONSHIP_TYPES = {} + + # Unified schema mappings for all Office document types + SCHEMA_MAPPINGS = { + # Document type specific schemas + "word": "ISO-IEC29500-4_2016/wml.xsd", # Word documents + "ppt": "ISO-IEC29500-4_2016/pml.xsd", # PowerPoint presentations + "xl": "ISO-IEC29500-4_2016/sml.xsd", # Excel spreadsheets + # Common file types + "[Content_Types].xml": "ecma/fouth-edition/opc-contentTypes.xsd", + "app.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd", + "core.xml": "ecma/fouth-edition/opc-coreProperties.xsd", + "custom.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd", + ".rels": "ecma/fouth-edition/opc-relationships.xsd", + # Word-specific files + "people.xml": "microsoft/wml-2012.xsd", + "commentsIds.xml": "microsoft/wml-cid-2016.xsd", + "commentsExtensible.xml": "microsoft/wml-cex-2018.xsd", + "commentsExtended.xml": "microsoft/wml-2012.xsd", + # Chart files (common across document types) + "chart": "ISO-IEC29500-4_2016/dml-chart.xsd", + # Theme files (common across document types) + "theme": "ISO-IEC29500-4_2016/dml-main.xsd", + # Drawing and media files + "drawing": "ISO-IEC29500-4_2016/dml-main.xsd", + } + + # Unified namespace constants + MC_NAMESPACE = "http://schemas.openxmlformats.org/markup-compatibility/2006" + XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace" + + # Common OOXML namespaces used across validators + PACKAGE_RELATIONSHIPS_NAMESPACE = ( + "http://schemas.openxmlformats.org/package/2006/relationships" + ) + OFFICE_RELATIONSHIPS_NAMESPACE = ( + "http://schemas.openxmlformats.org/officeDocument/2006/relationships" + ) + CONTENT_TYPES_NAMESPACE = ( + "http://schemas.openxmlformats.org/package/2006/content-types" + ) + + # Folders where we should clean ignorable namespaces + MAIN_CONTENT_FOLDERS = {"word", "ppt", "xl"} + + # All allowed OOXML namespaces (superset of all document types) + OOXML_NAMESPACES = { + "http://schemas.openxmlformats.org/officeDocument/2006/math", + 
"http://schemas.openxmlformats.org/officeDocument/2006/relationships", + "http://schemas.openxmlformats.org/schemaLibrary/2006/main", + "http://schemas.openxmlformats.org/drawingml/2006/main", + "http://schemas.openxmlformats.org/drawingml/2006/chart", + "http://schemas.openxmlformats.org/drawingml/2006/chartDrawing", + "http://schemas.openxmlformats.org/drawingml/2006/diagram", + "http://schemas.openxmlformats.org/drawingml/2006/picture", + "http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing", + "http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing", + "http://schemas.openxmlformats.org/wordprocessingml/2006/main", + "http://schemas.openxmlformats.org/presentationml/2006/main", + "http://schemas.openxmlformats.org/spreadsheetml/2006/main", + "http://schemas.openxmlformats.org/officeDocument/2006/sharedTypes", + "http://www.w3.org/XML/1998/namespace", + } + + def __init__(self, unpacked_dir, original_file, verbose=False): + self.unpacked_dir = Path(unpacked_dir).resolve() + self.original_file = Path(original_file) + self.verbose = verbose + + # Set schemas directory + self.schemas_dir = Path(__file__).parent.parent.parent / "schemas" + + # Get all XML and .rels files + patterns = ["*.xml", "*.rels"] + self.xml_files = [ + f for pattern in patterns for f in self.unpacked_dir.rglob(pattern) + ] + + if not self.xml_files: + print(f"Warning: No XML files found in {self.unpacked_dir}") + + def validate(self): + """Run all validation checks and return True if all pass.""" + raise NotImplementedError("Subclasses must implement the validate method") + + def validate_xml(self): + """Validate that all XML files are well-formed.""" + errors = [] + + for xml_file in self.xml_files: + try: + # Try to parse the XML file + lxml.etree.parse(str(xml_file)) + except lxml.etree.XMLSyntaxError as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {e.lineno}: {e.msg}" + ) + except Exception as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Unexpected error: {str(e)}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} XML violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All XML files are well-formed") + return True + + def validate_namespaces(self): + """Validate that namespace prefixes in Ignorable attributes are declared.""" + errors = [] + + for xml_file in self.xml_files: + try: + root = lxml.etree.parse(str(xml_file)).getroot() + declared = set(root.nsmap.keys()) - {None} # Exclude default namespace + + for attr_val in [ + v for k, v in root.attrib.items() if k.endswith("Ignorable") + ]: + undeclared = set(attr_val.split()) - declared + errors.extend( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Namespace '{ns}' in Ignorable but not declared" + for ns in undeclared + ) + except lxml.etree.XMLSyntaxError: + continue + + if errors: + print(f"FAILED - {len(errors)} namespace issues:") + for error in errors: + print(error) + return False + if self.verbose: + print("PASSED - All namespace prefixes properly declared") + return True + + def validate_unique_ids(self): + """Validate that specific IDs are unique according to OOXML requirements.""" + errors = [] + global_ids = {} # Track globally unique IDs across all files + + for xml_file in self.xml_files: + try: + root = lxml.etree.parse(str(xml_file)).getroot() + file_ids = {} # Track IDs that must be unique within this file + + # Remove all mc:AlternateContent elements from the tree + 
mc_elements = root.xpath( + ".//mc:AlternateContent", namespaces={"mc": self.MC_NAMESPACE} + ) + for elem in mc_elements: + elem.getparent().remove(elem) + + # Now check IDs in the cleaned tree + for elem in root.iter(): + # Get the element name without namespace + tag = ( + elem.tag.split("}")[-1].lower() + if "}" in elem.tag + else elem.tag.lower() + ) + + # Check if this element type has ID uniqueness requirements + if tag in self.UNIQUE_ID_REQUIREMENTS: + attr_name, scope = self.UNIQUE_ID_REQUIREMENTS[tag] + + # Look for the specified attribute + id_value = None + for attr, value in elem.attrib.items(): + attr_local = ( + attr.split("}")[-1].lower() + if "}" in attr + else attr.lower() + ) + if attr_local == attr_name: + id_value = value + break + + if id_value is not None: + if scope == "global": + # Check global uniqueness + if id_value in global_ids: + prev_file, prev_line, prev_tag = global_ids[ + id_value + ] + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: Global ID '{id_value}' in <{tag}> " + f"already used in {prev_file} at line {prev_line} in <{prev_tag}>" + ) + else: + global_ids[id_value] = ( + xml_file.relative_to(self.unpacked_dir), + elem.sourceline, + tag, + ) + elif scope == "file": + # Check file-level uniqueness + key = (tag, attr_name) + if key not in file_ids: + file_ids[key] = {} + + if id_value in file_ids[key]: + prev_line = file_ids[key][id_value] + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: Duplicate {attr_name}='{id_value}' in <{tag}> " + f"(first occurrence at line {prev_line})" + ) + else: + file_ids[key][id_value] = elem.sourceline + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} ID uniqueness violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All required IDs are unique") + return True + + def validate_file_references(self): + """ + Validate that all .rels files properly reference files and that all files are referenced. 
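+
+        For example (illustrative), a declaration such as
+        <Relationship Id="rId3" Target="media/image1.png"/> in
+        word/_rels/document.xml.rels must resolve to word/media/image1.png on
+        disk, and every file in the package (other than [Content_Types].xml
+        and the .rels files themselves) must be the target of at least one
+        relationship.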
+ """ + errors = [] + + # Find all .rels files + rels_files = list(self.unpacked_dir.rglob("*.rels")) + + if not rels_files: + if self.verbose: + print("PASSED - No .rels files found") + return True + + # Get all files in the unpacked directory (excluding reference files) + all_files = [] + for file_path in self.unpacked_dir.rglob("*"): + if ( + file_path.is_file() + and file_path.name != "[Content_Types].xml" + and not file_path.name.endswith(".rels") + ): # This file is not referenced by .rels + all_files.append(file_path.resolve()) + + # Track all files that are referenced by any .rels file + all_referenced_files = set() + + if self.verbose: + print( + f"Found {len(rels_files)} .rels files and {len(all_files)} target files" + ) + + # Check each .rels file + for rels_file in rels_files: + try: + # Parse relationships file + rels_root = lxml.etree.parse(str(rels_file)).getroot() + + # Get the directory where this .rels file is located + rels_dir = rels_file.parent + + # Find all relationships and their targets + referenced_files = set() + broken_refs = [] + + for rel in rels_root.findall( + ".//ns:Relationship", + namespaces={"ns": self.PACKAGE_RELATIONSHIPS_NAMESPACE}, + ): + target = rel.get("Target") + if target and not target.startswith( + ("http", "mailto:") + ): # Skip external URLs + # Resolve the target path relative to the .rels file location + if rels_file.name == ".rels": + # Root .rels file - targets are relative to unpacked_dir + target_path = self.unpacked_dir / target + else: + # Other .rels files - targets are relative to their parent's parent + # e.g., word/_rels/document.xml.rels -> targets relative to word/ + base_dir = rels_dir.parent + target_path = base_dir / target + + # Normalize the path and check if it exists + try: + target_path = target_path.resolve() + if target_path.exists() and target_path.is_file(): + referenced_files.add(target_path) + all_referenced_files.add(target_path) + else: + broken_refs.append((target, rel.sourceline)) + except (OSError, ValueError): + broken_refs.append((target, rel.sourceline)) + + # Report broken references + if broken_refs: + rel_path = rels_file.relative_to(self.unpacked_dir) + for broken_ref, line_num in broken_refs: + errors.append( + f" {rel_path}: Line {line_num}: Broken reference to {broken_ref}" + ) + + except Exception as e: + rel_path = rels_file.relative_to(self.unpacked_dir) + errors.append(f" Error parsing {rel_path}: {e}") + + # Check for unreferenced files (files that exist but are not referenced anywhere) + unreferenced_files = set(all_files) - all_referenced_files + + if unreferenced_files: + for unref_file in sorted(unreferenced_files): + unref_rel_path = unref_file.relative_to(self.unpacked_dir) + errors.append(f" Unreferenced file: {unref_rel_path}") + + if errors: + print(f"FAILED - Found {len(errors)} relationship validation errors:") + for error in errors: + print(error) + print( + "CRITICAL: These errors will cause the document to appear corrupt. " + + "Broken references MUST be fixed, " + + "and unreferenced files MUST be referenced or removed." + ) + return False + else: + if self.verbose: + print( + "PASSED - All references are valid and all files are properly referenced" + ) + return True + + def validate_all_relationship_ids(self): + """ + Validate that all r:id attributes in XML files reference existing IDs + in their corresponding .rels files, and optionally validate relationship types. 
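+
+        For example (illustrative), <w:hyperlink r:id="rId7"> in
+        word/document.xml is only valid if word/_rels/document.xml.rels
+        declares a Relationship with Id="rId7"; when an entry in
+        ELEMENT_RELATIONSHIP_TYPES names an expected type, the last segment
+        of the relationship's Type URL must also contain that type name.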
+ """ + import lxml.etree + + errors = [] + + # Process each XML file that might contain r:id references + for xml_file in self.xml_files: + # Skip .rels files themselves + if xml_file.suffix == ".rels": + continue + + # Determine the corresponding .rels file + # For dir/file.xml, it's dir/_rels/file.xml.rels + rels_dir = xml_file.parent / "_rels" + rels_file = rels_dir / f"{xml_file.name}.rels" + + # Skip if there's no corresponding .rels file (that's okay) + if not rels_file.exists(): + continue + + try: + # Parse the .rels file to get valid relationship IDs and their types + rels_root = lxml.etree.parse(str(rels_file)).getroot() + rid_to_type = {} + + for rel in rels_root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ): + rid = rel.get("Id") + rel_type = rel.get("Type", "") + if rid: + # Check for duplicate rIds + if rid in rid_to_type: + rels_rel_path = rels_file.relative_to(self.unpacked_dir) + errors.append( + f" {rels_rel_path}: Line {rel.sourceline}: " + f"Duplicate relationship ID '{rid}' (IDs must be unique)" + ) + # Extract just the type name from the full URL + type_name = ( + rel_type.split("/")[-1] if "/" in rel_type else rel_type + ) + rid_to_type[rid] = type_name + + # Parse the XML file to find all r:id references + xml_root = lxml.etree.parse(str(xml_file)).getroot() + + # Find all elements with r:id attributes + for elem in xml_root.iter(): + # Check for r:id attribute (relationship ID) + rid_attr = elem.get(f"{{{self.OFFICE_RELATIONSHIPS_NAMESPACE}}}id") + if rid_attr: + xml_rel_path = xml_file.relative_to(self.unpacked_dir) + elem_name = ( + elem.tag.split("}")[-1] if "}" in elem.tag else elem.tag + ) + + # Check if the ID exists + if rid_attr not in rid_to_type: + errors.append( + f" {xml_rel_path}: Line {elem.sourceline}: " + f"<{elem_name}> references non-existent relationship '{rid_attr}' " + f"(valid IDs: {', '.join(sorted(rid_to_type.keys())[:5])}{'...' if len(rid_to_type) > 5 else ''})" + ) + # Check if we have type expectations for this element + elif self.ELEMENT_RELATIONSHIP_TYPES: + expected_type = self._get_expected_relationship_type( + elem_name + ) + if expected_type: + actual_type = rid_to_type[rid_attr] + # Check if the actual type matches or contains the expected type + if expected_type not in actual_type.lower(): + errors.append( + f" {xml_rel_path}: Line {elem.sourceline}: " + f"<{elem_name}> references '{rid_attr}' which points to '{actual_type}' " + f"but should point to a '{expected_type}' relationship" + ) + + except Exception as e: + xml_rel_path = xml_file.relative_to(self.unpacked_dir) + errors.append(f" Error processing {xml_rel_path}: {e}") + + if errors: + print(f"FAILED - Found {len(errors)} relationship ID reference errors:") + for error in errors: + print(error) + print("\nThese ID mismatches will cause the document to appear corrupt!") + return False + else: + if self.verbose: + print("PASSED - All relationship ID references are valid") + return True + + def _get_expected_relationship_type(self, element_name): + """ + Get the expected relationship type for an element. + First checks the explicit mapping, then tries pattern detection. 
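+
+        Pattern examples (illustrative): "sldId" -> "slide",
+        "sldMasterId" -> "sldmaster", "themeReference" -> "theme". An
+        explicit ELEMENT_RELATIONSHIP_TYPES entry always takes precedence,
+        and None is returned when no expectation can be derived.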
+ """ + # Normalize element name to lowercase + elem_lower = element_name.lower() + + # Check explicit mapping first + if elem_lower in self.ELEMENT_RELATIONSHIP_TYPES: + return self.ELEMENT_RELATIONSHIP_TYPES[elem_lower] + + # Try pattern detection for common patterns + # Pattern 1: Elements ending in "Id" often expect a relationship of the prefix type + if elem_lower.endswith("id") and len(elem_lower) > 2: + # e.g., "sldId" -> "sld", "sldMasterId" -> "sldMaster" + prefix = elem_lower[:-2] # Remove "id" + # Check if this might be a compound like "sldMasterId" + if prefix.endswith("master"): + return prefix.lower() + elif prefix.endswith("layout"): + return prefix.lower() + else: + # Simple case like "sldId" -> "slide" + # Common transformations + if prefix == "sld": + return "slide" + return prefix.lower() + + # Pattern 2: Elements ending in "Reference" expect a relationship of the prefix type + if elem_lower.endswith("reference") and len(elem_lower) > 9: + prefix = elem_lower[:-9] # Remove "reference" + return prefix.lower() + + return None + + def validate_content_types(self): + """Validate that all content files are properly declared in [Content_Types].xml.""" + errors = [] + + # Find [Content_Types].xml file + content_types_file = self.unpacked_dir / "[Content_Types].xml" + if not content_types_file.exists(): + print("FAILED - [Content_Types].xml file not found") + return False + + try: + # Parse and get all declared parts and extensions + root = lxml.etree.parse(str(content_types_file)).getroot() + declared_parts = set() + declared_extensions = set() + + # Get Override declarations (specific files) + for override in root.findall( + f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Override" + ): + part_name = override.get("PartName") + if part_name is not None: + declared_parts.add(part_name.lstrip("/")) + + # Get Default declarations (by extension) + for default in root.findall( + f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Default" + ): + extension = default.get("Extension") + if extension is not None: + declared_extensions.add(extension.lower()) + + # Root elements that require content type declaration + declarable_roots = { + "sld", + "sldLayout", + "sldMaster", + "presentation", # PowerPoint + "document", # Word + "workbook", + "worksheet", # Excel + "theme", # Common + } + + # Common media file extensions that should be declared + media_extensions = { + "png": "image/png", + "jpg": "image/jpeg", + "jpeg": "image/jpeg", + "gif": "image/gif", + "bmp": "image/bmp", + "tiff": "image/tiff", + "wmf": "image/x-wmf", + "emf": "image/x-emf", + } + + # Get all files in the unpacked directory + all_files = list(self.unpacked_dir.rglob("*")) + all_files = [f for f in all_files if f.is_file()] + + # Check all XML files for Override declarations + for xml_file in self.xml_files: + path_str = str(xml_file.relative_to(self.unpacked_dir)).replace( + "\\", "/" + ) + + # Skip non-content files + if any( + skip in path_str + for skip in [".rels", "[Content_Types]", "docProps/", "_rels/"] + ): + continue + + try: + root_tag = lxml.etree.parse(str(xml_file)).getroot().tag + root_name = root_tag.split("}")[-1] if "}" in root_tag else root_tag + + if root_name in declarable_roots and path_str not in declared_parts: + errors.append( + f" {path_str}: File with <{root_name}> root not declared in [Content_Types].xml" + ) + + except Exception: + continue # Skip unparseable files + + # Check all non-XML files for Default extension declarations + for file_path in all_files: + # Skip XML files and metadata files (already 
checked above)
+                if file_path.suffix.lower() in {".xml", ".rels"}:
+                    continue
+                if file_path.name == "[Content_Types].xml":
+                    continue
+                if "_rels" in file_path.parts or "docProps" in file_path.parts:
+                    continue
+
+                extension = file_path.suffix.lstrip(".").lower()
+                if extension and extension not in declared_extensions:
+                    # Check if it's a known media extension that should be declared
+                    if extension in media_extensions:
+                        relative_path = file_path.relative_to(self.unpacked_dir)
+                        errors.append(
+                            f"  {relative_path}: File with extension '{extension}' "
+                            f"not declared in [Content_Types].xml - should add: "
+                            f'<Default Extension="{extension}" ContentType="{media_extensions[extension]}"/>'
+                        )
+
+        except Exception as e:
+            errors.append(f"  Error parsing [Content_Types].xml: {e}")
+
+        if errors:
+            print(f"FAILED - Found {len(errors)} content type declaration errors:")
+            for error in errors:
+                print(error)
+            return False
+        else:
+            if self.verbose:
+                print(
+                    "PASSED - All content files are properly declared in [Content_Types].xml"
+                )
+            return True
+
+    def validate_file_against_xsd(self, xml_file, verbose=False):
+        """Validate a single XML file against XSD schema, comparing with original.
+
+        Args:
+            xml_file: Path to XML file to validate
+            verbose: Enable verbose output
+
+        Returns:
+            tuple: (is_valid, new_errors_set) where is_valid is True/False/None (skipped)
+        """
+        # Resolve both paths to handle symlinks
+        xml_file = Path(xml_file).resolve()
+        unpacked_dir = self.unpacked_dir.resolve()
+
+        # Validate current file
+        is_valid, current_errors = self._validate_single_file_xsd(
+            xml_file, unpacked_dir
+        )
+
+        if is_valid is None:
+            return None, set()  # Skipped
+        elif is_valid:
+            return True, set()  # Valid, no errors
+
+        # Get errors from original file for this specific file
+        original_errors = self._get_original_file_errors(xml_file)
+
+        # Compare with original (both are guaranteed to be sets here)
+        assert current_errors is not None
+        new_errors = current_errors - original_errors
+
+        if new_errors:
+            if verbose:
+                relative_path = xml_file.relative_to(unpacked_dir)
+                print(f"FAILED - {relative_path}: {len(new_errors)} new error(s)")
+                for error in list(new_errors)[:3]:
+                    truncated = error[:250] + "..." if len(error) > 250 else error
+                    print(f"  - {truncated}")
+            return False, new_errors
+        else:
+            # All errors existed in original
+            if verbose:
+                print(
+                    f"PASSED - No new errors (original had {len(current_errors)} errors)"
+                )
+            return True, set()
+
+    def validate_against_xsd(self):
+        """Validate XML files against XSD schemas, showing only new errors compared to original."""
+        new_errors = []
+        original_error_count = 0
+        valid_count = 0
+        skipped_count = 0
+
+        for xml_file in self.xml_files:
+            relative_path = str(xml_file.relative_to(self.unpacked_dir))
+            is_valid, new_file_errors = self.validate_file_against_xsd(
+                xml_file, verbose=False
+            )
+
+            if is_valid is None:
+                skipped_count += 1
+                continue
+            elif is_valid and not new_file_errors:
+                valid_count += 1
+                continue
+            elif is_valid:
+                # Had errors but all existed in original
+                original_error_count += 1
+                valid_count += 1
+                continue
+
+            # Has new errors
+            new_errors.append(f"  {relative_path}: {len(new_file_errors)} new error(s)")
+            for error in list(new_file_errors)[:3]:  # Show first 3 errors
+                new_errors.append(
+                    f"    - {error[:250]}..."
if len(error) > 250 else f" - {error}" + ) + + # Print summary + if self.verbose: + print(f"Validated {len(self.xml_files)} files:") + print(f" - Valid: {valid_count}") + print(f" - Skipped (no schema): {skipped_count}") + if original_error_count: + print(f" - With original errors (ignored): {original_error_count}") + print( + f" - With NEW errors: {len(new_errors) > 0 and len([e for e in new_errors if not e.startswith(' ')]) or 0}" + ) + + if new_errors: + print("\nFAILED - Found NEW validation errors:") + for error in new_errors: + print(error) + return False + else: + if self.verbose: + print("\nPASSED - No new XSD validation errors introduced") + return True + + def _get_schema_path(self, xml_file): + """Determine the appropriate schema path for an XML file.""" + # Check exact filename match + if xml_file.name in self.SCHEMA_MAPPINGS: + return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.name] + + # Check .rels files + if xml_file.suffix == ".rels": + return self.schemas_dir / self.SCHEMA_MAPPINGS[".rels"] + + # Check chart files + if "charts/" in str(xml_file) and xml_file.name.startswith("chart"): + return self.schemas_dir / self.SCHEMA_MAPPINGS["chart"] + + # Check theme files + if "theme/" in str(xml_file) and xml_file.name.startswith("theme"): + return self.schemas_dir / self.SCHEMA_MAPPINGS["theme"] + + # Check if file is in a main content folder and use appropriate schema + if xml_file.parent.name in self.MAIN_CONTENT_FOLDERS: + return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.parent.name] + + return None + + def _clean_ignorable_namespaces(self, xml_doc): + """Remove attributes and elements not in allowed namespaces.""" + # Create a clean copy + xml_string = lxml.etree.tostring(xml_doc, encoding="unicode") + xml_copy = lxml.etree.fromstring(xml_string) + + # Remove attributes not in allowed namespaces + for elem in xml_copy.iter(): + attrs_to_remove = [] + + for attr in elem.attrib: + # Check if attribute is from a namespace other than allowed ones + if "{" in attr: + ns = attr.split("}")[0][1:] + if ns not in self.OOXML_NAMESPACES: + attrs_to_remove.append(attr) + + # Remove collected attributes + for attr in attrs_to_remove: + del elem.attrib[attr] + + # Remove elements not in allowed namespaces + self._remove_ignorable_elements(xml_copy) + + return lxml.etree.ElementTree(xml_copy) + + def _remove_ignorable_elements(self, root): + """Recursively remove all elements not in allowed namespaces.""" + elements_to_remove = [] + + # Find elements to remove + for elem in list(root): + # Skip non-element nodes (comments, processing instructions, etc.) + if not hasattr(elem, "tag") or callable(elem.tag): + continue + + tag_str = str(elem.tag) + if tag_str.startswith("{"): + ns = tag_str.split("}")[0][1:] + if ns not in self.OOXML_NAMESPACES: + elements_to_remove.append(elem) + continue + + # Recursively clean child elements + self._remove_ignorable_elements(elem) + + # Remove collected elements + for elem in elements_to_remove: + root.remove(elem) + + def _preprocess_for_mc_ignorable(self, xml_doc): + """Preprocess XML to handle mc:Ignorable attribute properly.""" + # Remove mc:Ignorable attributes before validation + root = xml_doc.getroot() + + # Remove mc:Ignorable attribute from root + if f"{{{self.MC_NAMESPACE}}}Ignorable" in root.attrib: + del root.attrib[f"{{{self.MC_NAMESPACE}}}Ignorable"] + + return xml_doc + + def _validate_single_file_xsd(self, xml_file, base_path): + """Validate a single XML file against XSD schema. 
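+
+        is_valid is None when no schema is mapped for the file (skipped);
+        error messages are stored without line numbers so they can be compared
+        against the errors of the original document.
+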
Returns (is_valid, errors_set).""" + schema_path = self._get_schema_path(xml_file) + if not schema_path: + return None, None # Skip file + + try: + # Load schema + with open(schema_path, "rb") as xsd_file: + parser = lxml.etree.XMLParser() + xsd_doc = lxml.etree.parse( + xsd_file, parser=parser, base_url=str(schema_path) + ) + schema = lxml.etree.XMLSchema(xsd_doc) + + # Load and preprocess XML + with open(xml_file, "r") as f: + xml_doc = lxml.etree.parse(f) + + xml_doc, _ = self._remove_template_tags_from_text_nodes(xml_doc) + xml_doc = self._preprocess_for_mc_ignorable(xml_doc) + + # Clean ignorable namespaces if needed + relative_path = xml_file.relative_to(base_path) + if ( + relative_path.parts + and relative_path.parts[0] in self.MAIN_CONTENT_FOLDERS + ): + xml_doc = self._clean_ignorable_namespaces(xml_doc) + + # Validate + if schema.validate(xml_doc): + return True, set() + else: + errors = set() + for error in schema.error_log: + # Store normalized error message (without line numbers for comparison) + errors.add(error.message) + return False, errors + + except Exception as e: + return False, {str(e)} + + def _get_original_file_errors(self, xml_file): + """Get XSD validation errors from a single file in the original document. + + Args: + xml_file: Path to the XML file in unpacked_dir to check + + Returns: + set: Set of error messages from the original file + """ + import tempfile + import zipfile + + # Resolve both paths to handle symlinks (e.g., /var vs /private/var on macOS) + xml_file = Path(xml_file).resolve() + unpacked_dir = self.unpacked_dir.resolve() + relative_path = xml_file.relative_to(unpacked_dir) + + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + # Extract original file + with zipfile.ZipFile(self.original_file, "r") as zip_ref: + zip_ref.extractall(temp_path) + + # Find corresponding file in original + original_xml_file = temp_path / relative_path + + if not original_xml_file.exists(): + # File didn't exist in original, so no original errors + return set() + + # Validate the specific file in original + is_valid, errors = self._validate_single_file_xsd( + original_xml_file, temp_path + ) + return errors if errors else set() + + def _remove_template_tags_from_text_nodes(self, xml_doc): + """Remove template tags from XML text nodes and collect warnings. + + Template tags follow the pattern {{ ... }} and are used as placeholders + for content replacement. They should be removed from text content before + XSD validation while preserving XML structure. 
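+
+        For example (illustrative), a stray "{{customer_name}}" left in an
+        element's tail text is stripped and recorded as a warning. Note that
+        t elements such as w:t are skipped entirely, so visible document text
+        is never altered by this step.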
+ + Returns: + tuple: (cleaned_xml_doc, warnings_list) + """ + warnings = [] + template_pattern = re.compile(r"\{\{[^}]*\}\}") + + # Create a copy of the document to avoid modifying the original + xml_string = lxml.etree.tostring(xml_doc, encoding="unicode") + xml_copy = lxml.etree.fromstring(xml_string) + + def process_text_content(text, content_type): + if not text: + return text + matches = list(template_pattern.finditer(text)) + if matches: + for match in matches: + warnings.append( + f"Found template tag in {content_type}: {match.group()}" + ) + return template_pattern.sub("", text) + return text + + # Process all text nodes in the document + for elem in xml_copy.iter(): + # Skip processing if this is a w:t element + if not hasattr(elem, "tag") or callable(elem.tag): + continue + tag_str = str(elem.tag) + if tag_str.endswith("}t") or tag_str == "t": + continue + + elem.text = process_text_content(elem.text, "text content") + elem.tail = process_text_content(elem.tail, "tail content") + + return lxml.etree.ElementTree(xml_copy), warnings + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run directly.") diff --git a/.claude/skills/docx/ooxml/scripts/validation/docx.py b/.claude/skills/docx/ooxml/scripts/validation/docx.py new file mode 100644 index 0000000..602c470 --- /dev/null +++ b/.claude/skills/docx/ooxml/scripts/validation/docx.py @@ -0,0 +1,274 @@ +""" +Validator for Word document XML files against XSD schemas. +""" + +import re +import tempfile +import zipfile + +import lxml.etree + +from .base import BaseSchemaValidator + + +class DOCXSchemaValidator(BaseSchemaValidator): + """Validator for Word document XML files against XSD schemas.""" + + # Word-specific namespace + WORD_2006_NAMESPACE = "http://schemas.openxmlformats.org/wordprocessingml/2006/main" + + # Word-specific element to relationship type mappings + # Start with empty mapping - add specific cases as we discover them + ELEMENT_RELATIONSHIP_TYPES = {} + + def validate(self): + """Run all validation checks and return True if all pass.""" + # Test 0: XML well-formedness + if not self.validate_xml(): + return False + + # Test 1: Namespace declarations + all_valid = True + if not self.validate_namespaces(): + all_valid = False + + # Test 2: Unique IDs + if not self.validate_unique_ids(): + all_valid = False + + # Test 3: Relationship and file reference validation + if not self.validate_file_references(): + all_valid = False + + # Test 4: Content type declarations + if not self.validate_content_types(): + all_valid = False + + # Test 5: XSD schema validation + if not self.validate_against_xsd(): + all_valid = False + + # Test 6: Whitespace preservation + if not self.validate_whitespace_preservation(): + all_valid = False + + # Test 7: Deletion validation + if not self.validate_deletions(): + all_valid = False + + # Test 8: Insertion validation + if not self.validate_insertions(): + all_valid = False + + # Test 9: Relationship ID reference validation + if not self.validate_all_relationship_ids(): + all_valid = False + + # Count and compare paragraphs + self.compare_paragraph_counts() + + return all_valid + + def validate_whitespace_preservation(self): + """ + Validate that w:t elements with whitespace have xml:space='preserve'. 
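+
+        Without that attribute Word collapses leading/trailing whitespace on
+        load, e.g. (illustrative) <w:t>Hello </w:t><w:t>world</w:t> renders
+        as "Helloworld" unless the first w:t carries xml:space="preserve".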
+ """ + errors = [] + + for xml_file in self.xml_files: + # Only check document.xml files + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + + # Find all w:t elements + for elem in root.iter(f"{{{self.WORD_2006_NAMESPACE}}}t"): + if elem.text: + text = elem.text + # Check if text starts or ends with whitespace + if re.match(r"^\s.*", text) or re.match(r".*\s$", text): + # Check if xml:space="preserve" attribute exists + xml_space_attr = f"{{{self.XML_NAMESPACE}}}space" + if ( + xml_space_attr not in elem.attrib + or elem.attrib[xml_space_attr] != "preserve" + ): + # Show a preview of the text + text_preview = ( + repr(text)[:50] + "..." + if len(repr(text)) > 50 + else repr(text) + ) + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: w:t element with whitespace missing xml:space='preserve': {text_preview}" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} whitespace preservation violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All whitespace is properly preserved") + return True + + def validate_deletions(self): + """ + Validate that w:t elements are not within w:del elements. + For some reason, XSD validation does not catch this, so we do it manually. + """ + errors = [] + + for xml_file in self.xml_files: + # Only check document.xml files + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + + # Find all w:t elements that are descendants of w:del elements + namespaces = {"w": self.WORD_2006_NAMESPACE} + xpath_expression = ".//w:del//w:t" + problematic_t_elements = root.xpath( + xpath_expression, namespaces=namespaces + ) + for t_elem in problematic_t_elements: + if t_elem.text: + # Show a preview of the text + text_preview = ( + repr(t_elem.text)[:50] + "..." 
+                            if len(repr(t_elem.text)) > 50
+                            else repr(t_elem.text)
+                        )
+                        errors.append(
+                            f"  {xml_file.relative_to(self.unpacked_dir)}: "
+                            f"Line {t_elem.sourceline}: <w:t> found within <w:del>: {text_preview}"
+                        )
+
+            except (lxml.etree.XMLSyntaxError, Exception) as e:
+                errors.append(
+                    f"  {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
+                )
+
+        if errors:
+            print(f"FAILED - Found {len(errors)} deletion validation violations:")
+            for error in errors:
+                print(error)
+            return False
+        else:
+            if self.verbose:
+                print("PASSED - No w:t elements found within w:del elements")
+            return True
+
+    def count_paragraphs_in_unpacked(self):
+        """Count the number of paragraphs in the unpacked document."""
+        count = 0
+
+        for xml_file in self.xml_files:
+            # Only check document.xml files
+            if xml_file.name != "document.xml":
+                continue
+
+            try:
+                root = lxml.etree.parse(str(xml_file)).getroot()
+                # Count all w:p elements
+                paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p")
+                count = len(paragraphs)
+            except Exception as e:
+                print(f"Error counting paragraphs in unpacked document: {e}")
+
+        return count
+
+    def count_paragraphs_in_original(self):
+        """Count the number of paragraphs in the original docx file."""
+        count = 0
+
+        try:
+            # Create temporary directory to unpack original
+            with tempfile.TemporaryDirectory() as temp_dir:
+                # Unpack original docx
+                with zipfile.ZipFile(self.original_file, "r") as zip_ref:
+                    zip_ref.extractall(temp_dir)
+
+                # Parse document.xml
+                doc_xml_path = temp_dir + "/word/document.xml"
+                root = lxml.etree.parse(doc_xml_path).getroot()
+
+                # Count all w:p elements
+                paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p")
+                count = len(paragraphs)
+
+        except Exception as e:
+            print(f"Error counting paragraphs in original document: {e}")
+
+        return count
+
+    def validate_insertions(self):
+        """
+        Validate that w:delText elements are not within w:ins elements.
+        w:delText is only allowed in w:ins if nested within a w:del.
+        """
+        errors = []
+
+        for xml_file in self.xml_files:
+            if xml_file.name != "document.xml":
+                continue
+
+            try:
+                root = lxml.etree.parse(str(xml_file)).getroot()
+                namespaces = {"w": self.WORD_2006_NAMESPACE}
+
+                # Find w:delText in w:ins that are NOT within w:del
+                invalid_elements = root.xpath(
+                    ".//w:ins//w:delText[not(ancestor::w:del)]",
+                    namespaces=namespaces
+                )
+
+                for elem in invalid_elements:
+                    text_preview = (
+                        repr(elem.text or "")[:50] + "..."
+                        if len(repr(elem.text or "")) > 50
+                        else repr(elem.text or "")
+                    )
+                    errors.append(
+                        f"  {xml_file.relative_to(self.unpacked_dir)}: "
+                        f"Line {elem.sourceline}: <w:delText> within <w:ins>: {text_preview}"
+                    )
+
+            except (lxml.etree.XMLSyntaxError, Exception) as e:
+                errors.append(
+                    f"  {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
+                )
+
+        if errors:
+            print(f"FAILED - Found {len(errors)} insertion validation violations:")
+            for error in errors:
+                print(error)
+            return False
+        else:
+            if self.verbose:
+                print("PASSED - No w:delText elements within w:ins elements")
+            return True
+
+    def compare_paragraph_counts(self):
+        """Compare paragraph counts between original and new document."""
+        original_count = self.count_paragraphs_in_original()
+        new_count = self.count_paragraphs_in_unpacked()
+
+        diff = new_count - original_count
+        diff_str = f"+{diff}" if diff > 0 else str(diff)
+        print(f"\nParagraphs: {original_count} → {new_count} ({diff_str})")
+
+
+if __name__ == "__main__":
+    raise RuntimeError("This module should not be run directly.")
diff --git a/.claude/skills/docx/ooxml/scripts/validation/pptx.py b/.claude/skills/docx/ooxml/scripts/validation/pptx.py
new file mode 100644
index 0000000..66d5b1e
--- /dev/null
+++ b/.claude/skills/docx/ooxml/scripts/validation/pptx.py
@@ -0,0 +1,315 @@
+"""
+Validator for PowerPoint presentation XML files against XSD schemas.
+"""
+
+import re
+
+from .base import BaseSchemaValidator
+
+
+class PPTXSchemaValidator(BaseSchemaValidator):
+    """Validator for PowerPoint presentation XML files against XSD schemas."""
+
+    # PowerPoint presentation namespace
+    PRESENTATIONML_NAMESPACE = (
+        "http://schemas.openxmlformats.org/presentationml/2006/main"
+    )
+
+    # PowerPoint-specific element to relationship type mappings
+    ELEMENT_RELATIONSHIP_TYPES = {
+        "sldid": "slide",
+        "sldmasterid": "slidemaster",
+        "notesmasterid": "notesmaster",
+        "sldlayoutid": "slidelayout",
+        "themeid": "theme",
+        "tablestyleid": "tablestyles",
+    }
+
+    def validate(self):
+        """Run all validation checks and return True if all pass."""
+        # Test 0: XML well-formedness
+        if not self.validate_xml():
+            return False
+
+        # Test 1: Namespace declarations
+        all_valid = True
+        if not self.validate_namespaces():
+            all_valid = False
+
+        # Test 2: Unique IDs
+        if not self.validate_unique_ids():
+            all_valid = False
+
+        # Test 3: UUID ID validation
+        if not self.validate_uuid_ids():
+            all_valid = False
+
+        # Test 4: Relationship and file reference validation
+        if not self.validate_file_references():
+            all_valid = False
+
+        # Test 5: Slide layout ID validation
+        if not self.validate_slide_layout_ids():
+            all_valid = False
+
+        # Test 6: Content type declarations
+        if not self.validate_content_types():
+            all_valid = False
+
+        # Test 7: XSD schema validation
+        if not self.validate_against_xsd():
+            all_valid = False
+
+        # Test 8: Notes slide reference validation
+        if not self.validate_notes_slide_references():
+            all_valid = False
+
+        # Test 9: Relationship ID reference validation
+        if not self.validate_all_relationship_ids():
+            all_valid = False
+
+        # Test 10: Duplicate slide layout references validation
+        if not self.validate_no_duplicate_slide_layouts():
+            all_valid = False
+
+        return all_valid
+
+    def validate_uuid_ids(self):
+        """Validate that ID attributes that look like UUIDs contain only hex values."""
+        import lxml.etree
+
+        errors = []
+        # UUID pattern: 8-4-4-4-12 hex digits with optional braces/hyphens
+        uuid_pattern = re.compile(
+
r"^[\{\(]?[0-9A-Fa-f]{8}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{12}[\}\)]?$" + ) + + for xml_file in self.xml_files: + try: + root = lxml.etree.parse(str(xml_file)).getroot() + + # Check all elements for ID attributes + for elem in root.iter(): + for attr, value in elem.attrib.items(): + # Check if this is an ID attribute + attr_name = attr.split("}")[-1].lower() + if attr_name == "id" or attr_name.endswith("id"): + # Check if value looks like a UUID (has the right length and pattern structure) + if self._looks_like_uuid(value): + # Validate that it contains only hex characters in the right positions + if not uuid_pattern.match(value): + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: ID '{value}' appears to be a UUID but contains invalid hex characters" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} UUID ID validation errors:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All UUID-like IDs contain valid hex values") + return True + + def _looks_like_uuid(self, value): + """Check if a value has the general structure of a UUID.""" + # Remove common UUID delimiters + clean_value = value.strip("{}()").replace("-", "") + # Check if it's 32 hex-like characters (could include invalid hex chars) + return len(clean_value) == 32 and all(c.isalnum() for c in clean_value) + + def validate_slide_layout_ids(self): + """Validate that sldLayoutId elements in slide masters reference valid slide layouts.""" + import lxml.etree + + errors = [] + + # Find all slide master files + slide_masters = list(self.unpacked_dir.glob("ppt/slideMasters/*.xml")) + + if not slide_masters: + if self.verbose: + print("PASSED - No slide masters found") + return True + + for slide_master in slide_masters: + try: + # Parse the slide master file + root = lxml.etree.parse(str(slide_master)).getroot() + + # Find the corresponding _rels file for this slide master + rels_file = slide_master.parent / "_rels" / f"{slide_master.name}.rels" + + if not rels_file.exists(): + errors.append( + f" {slide_master.relative_to(self.unpacked_dir)}: " + f"Missing relationships file: {rels_file.relative_to(self.unpacked_dir)}" + ) + continue + + # Parse the relationships file + rels_root = lxml.etree.parse(str(rels_file)).getroot() + + # Build a set of valid relationship IDs that point to slide layouts + valid_layout_rids = set() + for rel in rels_root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ): + rel_type = rel.get("Type", "") + if "slideLayout" in rel_type: + valid_layout_rids.add(rel.get("Id")) + + # Find all sldLayoutId elements in the slide master + for sld_layout_id in root.findall( + f".//{{{self.PRESENTATIONML_NAMESPACE}}}sldLayoutId" + ): + r_id = sld_layout_id.get( + f"{{{self.OFFICE_RELATIONSHIPS_NAMESPACE}}}id" + ) + layout_id = sld_layout_id.get("id") + + if r_id and r_id not in valid_layout_rids: + errors.append( + f" {slide_master.relative_to(self.unpacked_dir)}: " + f"Line {sld_layout_id.sourceline}: sldLayoutId with id='{layout_id}' " + f"references r:id='{r_id}' which is not found in slide layout relationships" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {slide_master.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} slide layout ID validation 
errors:") + for error in errors: + print(error) + print( + "Remove invalid references or add missing slide layouts to the relationships file." + ) + return False + else: + if self.verbose: + print("PASSED - All slide layout IDs reference valid slide layouts") + return True + + def validate_no_duplicate_slide_layouts(self): + """Validate that each slide has exactly one slideLayout reference.""" + import lxml.etree + + errors = [] + slide_rels_files = list(self.unpacked_dir.glob("ppt/slides/_rels/*.xml.rels")) + + for rels_file in slide_rels_files: + try: + root = lxml.etree.parse(str(rels_file)).getroot() + + # Find all slideLayout relationships + layout_rels = [ + rel + for rel in root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ) + if "slideLayout" in rel.get("Type", "") + ] + + if len(layout_rels) > 1: + errors.append( + f" {rels_file.relative_to(self.unpacked_dir)}: has {len(layout_rels)} slideLayout references" + ) + + except Exception as e: + errors.append( + f" {rels_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print("FAILED - Found slides with duplicate slideLayout references:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All slides have exactly one slideLayout reference") + return True + + def validate_notes_slide_references(self): + """Validate that each notesSlide file is referenced by only one slide.""" + import lxml.etree + + errors = [] + notes_slide_references = {} # Track which slides reference each notesSlide + + # Find all slide relationship files + slide_rels_files = list(self.unpacked_dir.glob("ppt/slides/_rels/*.xml.rels")) + + if not slide_rels_files: + if self.verbose: + print("PASSED - No slide relationship files found") + return True + + for rels_file in slide_rels_files: + try: + # Parse the relationships file + root = lxml.etree.parse(str(rels_file)).getroot() + + # Find all notesSlide relationships + for rel in root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ): + rel_type = rel.get("Type", "") + if "notesSlide" in rel_type: + target = rel.get("Target", "") + if target: + # Normalize the target path to handle relative paths + normalized_target = target.replace("../", "") + + # Track which slide references this notesSlide + slide_name = rels_file.stem.replace( + ".xml", "" + ) # e.g., "slide1" + + if normalized_target not in notes_slide_references: + notes_slide_references[normalized_target] = [] + notes_slide_references[normalized_target].append( + (slide_name, rels_file) + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {rels_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + # Check for duplicate references + for target, references in notes_slide_references.items(): + if len(references) > 1: + slide_names = [ref[0] for ref in references] + errors.append( + f" Notes slide '{target}' is referenced by multiple slides: {', '.join(slide_names)}" + ) + for slide_name, rels_file in references: + errors.append(f" - {rels_file.relative_to(self.unpacked_dir)}") + + if errors: + print( + f"FAILED - Found {len([e for e in errors if not e.startswith(' ')])} notes slide reference validation errors:" + ) + for error in errors: + print(error) + print("Each slide may optionally have its own slide file.") + return False + else: + if self.verbose: + print("PASSED - All notes slide references are unique") + return True + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run 
directly.") diff --git a/.claude/skills/docx/ooxml/scripts/validation/redlining.py b/.claude/skills/docx/ooxml/scripts/validation/redlining.py new file mode 100644 index 0000000..7ed425e --- /dev/null +++ b/.claude/skills/docx/ooxml/scripts/validation/redlining.py @@ -0,0 +1,279 @@ +""" +Validator for tracked changes in Word documents. +""" + +import subprocess +import tempfile +import zipfile +from pathlib import Path + + +class RedliningValidator: + """Validator for tracked changes in Word documents.""" + + def __init__(self, unpacked_dir, original_docx, verbose=False): + self.unpacked_dir = Path(unpacked_dir) + self.original_docx = Path(original_docx) + self.verbose = verbose + self.namespaces = { + "w": "http://schemas.openxmlformats.org/wordprocessingml/2006/main" + } + + def validate(self): + """Main validation method that returns True if valid, False otherwise.""" + # Verify unpacked directory exists and has correct structure + modified_file = self.unpacked_dir / "word" / "document.xml" + if not modified_file.exists(): + print(f"FAILED - Modified document.xml not found at {modified_file}") + return False + + # First, check if there are any tracked changes by Claude to validate + try: + import xml.etree.ElementTree as ET + + tree = ET.parse(modified_file) + root = tree.getroot() + + # Check for w:del or w:ins tags authored by Claude + del_elements = root.findall(".//w:del", self.namespaces) + ins_elements = root.findall(".//w:ins", self.namespaces) + + # Filter to only include changes by Claude + claude_del_elements = [ + elem + for elem in del_elements + if elem.get(f"{{{self.namespaces['w']}}}author") == "Claude" + ] + claude_ins_elements = [ + elem + for elem in ins_elements + if elem.get(f"{{{self.namespaces['w']}}}author") == "Claude" + ] + + # Redlining validation is only needed if tracked changes by Claude have been used. 
+            if not claude_del_elements and not claude_ins_elements:
+                if self.verbose:
+                    print("PASSED - No tracked changes by Claude found.")
+                return True
+
+        except Exception:
+            # If we can't parse the XML, continue with full validation
+            pass
+
+        # Create temporary directory for unpacking original docx
+        with tempfile.TemporaryDirectory() as temp_dir:
+            temp_path = Path(temp_dir)
+
+            # Unpack original docx
+            try:
+                with zipfile.ZipFile(self.original_docx, "r") as zip_ref:
+                    zip_ref.extractall(temp_path)
+            except Exception as e:
+                print(f"FAILED - Error unpacking original docx: {e}")
+                return False
+
+            original_file = temp_path / "word" / "document.xml"
+            if not original_file.exists():
+                print(
+                    f"FAILED - Original document.xml not found in {self.original_docx}"
+                )
+                return False
+
+            # Parse both XML files using xml.etree.ElementTree for redlining validation
+            try:
+                import xml.etree.ElementTree as ET
+
+                modified_tree = ET.parse(modified_file)
+                modified_root = modified_tree.getroot()
+                original_tree = ET.parse(original_file)
+                original_root = original_tree.getroot()
+            except ET.ParseError as e:
+                print(f"FAILED - Error parsing XML files: {e}")
+                return False
+
+            # Remove Claude's tracked changes from both documents
+            self._remove_claude_tracked_changes(original_root)
+            self._remove_claude_tracked_changes(modified_root)
+
+            # Extract and compare text content
+            modified_text = self._extract_text_content(modified_root)
+            original_text = self._extract_text_content(original_root)
+
+            if modified_text != original_text:
+                # Show detailed character-level differences for each paragraph
+                error_message = self._generate_detailed_diff(
+                    original_text, modified_text
+                )
+                print(error_message)
+                return False
+
+        if self.verbose:
+            print("PASSED - All changes by Claude are properly tracked")
+        return True
+
+    def _generate_detailed_diff(self, original_text, modified_text):
+        """Generate detailed word-level differences using git word diff."""
+        error_parts = [
+            "FAILED - Document text doesn't match after removing Claude's tracked changes",
+            "",
+            "Likely causes:",
+            "  1. Modified text inside another author's <w:ins> or <w:del> tags",
+            "  2. Made edits without proper tracked changes",
+            "  3. Didn't nest <w:del> inside <w:ins> when deleting another's insertion",
+            "",
+            "For pre-redlined documents, use correct patterns:",
+            "  - To reject another's INSERTION: Nest <w:del> inside their <w:ins>",
+            "  - To restore another's DELETION: Add new <w:ins> AFTER their <w:del>",
+            "",
+        ]
+
+        # Show git word diff
+        git_diff = self._get_git_word_diff(original_text, modified_text)
+        if git_diff:
+            error_parts.extend(["Differences:", "============", git_diff])
+        else:
+            error_parts.append("Unable to generate word diff (git not available)")
+
+        return "\n".join(error_parts)
+
+    def _get_git_word_diff(self, original_text, modified_text):
+        """Generate word diff using git with character-level precision."""
+        try:
+            with tempfile.TemporaryDirectory() as temp_dir:
+                temp_path = Path(temp_dir)
+
+                # Create two files
+                original_file = temp_path / "original.txt"
+                modified_file = temp_path / "modified.txt"
+
+                original_file.write_text(original_text, encoding="utf-8")
+                modified_file.write_text(modified_text, encoding="utf-8")
+
+                # Try character-level diff first for precise differences
+                result = subprocess.run(
+                    [
+                        "git",
+                        "diff",
+                        "--word-diff=plain",
+                        "--word-diff-regex=.",  # Character-by-character diff
+                        "-U0",  # Zero lines of context - show only changed lines
+                        "--no-index",
+                        str(original_file),
+                        str(modified_file),
+                    ],
+                    capture_output=True,
+                    text=True,
+                )
+
+                if result.stdout.strip():
+                    # Clean up the output - remove git diff header lines
+                    lines = result.stdout.split("\n")
+                    # Skip the header lines (diff --git, index, +++, ---, @@)
+                    content_lines = []
+                    in_content = False
+                    for line in lines:
+                        if line.startswith("@@"):
+                            in_content = True
+                            continue
+                        if in_content and line.strip():
+                            content_lines.append(line)
+
+                    if content_lines:
+                        return "\n".join(content_lines)
+
+                # Fallback to word-level diff if character-level is too verbose
+                result = subprocess.run(
+                    [
+                        "git",
+                        "diff",
+                        "--word-diff=plain",
+                        "-U0",  # Zero lines of context
+                        "--no-index",
+                        str(original_file),
+                        str(modified_file),
+                    ],
+                    capture_output=True,
+                    text=True,
+                )
+
+                if result.stdout.strip():
+                    lines = result.stdout.split("\n")
+                    content_lines = []
+                    in_content = False
+                    for line in lines:
+                        if line.startswith("@@"):
+                            in_content = True
+                            continue
+                        if in_content and line.strip():
+                            content_lines.append(line)
+                    return "\n".join(content_lines)
+
+        except (subprocess.CalledProcessError, FileNotFoundError, Exception):
+            # Git not available or other error, return None to use fallback
+            pass
+
+        return None
+
+    def _remove_claude_tracked_changes(self, root):
+        """Remove tracked changes authored by Claude from the XML root."""
+        ins_tag = f"{{{self.namespaces['w']}}}ins"
+        del_tag = f"{{{self.namespaces['w']}}}del"
+        author_attr = f"{{{self.namespaces['w']}}}author"
+
+        # Remove w:ins elements
+        for parent in root.iter():
+            to_remove = []
+            for child in parent:
+                if child.tag == ins_tag and child.get(author_attr) == "Claude":
+                    to_remove.append(child)
+            for elem in to_remove:
+                parent.remove(elem)
+
+        # Unwrap content in w:del elements where author is "Claude"
+        deltext_tag = f"{{{self.namespaces['w']}}}delText"
+        t_tag = f"{{{self.namespaces['w']}}}t"
+
+        for parent in root.iter():
+            to_process = []
+            for child in parent:
+                if child.tag == del_tag and child.get(author_attr) == "Claude":
+                    to_process.append((child, list(parent).index(child)))
+
+            # Process in reverse order to maintain indices
+            for del_elem, del_index in reversed(to_process):
+                # Convert w:delText to w:t before moving
+                for elem in del_elem.iter():
+                    if elem.tag == deltext_tag:
+                        elem.tag
= t_tag + + # Move all children of w:del to its parent before removing w:del + for child in reversed(list(del_elem)): + parent.insert(del_index, child) + parent.remove(del_elem) + + def _extract_text_content(self, root): + """Extract text content from Word XML, preserving paragraph structure. + + Empty paragraphs are skipped to avoid false positives when tracked + insertions add only structural elements without text content. + """ + p_tag = f"{{{self.namespaces['w']}}}p" + t_tag = f"{{{self.namespaces['w']}}}t" + + paragraphs = [] + for p_elem in root.findall(f".//{p_tag}"): + # Get all text elements within this paragraph + text_parts = [] + for t_elem in p_elem.findall(f".//{t_tag}"): + if t_elem.text: + text_parts.append(t_elem.text) + paragraph_text = "".join(text_parts) + # Skip empty paragraphs - they don't affect content validation + if paragraph_text: + paragraphs.append(paragraph_text) + + return "\n".join(paragraphs) + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run directly.") diff --git a/.claude/skills/docx/scripts/__init__.py b/.claude/skills/docx/scripts/__init__.py new file mode 100644 index 0000000..bf9c562 --- /dev/null +++ b/.claude/skills/docx/scripts/__init__.py @@ -0,0 +1 @@ +# Make scripts directory a package for relative imports in tests diff --git a/.claude/skills/docx/scripts/document.py b/.claude/skills/docx/scripts/document.py new file mode 100644 index 0000000..ae9328d --- /dev/null +++ b/.claude/skills/docx/scripts/document.py @@ -0,0 +1,1276 @@ +#!/usr/bin/env python3 +""" +Library for working with Word documents: comments, tracked changes, and editing. + +Usage: + from skills.docx.scripts.document import Document + + # Initialize + doc = Document('workspace/unpacked') + doc = Document('workspace/unpacked', author="John Doe", initials="JD") + + # Find nodes + node = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "1"}) + node = doc["word/document.xml"].get_node(tag="w:p", line_number=10) + + # Add comments + doc.add_comment(start=node, end=node, text="Comment text") + doc.reply_to_comment(parent_comment_id=0, text="Reply text") + + # Suggest tracked changes + doc["word/document.xml"].suggest_deletion(node) # Delete content + doc["word/document.xml"].revert_insertion(ins_node) # Reject insertion + doc["word/document.xml"].revert_deletion(del_node) # Reject deletion + + # Save + doc.save() +""" + +import html +import random +import shutil +import tempfile +from datetime import datetime, timezone +from pathlib import Path + +from defusedxml import minidom +from ooxml.scripts.pack import pack_document +from ooxml.scripts.validation.docx import DOCXSchemaValidator +from ooxml.scripts.validation.redlining import RedliningValidator + +from .utilities import XMLEditor + +# Path to template files +TEMPLATE_DIR = Path(__file__).parent / "templates" + + +class DocxXMLEditor(XMLEditor): + """XMLEditor that automatically applies RSID, author, and date to new elements. + + Automatically adds attributes to elements that support them when inserting new content: + - w:rsidR, w:rsidRDefault, w:rsidP (for w:p and w:r elements) + - w:author and w:date (for w:ins, w:del, w:comment elements) + - w:id (for w:ins and w:del elements) + + Attributes: + dom (defusedxml.minidom.Document): The DOM document for direct manipulation + """ + + def __init__( + self, xml_path, rsid: str, author: str = "Claude", initials: str = "C" + ): + """Initialize with required RSID and optional author. 
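+
+        Example (illustrative values):
+            editor = DocxXMLEditor("unpacked/word/document.xml", rsid="00AB12CD")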
+
+        Args:
+            xml_path: Path to XML file to edit
+            rsid: RSID to automatically apply to new elements
+            author: Author name for tracked changes and comments (default: "Claude")
+            initials: Author initials (default: "C")
+        """
+        super().__init__(xml_path)
+        self.rsid = rsid
+        self.author = author
+        self.initials = initials
+
+    def _get_next_change_id(self):
+        """Get the next available change ID by checking all tracked change elements."""
+        max_id = -1
+        for tag in ("w:ins", "w:del"):
+            elements = self.dom.getElementsByTagName(tag)
+            for elem in elements:
+                change_id = elem.getAttribute("w:id")
+                if change_id:
+                    try:
+                        max_id = max(max_id, int(change_id))
+                    except ValueError:
+                        pass
+        return max_id + 1
+
+    def _ensure_w16du_namespace(self):
+        """Ensure w16du namespace is declared on the root element."""
+        root = self.dom.documentElement
+        if not root.hasAttribute("xmlns:w16du"):  # type: ignore
+            root.setAttribute(  # type: ignore
+                "xmlns:w16du",
+                "http://schemas.microsoft.com/office/word/2023/wordml/word16du",
+            )
+
+    def _ensure_w16cex_namespace(self):
+        """Ensure w16cex namespace is declared on the root element."""
+        root = self.dom.documentElement
+        if not root.hasAttribute("xmlns:w16cex"):  # type: ignore
+            root.setAttribute(  # type: ignore
+                "xmlns:w16cex",
+                "http://schemas.microsoft.com/office/word/2018/wordml/cex",
+            )
+
+    def _ensure_w14_namespace(self):
+        """Ensure w14 namespace is declared on the root element."""
+        root = self.dom.documentElement
+        if not root.hasAttribute("xmlns:w14"):  # type: ignore
+            root.setAttribute(  # type: ignore
+                "xmlns:w14",
+                "http://schemas.microsoft.com/office/word/2010/wordml",
+            )
+
+    def _inject_attributes_to_nodes(self, nodes):
+        """Inject RSID, author, and date attributes into DOM nodes where applicable.
+
+        Adds attributes to elements that support them:
+        - w:r: gets w:rsidR (or w:rsidDel if inside w:del)
+        - w:p: gets w:rsidR, w:rsidRDefault, w:rsidP, w14:paraId, w14:textId
+        - w:t: gets xml:space="preserve" if text has leading/trailing whitespace
+        - w:ins, w:del: get w:id, w:author, w:date, w16du:dateUtc
+        - w:comment: gets w:author, w:date, w:initials
+        - w16cex:commentExtensible: gets w16cex:dateUtc
+
+        Args:
+            nodes: List of DOM nodes to process
+        """
+        from datetime import datetime, timezone
+
+        timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+        def is_inside_deletion(elem):
+            """Check if element is inside a w:del element."""
+            parent = elem.parentNode
+            while parent:
+                if parent.nodeType == parent.ELEMENT_NODE and parent.tagName == "w:del":
+                    return True
+                parent = parent.parentNode
+            return False
+
+        def add_rsid_to_p(elem):
+            if not elem.hasAttribute("w:rsidR"):
+                elem.setAttribute("w:rsidR", self.rsid)
+            if not elem.hasAttribute("w:rsidRDefault"):
+                elem.setAttribute("w:rsidRDefault", self.rsid)
+            if not elem.hasAttribute("w:rsidP"):
+                elem.setAttribute("w:rsidP", self.rsid)
+            # Add w14:paraId and w14:textId if not present
+            if not elem.hasAttribute("w14:paraId"):
+                self._ensure_w14_namespace()
+                elem.setAttribute("w14:paraId", _generate_hex_id())
+            if not elem.hasAttribute("w14:textId"):
+                self._ensure_w14_namespace()
+                elem.setAttribute("w14:textId", _generate_hex_id())
+
+        def add_rsid_to_r(elem):
+            # Use w:rsidDel for runs inside w:del, otherwise w:rsidR
+            if is_inside_deletion(elem):
+                if not elem.hasAttribute("w:rsidDel"):
+                    elem.setAttribute("w:rsidDel", self.rsid)
+            else:
+                if not elem.hasAttribute("w:rsidR"):
+                    elem.setAttribute("w:rsidR", self.rsid)
+
+        def add_tracked_change_attrs(elem):
+            # Auto-assign w:id if
not present + if not elem.hasAttribute("w:id"): + elem.setAttribute("w:id", str(self._get_next_change_id())) + if not elem.hasAttribute("w:author"): + elem.setAttribute("w:author", self.author) + if not elem.hasAttribute("w:date"): + elem.setAttribute("w:date", timestamp) + # Add w16du:dateUtc for tracked changes (same as w:date since we generate UTC timestamps) + if elem.tagName in ("w:ins", "w:del") and not elem.hasAttribute( + "w16du:dateUtc" + ): + self._ensure_w16du_namespace() + elem.setAttribute("w16du:dateUtc", timestamp) + + def add_comment_attrs(elem): + if not elem.hasAttribute("w:author"): + elem.setAttribute("w:author", self.author) + if not elem.hasAttribute("w:date"): + elem.setAttribute("w:date", timestamp) + if not elem.hasAttribute("w:initials"): + elem.setAttribute("w:initials", self.initials) + + def add_comment_extensible_date(elem): + # Add w16cex:dateUtc for comment extensible elements + if not elem.hasAttribute("w16cex:dateUtc"): + self._ensure_w16cex_namespace() + elem.setAttribute("w16cex:dateUtc", timestamp) + + def add_xml_space_to_t(elem): + # Add xml:space="preserve" to w:t if text has leading/trailing whitespace + if ( + elem.firstChild + and elem.firstChild.nodeType == elem.firstChild.TEXT_NODE + ): + text = elem.firstChild.data + if text and (text[0].isspace() or text[-1].isspace()): + if not elem.hasAttribute("xml:space"): + elem.setAttribute("xml:space", "preserve") + + for node in nodes: + if node.nodeType != node.ELEMENT_NODE: + continue + + # Handle the node itself + if node.tagName == "w:p": + add_rsid_to_p(node) + elif node.tagName == "w:r": + add_rsid_to_r(node) + elif node.tagName == "w:t": + add_xml_space_to_t(node) + elif node.tagName in ("w:ins", "w:del"): + add_tracked_change_attrs(node) + elif node.tagName == "w:comment": + add_comment_attrs(node) + elif node.tagName == "w16cex:commentExtensible": + add_comment_extensible_date(node) + + # Process descendants (getElementsByTagName doesn't return the element itself) + for elem in node.getElementsByTagName("w:p"): + add_rsid_to_p(elem) + for elem in node.getElementsByTagName("w:r"): + add_rsid_to_r(elem) + for elem in node.getElementsByTagName("w:t"): + add_xml_space_to_t(elem) + for tag in ("w:ins", "w:del"): + for elem in node.getElementsByTagName(tag): + add_tracked_change_attrs(elem) + for elem in node.getElementsByTagName("w:comment"): + add_comment_attrs(elem) + for elem in node.getElementsByTagName("w16cex:commentExtensible"): + add_comment_extensible_date(elem) + + def replace_node(self, elem, new_content): + """Replace node with automatic attribute injection.""" + nodes = super().replace_node(elem, new_content) + self._inject_attributes_to_nodes(nodes) + return nodes + + def insert_after(self, elem, xml_content): + """Insert after with automatic attribute injection.""" + nodes = super().insert_after(elem, xml_content) + self._inject_attributes_to_nodes(nodes) + return nodes + + def insert_before(self, elem, xml_content): + """Insert before with automatic attribute injection.""" + nodes = super().insert_before(elem, xml_content) + self._inject_attributes_to_nodes(nodes) + return nodes + + def append_to(self, elem, xml_content): + """Append to with automatic attribute injection.""" + nodes = super().append_to(elem, xml_content) + self._inject_attributes_to_nodes(nodes) + return nodes + + def revert_insertion(self, elem): + """Reject an insertion by wrapping its content in a deletion. + + Wraps all runs inside w:ins in w:del, converting w:t to w:delText. 
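+
+        Sketch of the transformation (illustrative; injected w:id, w:author,
+        and w:date attributes omitted):
+            <w:ins w:id="5"><w:r><w:t>new</w:t></w:r></w:ins>
+        becomes
+            <w:ins w:id="5"><w:del><w:r><w:delText>new</w:delText></w:r></w:del></w:ins>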
+ Can process a single w:ins element or a container element with multiple w:ins. + + Args: + elem: Element to process (w:ins, w:p, w:body, etc.) + + Returns: + list: List containing the processed element(s) + + Raises: + ValueError: If the element contains no w:ins elements + + Example: + # Reject a single insertion + ins = doc["word/document.xml"].get_node(tag="w:ins", attrs={"w:id": "5"}) + doc["word/document.xml"].revert_insertion(ins) + + # Reject all insertions in a paragraph + para = doc["word/document.xml"].get_node(tag="w:p", line_number=42) + doc["word/document.xml"].revert_insertion(para) + """ + # Collect insertions + ins_elements = [] + if elem.tagName == "w:ins": + ins_elements.append(elem) + else: + ins_elements.extend(elem.getElementsByTagName("w:ins")) + + # Validate that there are insertions to reject + if not ins_elements: + raise ValueError( + f"revert_insertion requires w:ins elements. " + f"The provided element <{elem.tagName}> contains no insertions. " + ) + + # Process all insertions - wrap all children in w:del + for ins_elem in ins_elements: + runs = list(ins_elem.getElementsByTagName("w:r")) + if not runs: + continue + + # Create deletion wrapper + del_wrapper = self.dom.createElement("w:del") + + # Process each run + for run in runs: + # Convert w:t → w:delText and w:rsidR → w:rsidDel + if run.hasAttribute("w:rsidR"): + run.setAttribute("w:rsidDel", run.getAttribute("w:rsidR")) + run.removeAttribute("w:rsidR") + elif not run.hasAttribute("w:rsidDel"): + run.setAttribute("w:rsidDel", self.rsid) + + for t_elem in list(run.getElementsByTagName("w:t")): + del_text = self.dom.createElement("w:delText") + # Copy ALL child nodes (not just firstChild) to handle entities + while t_elem.firstChild: + del_text.appendChild(t_elem.firstChild) + for i in range(t_elem.attributes.length): + attr = t_elem.attributes.item(i) + del_text.setAttribute(attr.name, attr.value) + t_elem.parentNode.replaceChild(del_text, t_elem) + + # Move all children from ins to del wrapper + while ins_elem.firstChild: + del_wrapper.appendChild(ins_elem.firstChild) + + # Add del wrapper back to ins + ins_elem.appendChild(del_wrapper) + + # Inject attributes to the deletion wrapper + self._inject_attributes_to_nodes([del_wrapper]) + + return [elem] + + def revert_deletion(self, elem): + """Reject a deletion by re-inserting the deleted content. + + Creates w:ins elements after each w:del, copying deleted content and + converting w:delText back to w:t. + Can process a single w:del element or a container element with multiple w:del. + + Args: + elem: Element to process (w:del, w:p, w:body, etc.) + + Returns: + list: If elem is w:del, returns [elem, new_ins]. Otherwise returns [elem]. + + Raises: + ValueError: If the element contains no w:del elements + + Example: + # Reject a single deletion - returns [w:del, w:ins] + del_elem = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "3"}) + nodes = doc["word/document.xml"].revert_deletion(del_elem) + + # Reject all deletions in a paragraph - returns [para] + para = doc["word/document.xml"].get_node(tag="w:p", line_number=42) + nodes = doc["word/document.xml"].revert_deletion(para) + """ + # Collect deletions FIRST - before we modify the DOM + del_elements = [] + is_single_del = elem.tagName == "w:del" + + if is_single_del: + del_elements.append(elem) + else: + del_elements.extend(elem.getElementsByTagName("w:del")) + + # Validate that there are deletions to reject + if not del_elements: + raise ValueError( + f"revert_deletion requires w:del elements. 
" + f"The provided element <{elem.tagName}> contains no deletions. " + ) + + # Track created insertion (only relevant if elem is a single w:del) + created_insertion = None + + # Process all deletions - create insertions that copy the deleted content + for del_elem in del_elements: + # Clone the deleted runs and convert them to insertions + runs = list(del_elem.getElementsByTagName("w:r")) + if not runs: + continue + + # Create insertion wrapper + ins_elem = self.dom.createElement("w:ins") + + for run in runs: + # Clone the run + new_run = run.cloneNode(True) + + # Convert w:delText → w:t + for del_text in list(new_run.getElementsByTagName("w:delText")): + t_elem = self.dom.createElement("w:t") + # Copy ALL child nodes (not just firstChild) to handle entities + while del_text.firstChild: + t_elem.appendChild(del_text.firstChild) + for i in range(del_text.attributes.length): + attr = del_text.attributes.item(i) + t_elem.setAttribute(attr.name, attr.value) + del_text.parentNode.replaceChild(t_elem, del_text) + + # Update run attributes: w:rsidDel → w:rsidR + if new_run.hasAttribute("w:rsidDel"): + new_run.setAttribute("w:rsidR", new_run.getAttribute("w:rsidDel")) + new_run.removeAttribute("w:rsidDel") + elif not new_run.hasAttribute("w:rsidR"): + new_run.setAttribute("w:rsidR", self.rsid) + + ins_elem.appendChild(new_run) + + # Insert the new insertion after the deletion + nodes = self.insert_after(del_elem, ins_elem.toxml()) + + # If processing a single w:del, track the created insertion + if is_single_del and nodes: + created_insertion = nodes[0] + + # Return based on input type + if is_single_del and created_insertion: + return [elem, created_insertion] + else: + return [elem] + + @staticmethod + def suggest_paragraph(xml_content: str) -> str: + """Transform paragraph XML to add tracked change wrapping for insertion. + + Wraps runs in and adds to w:rPr in w:pPr for numbered lists. + + Args: + xml_content: XML string containing a element + + Returns: + str: Transformed XML with tracked change wrapping + """ + wrapper = f'{xml_content}' + doc = minidom.parseString(wrapper) + para = doc.getElementsByTagName("w:p")[0] + + # Ensure w:pPr exists + pPr_list = para.getElementsByTagName("w:pPr") + if not pPr_list: + pPr = doc.createElement("w:pPr") + para.insertBefore( + pPr, para.firstChild + ) if para.firstChild else para.appendChild(pPr) + else: + pPr = pPr_list[0] + + # Ensure w:rPr exists in w:pPr + rPr_list = pPr.getElementsByTagName("w:rPr") + if not rPr_list: + rPr = doc.createElement("w:rPr") + pPr.appendChild(rPr) + else: + rPr = rPr_list[0] + + # Add to w:rPr + ins_marker = doc.createElement("w:ins") + rPr.insertBefore( + ins_marker, rPr.firstChild + ) if rPr.firstChild else rPr.appendChild(ins_marker) + + # Wrap all non-pPr children in + ins_wrapper = doc.createElement("w:ins") + for child in [c for c in para.childNodes if c.nodeName != "w:pPr"]: + para.removeChild(child) + ins_wrapper.appendChild(child) + para.appendChild(ins_wrapper) + + return para.toxml() + + def suggest_deletion(self, elem): + """Mark a w:r or w:p element as deleted with tracked changes (in-place DOM manipulation). 
+
+        For w:r: wraps in <w:del>, converts <w:t> to <w:delText>, preserves w:rPr
+        For w:p (regular): wraps content in <w:del>, converts <w:t> to <w:delText>
+        For w:p (numbered list): adds <w:del> to w:rPr in w:pPr, wraps content in <w:del>
+
+        Args:
+            elem: A w:r or w:p DOM element without existing tracked changes
+
+        Returns:
+            Element: The modified element
+
+        Raises:
+            ValueError: If element has existing tracked changes or invalid structure
+        """
+        if elem.nodeName == "w:r":
+            # Check for existing w:delText
+            if elem.getElementsByTagName("w:delText"):
+                raise ValueError("w:r element already contains w:delText")
+
+            # Convert w:t → w:delText
+            for t_elem in list(elem.getElementsByTagName("w:t")):
+                del_text = self.dom.createElement("w:delText")
+                # Copy ALL child nodes (not just firstChild) to handle entities
+                while t_elem.firstChild:
+                    del_text.appendChild(t_elem.firstChild)
+                # Preserve attributes like xml:space
+                for i in range(t_elem.attributes.length):
+                    attr = t_elem.attributes.item(i)
+                    del_text.setAttribute(attr.name, attr.value)
+                t_elem.parentNode.replaceChild(del_text, t_elem)
+
+            # Update run attributes: w:rsidR → w:rsidDel
+            if elem.hasAttribute("w:rsidR"):
+                elem.setAttribute("w:rsidDel", elem.getAttribute("w:rsidR"))
+                elem.removeAttribute("w:rsidR")
+            elif not elem.hasAttribute("w:rsidDel"):
+                elem.setAttribute("w:rsidDel", self.rsid)
+
+            # Wrap in w:del
+            del_wrapper = self.dom.createElement("w:del")
+            parent = elem.parentNode
+            parent.insertBefore(del_wrapper, elem)
+            parent.removeChild(elem)
+            del_wrapper.appendChild(elem)
+
+            # Inject attributes to the deletion wrapper
+            self._inject_attributes_to_nodes([del_wrapper])
+
+            return del_wrapper
+
+        elif elem.nodeName == "w:p":
+            # Check for existing tracked changes
+            if elem.getElementsByTagName("w:ins") or elem.getElementsByTagName("w:del"):
+                raise ValueError("w:p element already contains tracked changes")
+
+            # Check if it's a numbered list item
+            pPr_list = elem.getElementsByTagName("w:pPr")
+            is_numbered = pPr_list and pPr_list[0].getElementsByTagName("w:numPr")
+
+            if is_numbered:
+                # Add <w:del> to w:rPr in w:pPr
+                pPr = pPr_list[0]
+                rPr_list = pPr.getElementsByTagName("w:rPr")
+
+                if not rPr_list:
+                    rPr = self.dom.createElement("w:rPr")
+                    pPr.appendChild(rPr)
+                else:
+                    rPr = rPr_list[0]
+
+                # Add <w:del> marker
+                del_marker = self.dom.createElement("w:del")
+                rPr.insertBefore(
+                    del_marker, rPr.firstChild
+                ) if rPr.firstChild else rPr.appendChild(del_marker)
+
+            # Convert w:t → w:delText in all runs
+            for t_elem in list(elem.getElementsByTagName("w:t")):
+                del_text = self.dom.createElement("w:delText")
+                # Copy ALL child nodes (not just firstChild) to handle entities
+                while t_elem.firstChild:
+                    del_text.appendChild(t_elem.firstChild)
+                # Preserve attributes like xml:space
+                for i in range(t_elem.attributes.length):
+                    attr = t_elem.attributes.item(i)
+                    del_text.setAttribute(attr.name, attr.value)
+                t_elem.parentNode.replaceChild(del_text, t_elem)
+
+            # Update run attributes: w:rsidR → w:rsidDel
+            for run in elem.getElementsByTagName("w:r"):
+                if run.hasAttribute("w:rsidR"):
+                    run.setAttribute("w:rsidDel", run.getAttribute("w:rsidR"))
+                    run.removeAttribute("w:rsidR")
+                elif not run.hasAttribute("w:rsidDel"):
+                    run.setAttribute("w:rsidDel", self.rsid)
+
+            # Wrap all non-pPr children in <w:del>
+            del_wrapper = self.dom.createElement("w:del")
+            for child in [c for c in elem.childNodes if c.nodeName != "w:pPr"]:
+                elem.removeChild(child)
+                del_wrapper.appendChild(child)
+            elem.appendChild(del_wrapper)
+
+            # Inject attributes to the deletion wrapper
+            self._inject_attributes_to_nodes([del_wrapper])
+
+            
return elem + + else: + raise ValueError(f"Element must be w:r or w:p, got {elem.nodeName}") + + +def _generate_hex_id() -> str: + """Generate random 8-character hex ID for para/durable IDs. + + Values are constrained to be less than 0x7FFFFFFF per OOXML spec: + - paraId must be < 0x80000000 + - durableId must be < 0x7FFFFFFF + We use the stricter constraint (0x7FFFFFFF) for both. + """ + return f"{random.randint(1, 0x7FFFFFFE):08X}" + + +def _generate_rsid() -> str: + """Generate random 8-character hex RSID.""" + return "".join(random.choices("0123456789ABCDEF", k=8)) + + +class Document: + """Manages comments in unpacked Word documents.""" + + def __init__( + self, + unpacked_dir, + rsid=None, + track_revisions=False, + author="Claude", + initials="C", + ): + """ + Initialize with path to unpacked Word document directory. + Automatically sets up comment infrastructure (people.xml, RSIDs). + + Args: + unpacked_dir: Path to unpacked DOCX directory (must contain word/ subdirectory) + rsid: Optional RSID to use for all comment elements. If not provided, one will be generated. + track_revisions: If True, enables track revisions in settings.xml (default: False) + author: Default author name for comments (default: "Claude") + initials: Default author initials for comments (default: "C") + """ + self.original_path = Path(unpacked_dir) + + if not self.original_path.exists() or not self.original_path.is_dir(): + raise ValueError(f"Directory not found: {unpacked_dir}") + + # Create temporary directory with subdirectories for unpacked content and baseline + self.temp_dir = tempfile.mkdtemp(prefix="docx_") + self.unpacked_path = Path(self.temp_dir) / "unpacked" + shutil.copytree(self.original_path, self.unpacked_path) + + # Pack original directory into temporary .docx for validation baseline (outside unpacked dir) + self.original_docx = Path(self.temp_dir) / "original.docx" + pack_document(self.original_path, self.original_docx, validate=False) + + self.word_path = self.unpacked_path / "word" + + # Generate RSID if not provided + self.rsid = rsid if rsid else _generate_rsid() + print(f"Using RSID: {self.rsid}") + + # Set default author and initials + self.author = author + self.initials = initials + + # Cache for lazy-loaded editors + self._editors = {} + + # Comment file paths + self.comments_path = self.word_path / "comments.xml" + self.comments_extended_path = self.word_path / "commentsExtended.xml" + self.comments_ids_path = self.word_path / "commentsIds.xml" + self.comments_extensible_path = self.word_path / "commentsExtensible.xml" + + # Load existing comments and determine next ID (before setup modifies files) + self.existing_comments = self._load_existing_comments() + self.next_comment_id = self._get_next_comment_id() + + # Convenient access to document.xml editor (semi-private) + self._document = self["word/document.xml"] + + # Setup tracked changes infrastructure + self._setup_tracking(track_revisions=track_revisions) + + # Add author to people.xml + self._add_author_to_people(author) + + def __getitem__(self, xml_path: str) -> DocxXMLEditor: + """ + Get or create a DocxXMLEditor for the specified XML file. 
+
+        Enables lazy-loaded editors with bracket notation:
+            node = doc["word/document.xml"].get_node(tag="w:p", line_number=42)
+
+        Args:
+            xml_path: Relative path to XML file (e.g., "word/document.xml", "word/comments.xml")
+
+        Returns:
+            DocxXMLEditor instance for the specified file
+
+        Raises:
+            ValueError: If the file does not exist
+
+        Example:
+            # Get node from document.xml
+            node = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "1"})
+
+            # Get node from comments.xml
+            comment = doc["word/comments.xml"].get_node(tag="w:comment", attrs={"w:id": "0"})
+        """
+        if xml_path not in self._editors:
+            file_path = self.unpacked_path / xml_path
+            if not file_path.exists():
+                raise ValueError(f"XML file not found: {xml_path}")
+            # Use DocxXMLEditor with RSID, author, and initials for all editors
+            self._editors[xml_path] = DocxXMLEditor(
+                file_path, rsid=self.rsid, author=self.author, initials=self.initials
+            )
+        return self._editors[xml_path]
+
+    def add_comment(self, start, end, text: str) -> int:
+        """
+        Add a comment spanning from one element to another.
+
+        Args:
+            start: DOM element for the starting point
+            end: DOM element for the ending point
+            text: Comment content
+
+        Returns:
+            The comment ID that was created
+
+        Example:
+            start_node = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "1"})
+            end_node = doc["word/document.xml"].get_node(tag="w:ins", attrs={"w:id": "2"})
+            doc.add_comment(start=start_node, end=end_node, text="Explanation")
+        """
+        comment_id = self.next_comment_id
+        para_id = _generate_hex_id()
+        durable_id = _generate_hex_id()
+        timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+        # Add comment ranges to document.xml immediately
+        self._document.insert_before(start, self._comment_range_start_xml(comment_id))
+
+        # If end node is a paragraph, append comment markup inside it
+        # Otherwise insert after it (for run-level anchors)
+        if end.tagName == "w:p":
+            self._document.append_to(end, self._comment_range_end_xml(comment_id))
+        else:
+            self._document.insert_after(end, self._comment_range_end_xml(comment_id))
+
+        # Add to comments.xml immediately
+        self._add_to_comments_xml(
+            comment_id, para_id, text, self.author, self.initials, timestamp
+        )
+
+        # Add to commentsExtended.xml immediately
+        self._add_to_comments_extended_xml(para_id, parent_para_id=None)
+
+        # Add to commentsIds.xml immediately
+        self._add_to_comments_ids_xml(para_id, durable_id)
+
+        # Add to commentsExtensible.xml immediately
+        self._add_to_comments_extensible_xml(durable_id)
+
+        # Update existing_comments so replies work
+        self.existing_comments[comment_id] = {"para_id": para_id}
+
+        self.next_comment_id += 1
+        return comment_id
+
+    def reply_to_comment(
+        self,
+        parent_comment_id: int,
+        text: str,
+    ) -> int:
+        """
+        Add a reply to an existing comment.
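+        The reply is anchored to the parent comment's existing range, so it
+        covers the same span of text as the comment it answers.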
+
+        Args:
+            parent_comment_id: The w:id of the parent comment to reply to
+            text: Reply text
+
+        Returns:
+            The comment ID that was created for the reply
+
+        Example:
+            doc.reply_to_comment(parent_comment_id=0, text="I agree with this change")
+        """
+        if parent_comment_id not in self.existing_comments:
+            raise ValueError(f"Parent comment with id={parent_comment_id} not found")
+
+        parent_info = self.existing_comments[parent_comment_id]
+        comment_id = self.next_comment_id
+        para_id = _generate_hex_id()
+        durable_id = _generate_hex_id()
+        timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+        # Add comment ranges to document.xml immediately
+        parent_start_elem = self._document.get_node(
+            tag="w:commentRangeStart", attrs={"w:id": str(parent_comment_id)}
+        )
+        parent_ref_elem = self._document.get_node(
+            tag="w:commentReference", attrs={"w:id": str(parent_comment_id)}
+        )
+
+        self._document.insert_after(
+            parent_start_elem, self._comment_range_start_xml(comment_id)
+        )
+        parent_ref_run = parent_ref_elem.parentNode
+        self._document.insert_after(
+            parent_ref_run, f'<w:commentRangeEnd w:id="{comment_id}"/>'
+        )
+        self._document.insert_after(
+            parent_ref_run, self._comment_ref_run_xml(comment_id)
+        )
+
+        # Add to comments.xml immediately
+        self._add_to_comments_xml(
+            comment_id, para_id, text, self.author, self.initials, timestamp
+        )
+
+        # Add to commentsExtended.xml immediately (with parent)
+        self._add_to_comments_extended_xml(
+            para_id, parent_para_id=parent_info["para_id"]
+        )
+
+        # Add to commentsIds.xml immediately
+        self._add_to_comments_ids_xml(para_id, durable_id)
+
+        # Add to commentsExtensible.xml immediately
+        self._add_to_comments_extensible_xml(durable_id)
+
+        # Update existing_comments so replies work
+        self.existing_comments[comment_id] = {"para_id": para_id}
+
+        self.next_comment_id += 1
+        return comment_id
+
+    def __del__(self):
+        """Clean up temporary directory on deletion."""
+        if hasattr(self, "temp_dir") and Path(self.temp_dir).exists():
+            shutil.rmtree(self.temp_dir)
+
+    def validate(self) -> None:
+        """
+        Validate the document against XSD schema and redlining rules.
+
+        Raises:
+            ValueError: If validation fails.
+        """
+        # Create validators with current state
+        schema_validator = DOCXSchemaValidator(
+            self.unpacked_path, self.original_docx, verbose=False
+        )
+        redlining_validator = RedliningValidator(
+            self.unpacked_path, self.original_docx, verbose=False
+        )
+
+        # Run validations
+        if not schema_validator.validate():
+            raise ValueError("Schema validation failed")
+        if not redlining_validator.validate():
+            raise ValueError("Redlining validation failed")
+
+    def save(self, destination=None, validate=True) -> None:
+        """
+        Save all modified XML files to disk and copy to destination directory.
+
+        This persists all changes made via add_comment() and reply_to_comment().
+
+        Args:
+            destination: Optional path to save to. If None, saves back to original directory.
+            validate: If True, validates document before saving (default: True).
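+
+        Example:
+            # Illustrative paths; validation runs before anything is copied
+            doc.save()                                 # update the original directory
+            doc.save(destination="reviewed_unpacked")  # copy to a new directory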
+ """ + # Only ensure comment relationships and content types if comment files exist + if self.comments_path.exists(): + self._ensure_comment_relationships() + self._ensure_comment_content_types() + + # Save all modified XML files in temp directory + for editor in self._editors.values(): + editor.save() + + # Validate by default + if validate: + self.validate() + + # Copy contents from temp directory to destination (or original directory) + target_path = Path(destination) if destination else self.original_path + shutil.copytree(self.unpacked_path, target_path, dirs_exist_ok=True) + + # ==================== Private: Initialization ==================== + + def _get_next_comment_id(self): + """Get the next available comment ID.""" + if not self.comments_path.exists(): + return 0 + + editor = self["word/comments.xml"] + max_id = -1 + for comment_elem in editor.dom.getElementsByTagName("w:comment"): + comment_id = comment_elem.getAttribute("w:id") + if comment_id: + try: + max_id = max(max_id, int(comment_id)) + except ValueError: + pass + return max_id + 1 + + def _load_existing_comments(self): + """Load existing comments from files to enable replies.""" + if not self.comments_path.exists(): + return {} + + editor = self["word/comments.xml"] + existing = {} + + for comment_elem in editor.dom.getElementsByTagName("w:comment"): + comment_id = comment_elem.getAttribute("w:id") + if not comment_id: + continue + + # Find para_id from the w:p element within the comment + para_id = None + for p_elem in comment_elem.getElementsByTagName("w:p"): + para_id = p_elem.getAttribute("w14:paraId") + if para_id: + break + + if not para_id: + continue + + existing[int(comment_id)] = {"para_id": para_id} + + return existing + + # ==================== Private: Setup Methods ==================== + + def _setup_tracking(self, track_revisions=False): + """Set up comment infrastructure in unpacked directory. 
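+        Creates word/people.xml from the template, registers its content type and
+        relationship, and records this session's RSID in settings.xml.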
+
+        Args:
+            track_revisions: If True, enables track revisions in settings.xml
+        """
+        # Create or update word/people.xml
+        people_file = self.word_path / "people.xml"
+        self._update_people_xml(people_file)
+
+        # Update XML files
+        self._add_content_type_for_people(self.unpacked_path / "[Content_Types].xml")
+        self._add_relationship_for_people(
+            self.word_path / "_rels" / "document.xml.rels"
+        )
+
+        # Always add RSID to settings.xml, optionally enable trackRevisions
+        self._update_settings(
+            self.word_path / "settings.xml", track_revisions=track_revisions
+        )
+
+    def _update_people_xml(self, path):
+        """Create people.xml if it doesn't exist."""
+        if not path.exists():
+            # Copy from template
+            shutil.copy(TEMPLATE_DIR / "people.xml", path)
+
+    def _add_content_type_for_people(self, path):
+        """Add people.xml content type to [Content_Types].xml if not already present."""
+        editor = self["[Content_Types].xml"]
+
+        if self._has_override(editor, "/word/people.xml"):
+            return
+
+        # Add Override element (markup reconstructed; this is the standard
+        # content type for word/people.xml)
+        root = editor.dom.documentElement
+        override_xml = (
+            '<Override PartName="/word/people.xml" '
+            'ContentType="application/vnd.openxmlformats-officedocument.wordprocessingml.people+xml"/>'
+        )
+        editor.append_to(root, override_xml)
+
+    def _add_relationship_for_people(self, path):
+        """Add people.xml relationship to document.xml.rels if not already present."""
+        editor = self["word/_rels/document.xml.rels"]
+
+        if self._has_relationship(editor, "people.xml"):
+            return
+
+        root = editor.dom.documentElement
+        root_tag = root.tagName  # type: ignore
+        prefix = root_tag.split(":")[0] + ":" if ":" in root_tag else ""
+        next_rid = editor.get_next_rid()
+
+        # Create the relationship entry
+        rel_xml = f'<{prefix}Relationship Id="{next_rid}" Type="http://schemas.microsoft.com/office/2011/relationships/people" Target="people.xml"/>'
+        editor.append_to(root, rel_xml)
+
+    def _update_settings(self, path, track_revisions=False):
+        """Add RSID and optionally enable track revisions in settings.xml.
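+        The added section has the form (illustrative value)
+        <w:rsids><w:rsidRoot w:val="5C2A01F3"/><w:rsid w:val="5C2A01F3"/></w:rsids>.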
+
+        Args:
+            path: Path to settings.xml
+            track_revisions: If True, adds trackRevisions element
+
+        Places elements per OOXML schema order:
+        - trackRevisions: early (before defaultTabStop)
+        - rsids: late (after compat)
+        """
+        editor = self["word/settings.xml"]
+        root = editor.get_node(tag="w:settings")
+        prefix = root.tagName.split(":")[0] if ":" in root.tagName else "w"
+
+        # Conditionally add trackRevisions if requested
+        if track_revisions:
+            track_revisions_exists = any(
+                elem.tagName == f"{prefix}:trackRevisions"
+                for elem in editor.dom.getElementsByTagName(f"{prefix}:trackRevisions")
+            )
+
+            if not track_revisions_exists:
+                track_rev_xml = f"<{prefix}:trackRevisions/>"
+                # Try to insert before documentProtection, defaultTabStop, or at start
+                inserted = False
+                for tag in [f"{prefix}:documentProtection", f"{prefix}:defaultTabStop"]:
+                    elements = editor.dom.getElementsByTagName(tag)
+                    if elements:
+                        editor.insert_before(elements[0], track_rev_xml)
+                        inserted = True
+                        break
+                if not inserted:
+                    # Insert as first child of settings
+                    if root.firstChild:
+                        editor.insert_before(root.firstChild, track_rev_xml)
+                    else:
+                        editor.append_to(root, track_rev_xml)
+
+        # Always check if rsids section exists
+        rsids_elements = editor.dom.getElementsByTagName(f"{prefix}:rsids")
+
+        if not rsids_elements:
+            # Add new rsids section
+            rsids_xml = f'''<{prefix}:rsids>
+                <{prefix}:rsidRoot {prefix}:val="{self.rsid}"/>
+                <{prefix}:rsid {prefix}:val="{self.rsid}"/>
+            </{prefix}:rsids>'''
+
+            # Try to insert after compat, before clrSchemeMapping, or before closing tag
+            inserted = False
+            compat_elements = editor.dom.getElementsByTagName(f"{prefix}:compat")
+            if compat_elements:
+                editor.insert_after(compat_elements[0], rsids_xml)
+                inserted = True
+
+            if not inserted:
+                clr_elements = editor.dom.getElementsByTagName(
+                    f"{prefix}:clrSchemeMapping"
+                )
+                if clr_elements:
+                    editor.insert_before(clr_elements[0], rsids_xml)
+                    inserted = True
+
+            if not inserted:
+                editor.append_to(root, rsids_xml)
+        else:
+            # Check if this rsid already exists
+            rsids_elem = rsids_elements[0]
+            rsid_exists = any(
+                elem.getAttribute(f"{prefix}:val") == self.rsid
+                for elem in rsids_elem.getElementsByTagName(f"{prefix}:rsid")
+            )
+
+            if not rsid_exists:
+                rsid_xml = f'<{prefix}:rsid {prefix}:val="{self.rsid}"/>'
+                editor.append_to(rsids_elem, rsid_xml)
+
+    # ==================== Private: XML File Creation ====================
+
+    def _add_to_comments_xml(
+        self, comment_id, para_id, text, author, initials, timestamp
+    ):
+        """Add a single comment to comments.xml."""
+        if not self.comments_path.exists():
+            shutil.copy(TEMPLATE_DIR / "comments.xml", self.comments_path)
+
+        editor = self["word/comments.xml"]
+        root = editor.get_node(tag="w:comments")
+
+        escaped_text = (
+            text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
+        )
+        # Note: w:rsidR, w:rsidRDefault, w:rsidP on w:p, w:rsidR on w:r,
+        # and w:author, w:date, w:initials on w:comment are automatically added by DocxXMLEditor
+        # (comment body markup reconstructed to match the note above)
+        comment_xml = f'''<w:comment w:id="{comment_id}">
+    <w:p w14:paraId="{para_id}">
+        <w:r>
+            <w:t xml:space="preserve">{escaped_text}</w:t>
+        </w:r>
+    </w:p>
+</w:comment>'''
+        editor.append_to(root, comment_xml)
+
+    def _add_to_comments_extended_xml(self, para_id, parent_para_id):
+        """Add a single comment to commentsExtended.xml."""
+        if not self.comments_extended_path.exists():
+            shutil.copy(
+                TEMPLATE_DIR / "commentsExtended.xml", self.comments_extended_path
+            )
+
+        editor = self["word/commentsExtended.xml"]
+        root = editor.get_node(tag="w15:commentsEx")
+
+        if parent_para_id:
+            xml = f'<w15:commentEx w15:paraId="{para_id}" w15:paraIdParent="{parent_para_id}" w15:done="0"/>'
+        else:
+            xml = f'<w15:commentEx w15:paraId="{para_id}" w15:done="0"/>'
+        editor.append_to(root, xml)
+
+    def _add_to_comments_ids_xml(self, para_id, durable_id):
+        """Add a single comment to commentsIds.xml."""
+        if not self.comments_ids_path.exists():
+            shutil.copy(TEMPLATE_DIR / "commentsIds.xml", self.comments_ids_path)
+
+        editor = self["word/commentsIds.xml"]
+        root = editor.get_node(tag="w16cid:commentsIds")
+
+        xml = f'<w16cid:commentId w16cid:paraId="{para_id}" w16cid:durableId="{durable_id}"/>'
+        editor.append_to(root, xml)
+
+    def _add_to_comments_extensible_xml(self, durable_id):
+        """Add a single comment to commentsExtensible.xml."""
+        if not self.comments_extensible_path.exists():
+            shutil.copy(
+                TEMPLATE_DIR / "commentsExtensible.xml", self.comments_extensible_path
+            )
+
+        editor = self["word/commentsExtensible.xml"]
+        root = editor.get_node(tag="w16cex:commentsExtensible")
+
+        # w16cex:dateUtc is automatically added by DocxXMLEditor
+        xml = f'<w16cex:commentExtensible w16cex:durableId="{durable_id}"/>'
+        editor.append_to(root, xml)
+
+    # ==================== Private: XML Fragments ====================
+
+    def _comment_range_start_xml(self, comment_id):
+        """Generate XML for comment range start."""
+        return f'<w:commentRangeStart w:id="{comment_id}"/>'
+
+    def _comment_range_end_xml(self, comment_id):
+        """Generate XML for comment range end with reference run.
+
+        Note: w:rsidR is automatically added by DocxXMLEditor.
+        """
+        # (reconstructed standard comment-reference markup)
+        return f'''<w:commentRangeEnd w:id="{comment_id}"/>
+<w:r>
+    <w:rPr><w:rStyle w:val="CommentReference"/></w:rPr>
+    <w:commentReference w:id="{comment_id}"/>
+</w:r>'''
+
+    def _comment_ref_run_xml(self, comment_id):
+        """Generate XML for comment reference run.
+
+        Note: w:rsidR is automatically added by DocxXMLEditor.
+        """
+        # (reconstructed standard comment-reference markup)
+        return f'''<w:r>
+    <w:rPr><w:rStyle w:val="CommentReference"/></w:rPr>
+    <w:commentReference w:id="{comment_id}"/>
+</w:r>'''
+
+    # ==================== Private: Metadata Updates ====================
+
+    def _has_relationship(self, editor, target):
+        """Check if a relationship with given target exists."""
+        for rel_elem in editor.dom.getElementsByTagName("Relationship"):
+            if rel_elem.getAttribute("Target") == target:
+                return True
+        return False
+
+    def _has_override(self, editor, part_name):
+        """Check if an override with given part name exists."""
+        for override_elem in editor.dom.getElementsByTagName("Override"):
+            if override_elem.getAttribute("PartName") == part_name:
+                return True
+        return False
+
+    def _has_author(self, editor, author):
+        """Check if an author already exists in people.xml."""
+        for person_elem in editor.dom.getElementsByTagName("w15:person"):
+            if person_elem.getAttribute("w15:author") == author:
+                return True
+        return False
+
+    def _add_author_to_people(self, author):
+        """Add author to people.xml (called during initialization)."""
+        people_path = self.word_path / "people.xml"
+
+        # people.xml should already exist from _setup_tracking
+        if not people_path.exists():
+            raise ValueError("people.xml should exist after _setup_tracking")
+
+        editor = self["word/people.xml"]
+        root = editor.get_node(tag="w15:people")
+
+        # Check if author already exists
+        if self._has_author(editor, author):
+            return
+
+        # Add author with proper XML escaping to prevent injection
+        # (person entry markup reconstructed)
+        escaped_author = html.escape(author, quote=True)
+        person_xml = f'''<w15:person w15:author="{escaped_author}">
+    <w15:presenceInfo w15:providerId="None" w15:userId="{escaped_author}"/>
+</w15:person>'''
+        editor.append_to(root, person_xml)
+
+    def _ensure_comment_relationships(self):
+        """Ensure word/_rels/document.xml.rels has comment relationships."""
+        editor = self["word/_rels/document.xml.rels"]
+
+        if self._has_relationship(editor, "comments.xml"):
+            return
+
+        root = editor.dom.documentElement
+        root_tag = root.tagName  # type: ignore
+        prefix = root_tag.split(":")[0] + ":" if ":" in root_tag else ""
+        next_rid_num = int(editor.get_next_rid()[3:])
+
+        # Add relationship elements
+        rels = [
+            (
+                next_rid_num,
+                "http://schemas.openxmlformats.org/officeDocument/2006/relationships/comments",
+                "comments.xml",
+            ),
+            (
+                next_rid_num + 1,
+                "http://schemas.microsoft.com/office/2011/relationships/commentsExtended",
+                "commentsExtended.xml",
+            ),
+            (
+                next_rid_num + 2,
"http://schemas.microsoft.com/office/2016/09/relationships/commentsIds", + "commentsIds.xml", + ), + ( + next_rid_num + 3, + "http://schemas.microsoft.com/office/2018/08/relationships/commentsExtensible", + "commentsExtensible.xml", + ), + ] + + for rel_id, rel_type, target in rels: + rel_xml = f'<{prefix}Relationship Id="rId{rel_id}" Type="{rel_type}" Target="{target}"/>' + editor.append_to(root, rel_xml) + + def _ensure_comment_content_types(self): + """Ensure [Content_Types].xml has comment content types.""" + editor = self["[Content_Types].xml"] + + if self._has_override(editor, "/word/comments.xml"): + return + + root = editor.dom.documentElement + + # Add Override elements + overrides = [ + ( + "/word/comments.xml", + "application/vnd.openxmlformats-officedocument.wordprocessingml.comments+xml", + ), + ( + "/word/commentsExtended.xml", + "application/vnd.openxmlformats-officedocument.wordprocessingml.commentsExtended+xml", + ), + ( + "/word/commentsIds.xml", + "application/vnd.openxmlformats-officedocument.wordprocessingml.commentsIds+xml", + ), + ( + "/word/commentsExtensible.xml", + "application/vnd.openxmlformats-officedocument.wordprocessingml.commentsExtensible+xml", + ), + ] + + for part_name, content_type in overrides: + override_xml = ( + f'' + ) + editor.append_to(root, override_xml) diff --git a/.claude/skills/docx/scripts/templates/comments.xml b/.claude/skills/docx/scripts/templates/comments.xml new file mode 100644 index 0000000..b5dace0 --- /dev/null +++ b/.claude/skills/docx/scripts/templates/comments.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/.claude/skills/docx/scripts/templates/commentsExtended.xml b/.claude/skills/docx/scripts/templates/commentsExtended.xml new file mode 100644 index 0000000..b4cf23e --- /dev/null +++ b/.claude/skills/docx/scripts/templates/commentsExtended.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/.claude/skills/docx/scripts/templates/commentsExtensible.xml b/.claude/skills/docx/scripts/templates/commentsExtensible.xml new file mode 100644 index 0000000..e32a05e --- /dev/null +++ b/.claude/skills/docx/scripts/templates/commentsExtensible.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/.claude/skills/docx/scripts/templates/commentsIds.xml b/.claude/skills/docx/scripts/templates/commentsIds.xml new file mode 100644 index 0000000..d04bc8e --- /dev/null +++ b/.claude/skills/docx/scripts/templates/commentsIds.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/.claude/skills/docx/scripts/templates/people.xml b/.claude/skills/docx/scripts/templates/people.xml new file mode 100644 index 0000000..a839caf --- /dev/null +++ b/.claude/skills/docx/scripts/templates/people.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/.claude/skills/docx/scripts/utilities.py b/.claude/skills/docx/scripts/utilities.py new file mode 100644 index 0000000..d92dae6 --- /dev/null +++ b/.claude/skills/docx/scripts/utilities.py @@ -0,0 +1,374 @@ +#!/usr/bin/env python3 +""" +Utilities for editing OOXML documents. + +This module provides XMLEditor, a tool for manipulating XML files with support for +line-number-based node finding and DOM manipulation. Each element is automatically +annotated with its original line and column position during parsing. 
+
+Example usage:
+    editor = XMLEditor("document.xml")
+
+    # Find node by line number or range
+    elem = editor.get_node(tag="w:r", line_number=519)
+    elem = editor.get_node(tag="w:p", line_number=range(100, 200))
+
+    # Find node by text content
+    elem = editor.get_node(tag="w:p", contains="specific text")
+
+    # Find node by attributes
+    elem = editor.get_node(tag="w:r", attrs={"w:id": "target"})
+
+    # Combine filters
+    elem = editor.get_node(tag="w:p", line_number=range(1, 50), contains="text")
+
+    # Replace, insert, or manipulate
+    new_elem = editor.replace_node(elem, "<w:r><w:t>new text</w:t></w:r>")
+    editor.insert_after(new_elem, "<w:r><w:t>more</w:t></w:r>")
+
+    # Save changes
+    editor.save()
+"""
+
+import html
+from pathlib import Path
+from typing import Optional, Union
+
+import defusedxml.minidom
+import defusedxml.sax
+
+
+class XMLEditor:
+    """
+    Editor for manipulating OOXML XML files with line-number-based node finding.
+
+    This class parses XML files and tracks the original line and column position
+    of each element. This enables finding nodes by their line number in the original
+    file, which is useful when working with Read tool output.
+
+    Attributes:
+        xml_path: Path to the XML file being edited
+        encoding: Detected encoding of the XML file ('ascii' or 'utf-8')
+        dom: Parsed DOM tree with parse_position attributes on elements
+    """
+
+    def __init__(self, xml_path):
+        """
+        Initialize with path to XML file and parse with line number tracking.
+
+        Args:
+            xml_path: Path to XML file to edit (str or Path)
+
+        Raises:
+            ValueError: If the XML file does not exist
+        """
+        self.xml_path = Path(xml_path)
+        if not self.xml_path.exists():
+            raise ValueError(f"XML file not found: {xml_path}")
+
+        with open(self.xml_path, "rb") as f:
+            header = f.read(200).decode("utf-8", errors="ignore")
+        self.encoding = "ascii" if 'encoding="ascii"' in header else "utf-8"
+
+        parser = _create_line_tracking_parser()
+        self.dom = defusedxml.minidom.parse(str(self.xml_path), parser)
+
+    def get_node(
+        self,
+        tag: str,
+        attrs: Optional[dict[str, str]] = None,
+        line_number: Optional[Union[int, range]] = None,
+        contains: Optional[str] = None,
+    ):
+        """
+        Get a DOM element by tag and identifier.
+
+        Finds an element by either its line number in the original file or by
+        matching attribute values. Exactly one match must be found.
+
+        Args:
+            tag: The XML tag name (e.g., "w:del", "w:ins", "w:r")
+            attrs: Dictionary of attribute name-value pairs to match (e.g., {"w:id": "1"})
+            line_number: Line number (int) or line range (range) in original XML file (1-indexed)
+            contains: Text string that must appear in any text node within the element.
+                Supports both entity notation ("&#8220;") and Unicode characters ("\u201c").
+
+        Returns:
+            defusedxml.minidom.Element: The matching DOM element
+
+        Raises:
+            ValueError: If node not found or multiple matches found
+
+        Example:
+            elem = editor.get_node(tag="w:r", line_number=519)
+            elem = editor.get_node(tag="w:r", line_number=range(100, 200))
+            elem = editor.get_node(tag="w:del", attrs={"w:id": "1"})
+            elem = editor.get_node(tag="w:p", attrs={"w14:paraId": "12345678"})
+            elem = editor.get_node(tag="w:commentRangeStart", attrs={"w:id": "0"})
+            elem = editor.get_node(tag="w:p", contains="specific text")
+            elem = editor.get_node(tag="w:t", contains="&#8220;Agreement")   # Entity notation
+            elem = editor.get_node(tag="w:t", contains="\u201cAgreement")  # Unicode character
+        """
+        matches = []
+        for elem in self.dom.getElementsByTagName(tag):
+            # Check line_number filter
+            if line_number is not None:
+                parse_pos = getattr(elem, "parse_position", (None,))
+                elem_line = parse_pos[0]
+
+                # Handle both single line number and range
+                if isinstance(line_number, range):
+                    if elem_line not in line_number:
+                        continue
+                else:
+                    if elem_line != line_number:
+                        continue
+
+            # Check attrs filter
+            if attrs is not None:
+                if not all(
+                    elem.getAttribute(attr_name) == attr_value
+                    for attr_name, attr_value in attrs.items()
+                ):
+                    continue
+
+            # Check contains filter
+            if contains is not None:
+                elem_text = self._get_element_text(elem)
+                # Normalize the search string: convert HTML entities to Unicode characters
+                # This allows searching for both "&#8220;Rowan" and "\u201cRowan"
+                normalized_contains = html.unescape(contains)
+                if normalized_contains not in elem_text:
+                    continue
+
+            # If all applicable filters passed, this is a match
+            matches.append(elem)
+
+        if not matches:
+            # Build descriptive error message
+            filters = []
+            if line_number is not None:
+                line_str = (
+                    f"lines {line_number.start}-{line_number.stop - 1}"
+                    if isinstance(line_number, range)
+                    else f"line {line_number}"
+                )
+                filters.append(f"at {line_str}")
+            if attrs is not None:
+                filters.append(f"with attributes {attrs}")
+            if contains is not None:
+                filters.append(f"containing '{contains}'")
+
+            filter_desc = " ".join(filters) if filters else ""
+            base_msg = f"Node not found: <{tag}> {filter_desc}".strip()
+
+            # Add helpful hint based on filters used
+            if contains:
+                hint = "Text may be split across elements or use different wording."
+            elif line_number:
+                hint = "Line numbers may have changed if document was modified."
+            elif attrs:
+                hint = "Verify attribute values are correct."
+            else:
+                hint = "Try adding filters (attrs, line_number, or contains)."
+
+            raise ValueError(f"{base_msg}. {hint}")
+        if len(matches) > 1:
+            raise ValueError(
+                f"Multiple nodes found: <{tag}>. "
+                f"Add more filters (attrs, line_number, or contains) to narrow the search."
+            )
+        return matches[0]
+
+    def _get_element_text(self, elem):
+        """
+        Recursively extract all text content from an element.
+
+        Skips text nodes that contain only whitespace (spaces, tabs, newlines),
+        which typically represent XML formatting rather than document content.
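+        For example, a pretty-printed <w:r> containing <w:t>Hello</w:t> yields
+        "Hello" with none of the surrounding indentation.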
+
+        Args:
+            elem: defusedxml.minidom.Element to extract text from
+
+        Returns:
+            str: Concatenated text from all non-whitespace text nodes within the element
+        """
+        text_parts = []
+        for node in elem.childNodes:
+            if node.nodeType == node.TEXT_NODE:
+                # Skip whitespace-only text nodes (XML formatting)
+                if node.data.strip():
+                    text_parts.append(node.data)
+            elif node.nodeType == node.ELEMENT_NODE:
+                text_parts.append(self._get_element_text(node))
+        return "".join(text_parts)
+
+    def replace_node(self, elem, new_content):
+        """
+        Replace a DOM element with new XML content.
+
+        Args:
+            elem: defusedxml.minidom.Element to replace
+            new_content: String containing XML to replace the node with
+
+        Returns:
+            List[defusedxml.minidom.Node]: All inserted nodes
+
+        Example:
+            new_nodes = editor.replace_node(old_elem, "<w:r><w:t>text</w:t></w:r>")
+        """
+        parent = elem.parentNode
+        nodes = self._parse_fragment(new_content)
+        for node in nodes:
+            parent.insertBefore(node, elem)
+        parent.removeChild(elem)
+        return nodes
+
+    def insert_after(self, elem, xml_content):
+        """
+        Insert XML content after a DOM element.
+
+        Args:
+            elem: defusedxml.minidom.Element to insert after
+            xml_content: String containing XML to insert
+
+        Returns:
+            List[defusedxml.minidom.Node]: All inserted nodes
+
+        Example:
+            new_nodes = editor.insert_after(elem, "<w:r><w:t>text</w:t></w:r>")
+        """
+        parent = elem.parentNode
+        next_sibling = elem.nextSibling
+        nodes = self._parse_fragment(xml_content)
+        for node in nodes:
+            if next_sibling:
+                parent.insertBefore(node, next_sibling)
+            else:
+                parent.appendChild(node)
+        return nodes
+
+    def insert_before(self, elem, xml_content):
+        """
+        Insert XML content before a DOM element.
+
+        Args:
+            elem: defusedxml.minidom.Element to insert before
+            xml_content: String containing XML to insert
+
+        Returns:
+            List[defusedxml.minidom.Node]: All inserted nodes
+
+        Example:
+            new_nodes = editor.insert_before(elem, "<w:r><w:t>text</w:t></w:r>")
+        """
+        parent = elem.parentNode
+        nodes = self._parse_fragment(xml_content)
+        for node in nodes:
+            parent.insertBefore(node, elem)
+        return nodes
+
+    def append_to(self, elem, xml_content):
+        """
+        Append XML content as a child of a DOM element.
+
+        Args:
+            elem: defusedxml.minidom.Element to append to
+            xml_content: String containing XML to append
+
+        Returns:
+            List[defusedxml.minidom.Node]: All inserted nodes
+
+        Example:
+            new_nodes = editor.append_to(elem, "<w:r><w:t>text</w:t></w:r>")
+        """
+        nodes = self._parse_fragment(xml_content)
+        for node in nodes:
+            elem.appendChild(node)
+        return nodes
+
+    def get_next_rid(self):
+        """Get the next available rId for relationships files."""
+        max_id = 0
+        for rel_elem in self.dom.getElementsByTagName("Relationship"):
+            rel_id = rel_elem.getAttribute("Id")
+            if rel_id.startswith("rId"):
+                try:
+                    max_id = max(max_id, int(rel_id[3:]))
+                except ValueError:
+                    pass
+        return f"rId{max_id + 1}"
+
+    def save(self):
+        """
+        Save the edited XML back to the file.
+
+        Serializes the DOM tree and writes it back to the original file path,
+        preserving the original encoding (ascii or utf-8).
+        """
+        content = self.dom.toxml(encoding=self.encoding)
+        self.xml_path.write_bytes(content)
+
+    def _parse_fragment(self, xml_content):
+        """
+        Parse XML fragment and return list of imported nodes.
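+        The fragment is wrapped in a temporary root element that carries the
+        document's namespace declarations, so prefixes such as "w:" parse cleanly.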
+
+        Args:
+            xml_content: String containing XML fragment
+
+        Returns:
+            List of defusedxml.minidom.Node objects imported into this document
+
+        Raises:
+            AssertionError: If fragment contains no element nodes
+        """
+        # Extract namespace declarations from the root document element
+        root_elem = self.dom.documentElement
+        namespaces = []
+        if root_elem and root_elem.attributes:
+            for i in range(root_elem.attributes.length):
+                attr = root_elem.attributes.item(i)
+                if attr.name.startswith("xmlns"):  # type: ignore
+                    namespaces.append(f'{attr.name}="{attr.value}"')  # type: ignore
+
+        ns_decl = " ".join(namespaces)
+        # Wrap the fragment in a temporary root carrying the namespace
+        # declarations (reconstructed; the original wrapper tag name is unknown)
+        wrapper = f"<root {ns_decl}>{xml_content}</root>"
+        fragment_doc = defusedxml.minidom.parseString(wrapper)
+        nodes = [
+            self.dom.importNode(child, deep=True)
+            for child in fragment_doc.documentElement.childNodes  # type: ignore
+        ]
+        elements = [n for n in nodes if n.nodeType == n.ELEMENT_NODE]
+        assert elements, "Fragment must contain at least one element"
+        return nodes
+
+
+def _create_line_tracking_parser():
+    """
+    Create a SAX parser that tracks line and column numbers for each element.
+
+    Monkey patches the SAX content handler to store the current line and column
+    position from the underlying expat parser onto each element as a parse_position
+    attribute (line, column) tuple.
+
+    Returns:
+        defusedxml.sax.xmlreader.XMLReader: Configured SAX parser
+    """
+
+    def set_content_handler(dom_handler):
+        def startElementNS(name, tagName, attrs):
+            orig_start_cb(name, tagName, attrs)
+            cur_elem = dom_handler.elementStack[-1]
+            cur_elem.parse_position = (
+                parser._parser.CurrentLineNumber,  # type: ignore
+                parser._parser.CurrentColumnNumber,  # type: ignore
+            )
+
+        orig_start_cb = dom_handler.startElementNS
+        dom_handler.startElementNS = startElementNS
+        orig_set_content_handler(dom_handler)
+
+    parser = defusedxml.sax.make_parser()
+    orig_set_content_handler = parser.setContentHandler
+    parser.setContentHandler = set_content_handler  # type: ignore
+    return parser
diff --git a/.claude/skills/frontend-design/LICENSE.txt b/.claude/skills/frontend-design/LICENSE.txt
new file mode 100644
index 0000000..f433b1a
--- /dev/null
+++ b/.claude/skills/frontend-design/LICENSE.txt
@@ -0,0 +1,177 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/.claude/skills/frontend-design/SKILL.md b/.claude/skills/frontend-design/SKILL.md new file mode 100644 index 0000000..5be498e --- /dev/null +++ b/.claude/skills/frontend-design/SKILL.md @@ -0,0 +1,42 @@ +--- +name: frontend-design +description: Create distinctive, production-grade frontend interfaces with high design quality. Use this skill when the user asks to build web components, pages, artifacts, posters, or applications (examples include websites, landing pages, dashboards, React components, HTML/CSS layouts, or when styling/beautifying any web UI). Generates creative, polished code and UI design that avoids generic AI aesthetics. +license: Complete terms in LICENSE.txt +--- + +This skill guides creation of distinctive, production-grade frontend interfaces that avoid generic "AI slop" aesthetics. Implement real working code with exceptional attention to aesthetic details and creative choices. + +The user provides frontend requirements: a component, page, application, or interface to build. They may include context about the purpose, audience, or technical constraints. + +## Design Thinking + +Before coding, understand the context and commit to a BOLD aesthetic direction: +- **Purpose**: What problem does this interface solve? Who uses it? +- **Tone**: Pick an extreme: brutally minimal, maximalist chaos, retro-futuristic, organic/natural, luxury/refined, playful/toy-like, editorial/magazine, brutalist/raw, art deco/geometric, soft/pastel, industrial/utilitarian, etc. There are so many flavors to choose from. Use these for inspiration but design one that is true to the aesthetic direction. +- **Constraints**: Technical requirements (framework, performance, accessibility). +- **Differentiation**: What makes this UNFORGETTABLE? What's the one thing someone will remember? + +**CRITICAL**: Choose a clear conceptual direction and execute it with precision. Bold maximalism and refined minimalism both work - the key is intentionality, not intensity. + +Then implement working code (HTML/CSS/JS, React, Vue, etc.) 
that is: +- Production-grade and functional +- Visually striking and memorable +- Cohesive with a clear aesthetic point-of-view +- Meticulously refined in every detail + +## Frontend Aesthetics Guidelines + +Focus on: +- **Typography**: Choose fonts that are beautiful, unique, and interesting. Avoid generic fonts like Arial and Inter; opt instead for distinctive choices that elevate the frontend's aesthetics; unexpected, characterful font choices. Pair a distinctive display font with a refined body font. +- **Color & Theme**: Commit to a cohesive aesthetic. Use CSS variables for consistency. Dominant colors with sharp accents outperform timid, evenly-distributed palettes. +- **Motion**: Use animations for effects and micro-interactions. Prioritize CSS-only solutions for HTML. Use Motion library for React when available. Focus on high-impact moments: one well-orchestrated page load with staggered reveals (animation-delay) creates more delight than scattered micro-interactions. Use scroll-triggering and hover states that surprise. +- **Spatial Composition**: Unexpected layouts. Asymmetry. Overlap. Diagonal flow. Grid-breaking elements. Generous negative space OR controlled density. +- **Backgrounds & Visual Details**: Create atmosphere and depth rather than defaulting to solid colors. Add contextual effects and textures that match the overall aesthetic. Apply creative forms like gradient meshes, noise textures, geometric patterns, layered transparencies, dramatic shadows, decorative borders, custom cursors, and grain overlays. + +NEVER use generic AI-generated aesthetics like overused font families (Inter, Roboto, Arial, system fonts), cliched color schemes (particularly purple gradients on white backgrounds), predictable layouts and component patterns, and cookie-cutter design that lacks context-specific character. + +Interpret creatively and make unexpected choices that feel genuinely designed for the context. No design should be the same. Vary between light and dark themes, different fonts, different aesthetics. NEVER converge on common choices (Space Grotesk, for example) across generations. + +**IMPORTANT**: Match implementation complexity to the aesthetic vision. Maximalist designs need elaborate code with extensive animations and effects. Minimalist or refined designs need restraint, precision, and careful attention to spacing, typography, and subtle details. Elegance comes from executing the vision well. + +Remember: Claude is capable of extraordinary creative work. Don't hold back, show what can truly be created when thinking outside the box and committing fully to a distinctive vision. diff --git a/.claude/skills/internal-comms/LICENSE.txt b/.claude/skills/internal-comms/LICENSE.txt new file mode 100644 index 0000000..7a4a3ea --- /dev/null +++ b/.claude/skills/internal-comms/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/.claude/skills/internal-comms/SKILL.md b/.claude/skills/internal-comms/SKILL.md new file mode 100644 index 0000000..56ea935 --- /dev/null +++ b/.claude/skills/internal-comms/SKILL.md @@ -0,0 +1,32 @@ +--- +name: internal-comms +description: A set of resources to help me write all kinds of internal communications, using the formats that my company likes to use. Claude should use this skill whenever asked to write some sort of internal communications (status reports, leadership updates, 3P updates, company newsletters, FAQs, incident reports, project updates, etc.). +license: Complete terms in LICENSE.txt +--- + +## When to use this skill +To write internal communications, use this skill for: +- 3P updates (Progress, Plans, Problems) +- Company newsletters +- FAQ responses +- Status reports +- Leadership updates +- Project updates +- Incident reports + +## How to use this skill + +To write any internal communication: + +1. **Identify the communication type** from the request +2. **Load the appropriate guideline file** from the `examples/` directory: + - `examples/3p-updates.md` - For Progress/Plans/Problems team updates + - `examples/company-newsletter.md` - For company-wide newsletters + - `examples/faq-answers.md` - For answering frequently asked questions + - `examples/general-comms.md` - For anything else that doesn't explicitly match one of the above +3. **Follow the specific instructions** in that file for formatting, tone, and content gathering + +If the communication type doesn't match any existing guideline, ask for clarification or more context about the desired format. + +## Keywords +3P updates, company newsletter, company comms, weekly update, faqs, common questions, updates, internal comms diff --git a/.claude/skills/internal-comms/examples/3p-updates.md b/.claude/skills/internal-comms/examples/3p-updates.md new file mode 100644 index 0000000..5329bfb --- /dev/null +++ b/.claude/skills/internal-comms/examples/3p-updates.md @@ -0,0 +1,47 @@ +## Instructions +You are being asked to write a 3P update. 3P updates stand for "Progress, Plans, Problems." The main audience is executives, leadership, other teammates, etc. They're meant to be very succinct and to-the-point: think something you can read in 30-60sec or less. They're also for people with some, but not a lot of, context on what the team does. + +3Ps can cover a team of any size, ranging all the way up to the entire company. The bigger the team, the less granular the tasks should be. For example, "mobile team" might have "shipped feature" or "fixed bugs," whereas the company might have really meaty 3Ps, like "hired 20 new people" or "closed 10 new deals." + +They represent the work of the team across a time period, almost always one week. They include three sections: +1) Progress: what the team has accomplished over the past time period. Focus mainly on things shipped, milestones achieved, tasks created, etc. +2) Plans: what the team plans to do over the next time period. Focus on what things are top-of-mind, really high priority, etc. for the team. +3) Problems: anything that is slowing the team down. This could be things like too few people, bugs or blockers that are preventing the team from moving forward, some deal that fell through, etc. + +Before writing them, make sure that you know the team name. If it's not specified, you can ask explicitly for the name of the team you're writing for.
+ + +## Tools Available +Whenever possible, try to pull from available sources to get the information you need: +- Slack: posts from team members with their updates - ideally look for posts in large channels with lots of reactions +- Google Drive: docs written by critical team members with lots of views +- Email: emails with lots of responses or lots of content that seems relevant +- Calendar: non-recurring meetings that have a lot of importance, like product reviews, etc. + + +Try to gather as much context as you can, focusing on the things that cover the time period you're writing for: +- Progress: anything between a week ago and today +- Plans: anything from today to the next week +- Problems: anything between a week ago and today + + +If you don't have access, you can ask the user for things they want to cover. They might also include these things to you directly, in which case you're mostly just reformatting them into this particular format. + +## Workflow + +1. **Clarify scope**: Confirm the team name and time period (usually past week for Progress/Problems, next +week for Plans) +2. **Gather information**: Use available tools or ask the user directly +3. **Draft the update**: Follow the strict formatting guidelines +4. **Review**: Ensure it's concise (30-60 seconds to read) and data-driven + +## Formatting + +The format is always the same, very strict formatting. Never use any formatting other than this. Pick an emoji that is fun and captures the vibe of the team and update. + +[pick an emoji] [Team Name] (Dates Covered, usually a week) +Progress: [1-3 sentences of content] +Plans: [1-3 sentences of content] +Problems: [1-3 sentences of content] + +Each section should be no more than 1-3 sentences: clear, to the point. It should be data-driven, and generally include metrics where possible. The tone should be very matter-of-fact, not super prose-heavy. \ No newline at end of file diff --git a/.claude/skills/internal-comms/examples/company-newsletter.md b/.claude/skills/internal-comms/examples/company-newsletter.md new file mode 100644 index 0000000..4997a07 --- /dev/null +++ b/.claude/skills/internal-comms/examples/company-newsletter.md @@ -0,0 +1,65 @@ +## Instructions +You are being asked to write a company-wide newsletter update. You are meant to summarize the past week/month of a company in the form of a newsletter that the entire company will read. It should be maybe ~20-25 bullet points long. It will be sent via Slack and email, so make it consumable for that. + +Ideally it includes the following attributes: +- Lots of links: pulling documents from Google Drive that are very relevant, linking to prominent Slack messages in announce channels and from executives, perhaps referencing emails that went company-wide, highlighting significant things that have happened in the company. +- Short and to-the-point: each bullet should probably be no longer than ~1-2 sentences +- Use the "we" voice, as you are part of the company. Many of the bullets should say "we did this" or "we did that" + +## Tools to use +If you have access to the following tools, please try to use them. If not, you can also let the user know directly that your responses would be better if they gave you access.
+ +- Slack: look for messages in channels with lots of people, with lots of reactions or lots of responses within the thread +- Email: look for things from executives that discuss company-wide announcements +- Calendar: if there were meetings with large attendee lists, particularly things like All-Hands meetings, big company announcements, etc. If there were documents attached to those meetings, those are great links to include. +- Documents: if there were new docs published in the last week or two that got a lot of attention, you can link them. These should be things like company-wide vision docs, plans for the upcoming quarter or half, things authored by critical executives, etc. +- External press: if you see references to articles or press we've received over the past week, that could be really cool too. + +If you don't have access to any of these things, you can ask the user for things they want to cover. In this case, you'll mostly just be polishing up and fitting to this format more directly. + +## Sections +The company is pretty big: 1000+ people. There are a variety of different teams and initiatives going on across the company. To make sure the update works well, try breaking it into sections of similar things. You might break into clusters like {product development, go to market, finance} or {recruiting, execution, vision}, or {external news, internal news}, etc. Try to make sure the different areas of the company are highlighted well. + +## Prioritization +Focus on: +- Company-wide impact (not team-specific details) +- Announcements from leadership +- Major milestones and achievements +- Information that affects most employees +- External recognition or press + +Avoid: +- Overly granular team updates (save those for 3Ps) +- Information only relevant to small groups +- Duplicate information already communicated + +## Example Formats + +:megaphone: Company Announcements +- Announcement 1 +- Announcement 2 +- Announcement 3 + +:dart: Progress on Priorities +- Area 1 + - Sub-area 1 + - Sub-area 2 + - Sub-area 3 +- Area 2 + - Sub-area 1 + - Sub-area 2 + - Sub-area 3 +- Area 3 + - Sub-area 1 + - Sub-area 2 + - Sub-area 3 + +:pillar: Leadership Updates +- Post 1 +- Post 2 +- Post 3 + +:thread: Social Updates +- Update 1 +- Update 2 +- Update 3 diff --git a/.claude/skills/internal-comms/examples/faq-answers.md b/.claude/skills/internal-comms/examples/faq-answers.md new file mode 100644 index 0000000..395262a --- /dev/null +++ b/.claude/skills/internal-comms/examples/faq-answers.md @@ -0,0 +1,30 @@ +## Instructions +You are an assistant for answering questions that are being asked across the company. Every week, lots of questions come up, and we want our company to be well-informed and on the same page. Your job is to produce a set of frequently asked questions that our employees are asking and attempt to answer them. Concretely, that means two things: + +- Find questions that are big sources of confusion for lots of employees at the company, generally about things that affect a large portion of the employee base +- Attempt to give a nice summarized answer to each question in order to minimize confusion. + +Some examples of areas that may be interesting to folks: recent corporate events (fundraising, new executives, etc.), upcoming launches, hiring progress, changes to vision or focus, etc.
+ + +## Tools Available +You should use the company's available tools, where communication and work happen. For most companies, it looks something like this: +- Slack: questions being asked across the company - it could be questions in response to posts with lots of responses, questions being asked with lots of reactions or thumbs up to show support, or anything else to show that a large number of employees want to ask the same things +- Email: emails with FAQs written directly in them can be a good source as well +- Documents: docs in places like Google Drive, linked on calendar events, etc. can also be a good source of FAQs, either directly added or inferred based on the contents of the doc + +## Formatting +The formatting should be pretty basic: + +- *Question*: [insert question - 1 sentence] +- *Answer*: [insert answer - 1-2 sentences] + +## Guidance +Make sure you're being holistic in your questions. Don't focus too much on just the user in question or the team they are a part of, but try to capture the entire company. Read as broadly as you can across all the available tools, producing responses that are relevant to everyone at the company. + +## Answer Guidelines +- Base answers on official company communications when possible +- If information is uncertain, indicate that clearly +- Link to authoritative sources (docs, announcements, emails) +- Keep tone professional but approachable +- Flag if a question requires executive input or official response \ No newline at end of file diff --git a/.claude/skills/internal-comms/examples/general-comms.md b/.claude/skills/internal-comms/examples/general-comms.md new file mode 100644 index 0000000..0ea9770 --- /dev/null +++ b/.claude/skills/internal-comms/examples/general-comms.md @@ -0,0 +1,16 @@ + ## Instructions + You are being asked to write internal company communication that doesn't fit into the standard formats (3P + updates, newsletters, or FAQs). + + Before proceeding: + 1. Ask the user about their target audience + 2. Understand the communication's purpose + 3. Clarify the desired tone (formal, casual, urgent, informational) + 4. Confirm any specific formatting requirements + + Use these general principles: + - Be clear and concise + - Use active voice + - Put the most important information first + - Include relevant links and references + - Match the company's communication style \ No newline at end of file diff --git a/.claude/skills/mcp-builder/LICENSE.txt b/.claude/skills/mcp-builder/LICENSE.txt new file mode 100644 index 0000000..7a4a3ea --- /dev/null +++ b/.claude/skills/mcp-builder/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity.
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/.claude/skills/mcp-builder/SKILL.md b/.claude/skills/mcp-builder/SKILL.md new file mode 100644 index 0000000..8a1a77a --- /dev/null +++ b/.claude/skills/mcp-builder/SKILL.md @@ -0,0 +1,236 @@ +--- +name: mcp-builder +description: Guide for creating high-quality MCP (Model Context Protocol) servers that enable LLMs to interact with external services through well-designed tools. Use when building MCP servers to integrate external APIs or services, whether in Python (FastMCP) or Node/TypeScript (MCP SDK). +license: Complete terms in LICENSE.txt +--- + +# MCP Server Development Guide + +## Overview + +Create MCP (Model Context Protocol) servers that enable LLMs to interact with external services through well-designed tools. The quality of an MCP server is measured by how well it enables LLMs to accomplish real-world tasks. + +--- + +# Process + +## 🚀 High-Level Workflow + +Creating a high-quality MCP server involves four main phases: + +### Phase 1: Deep Research and Planning + +#### 1.1 Understand Modern MCP Design + +**API Coverage vs. Workflow Tools:** +Balance comprehensive API endpoint coverage with specialized workflow tools. Workflow tools can be more convenient for specific tasks, while comprehensive coverage gives agents flexibility to compose operations. Performance varies by client—some clients benefit from code execution that combines basic tools, while others work better with higher-level workflows. When uncertain, prioritize comprehensive API coverage. + +**Tool Naming and Discoverability:** +Clear, descriptive tool names help agents find the right tools quickly. Use consistent prefixes (e.g., `github_create_issue`, `github_list_repos`) and action-oriented naming. + +**Context Management:** +Agents benefit from concise tool descriptions and the ability to filter/paginate results. Design tools that return focused, relevant data. Some clients support code execution which can help agents filter and process data efficiently. + +**Actionable Error Messages:** +Error messages should guide agents toward solutions with specific suggestions and next steps. + +#### 1.2 Study MCP Protocol Documentation + +**Navigate the MCP specification:** + +Start with the sitemap to find relevant pages: `https://modelcontextprotocol.io/sitemap.xml` + +Then fetch specific pages with `.md` suffix for markdown format (e.g., `https://modelcontextprotocol.io/specification/draft.md`). + +Key pages to review: +- Specification overview and architecture +- Transport mechanisms (streamable HTTP, stdio) +- Tool, resource, and prompt definitions + +#### 1.3 Study Framework Documentation + +**Recommended stack:** +- **Language**: TypeScript (high-quality SDK support and good compatibility in many execution environments, e.g. MCPB; AI models are also good at generating TypeScript code, benefiting from its broad usage, static typing, and good linting tools) +- **Transport**: Streamable HTTP for remote servers, using stateless JSON (simpler to scale and maintain, as opposed to stateful sessions and streaming responses). stdio for local servers.
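+ +As a concrete starting point, the sketch below shows a minimal stdio server in this stack (TypeScript SDK plus Zod). It is illustrative only: the `example_list_items` tool, the `api.example.com` endpoint, and the error text are placeholder assumptions, not part of the SDK or of any real service. + +```typescript +import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; +import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; +import { z } from "zod"; + +// Hypothetical integration: server name, tool name, and endpoint are placeholders. +const server = new McpServer({ name: "example-mcp-server", version: "1.0.0" }); + +server.registerTool( +  "example_list_items", +  { +    description: "List items from the example service. Supports pagination via limit/offset.", +    inputSchema: { +      limit: z.number().int().min(1).max(50).default(20).describe("Maximum results per page"), +      offset: z.number().int().min(0).default(0).describe("Number of results to skip"), +    }, +    annotations: { readOnlyHint: true, openWorldHint: true }, +  }, +  async ({ limit, offset }) => { +    const res = await fetch(`https://api.example.com/items?limit=${limit}&offset=${offset}`); +    if (!res.ok) { +      // Actionable error: tell the agent what to try next, not just that it failed. +      return { +        content: [{ type: "text", text: `Request failed (HTTP ${res.status}). Verify the API key is set and retry with a smaller limit.` }], +        isError: true, +      }; +    } +    return { content: [{ type: "text", text: JSON.stringify(await res.json()) }] }; +  } +); + +const transport = new StdioServerTransport(); +await server.connect(transport); +``` + +The same tool definition should work behind a streamable HTTP transport with only the transport wiring swapped, which is what makes the stateless-JSON recommendation above cheap to adopt later.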
+ +**Load framework documentation:** + +- **MCP Best Practices**: [📋 View Best Practices](./reference/mcp_best_practices.md) - Core guidelines + +**For TypeScript (recommended):** +- **TypeScript SDK**: Use WebFetch to load `https://raw.githubusercontent.com/modelcontextprotocol/typescript-sdk/main/README.md` +- [⚡ TypeScript Guide](./reference/node_mcp_server.md) - TypeScript patterns and examples + +**For Python:** +- **Python SDK**: Use WebFetch to load `https://raw.githubusercontent.com/modelcontextprotocol/python-sdk/main/README.md` +- [🐍 Python Guide](./reference/python_mcp_server.md) - Python patterns and examples + +#### 1.4 Plan Your Implementation + +**Understand the API:** +Review the service's API documentation to identify key endpoints, authentication requirements, and data models. Use web search and WebFetch as needed. + +**Tool Selection:** +Prioritize comprehensive API coverage. List endpoints to implement, starting with the most common operations. + +--- + +### Phase 2: Implementation + +#### 2.1 Set Up Project Structure + +See language-specific guides for project setup: +- [⚡ TypeScript Guide](./reference/node_mcp_server.md) - Project structure, package.json, tsconfig.json +- [🐍 Python Guide](./reference/python_mcp_server.md) - Module organization, dependencies + +#### 2.2 Implement Core Infrastructure + +Create shared utilities: +- API client with authentication +- Error handling helpers +- Response formatting (JSON/Markdown) +- Pagination support + +#### 2.3 Implement Tools + +For each tool: + +**Input Schema:** +- Use Zod (TypeScript) or Pydantic (Python) +- Include constraints and clear descriptions +- Add examples in field descriptions + +**Output Schema:** +- Define `outputSchema` where possible for structured data +- Use `structuredContent` in tool responses (TypeScript SDK feature) +- Helps clients understand and process tool outputs + +**Tool Description:** +- Concise summary of functionality +- Parameter descriptions +- Return type schema + +**Implementation:** +- Async/await for I/O operations +- Proper error handling with actionable messages +- Support pagination where applicable +- Return both text content and structured data when using modern SDKs + +**Annotations:** +- `readOnlyHint`: true/false +- `destructiveHint`: true/false +- `idempotentHint`: true/false +- `openWorldHint`: true/false + +--- + +### Phase 3: Review and Test + +#### 3.1 Code Quality + +Review for: +- No duplicated code (DRY principle) +- Consistent error handling +- Full type coverage +- Clear tool descriptions + +#### 3.2 Build and Test + +**TypeScript:** +- Run `npm run build` to verify compilation +- Test with MCP Inspector: `npx @modelcontextprotocol/inspector` + +**Python:** +- Verify syntax: `python -m py_compile your_server.py` +- Test with MCP Inspector + +See language-specific guides for detailed testing approaches and quality checklists. + +--- + +### Phase 4: Create Evaluations + +After implementing your MCP server, create comprehensive evaluations to test its effectiveness. + +**Load [✅ Evaluation Guide](./reference/evaluation.md) for complete evaluation guidelines.** + +#### 4.1 Understand Evaluation Purpose + +Use evaluations to test whether LLMs can effectively use your MCP server to answer realistic, complex questions. + +#### 4.2 Create 10 Evaluation Questions + +To create effective evaluations, follow the process outlined in the evaluation guide: + +1. **Tool Inspection**: List available tools and understand their capabilities +2. 
**Content Exploration**: Use READ-ONLY operations to explore available data +3. **Question Generation**: Create 10 complex, realistic questions +4. **Answer Verification**: Solve each question yourself to verify answers + +#### 4.3 Evaluation Requirements + +Ensure each question is: +- **Independent**: Not dependent on other questions +- **Read-only**: Only non-destructive operations required +- **Complex**: Requiring multiple tool calls and deep exploration +- **Realistic**: Based on real use cases humans would care about +- **Verifiable**: Single, clear answer that can be verified by string comparison +- **Stable**: Answer won't change over time + +#### 4.4 Output Format + +Create an XML file with this structure: + +```xml +<evaluation> + <qa_pair> + <question>Find discussions about AI model launches with animal codenames. One model needed a specific safety designation that uses the format ASL-X. What number X was being determined for the model named after a spotted wild cat?</question> + <answer>3</answer> + </qa_pair> +</evaluation> +``` + +--- + +# Reference Files + +## 📚 Documentation Library + +Load these resources as needed during development: + +### Core MCP Documentation (Load First) +- **MCP Protocol**: Start with sitemap at `https://modelcontextprotocol.io/sitemap.xml`, then fetch specific pages with `.md` suffix +- [📋 MCP Best Practices](./reference/mcp_best_practices.md) - Universal MCP guidelines including: + - Server and tool naming conventions + - Response format guidelines (JSON vs Markdown) + - Pagination best practices + - Transport selection (streamable HTTP vs stdio) + - Security and error handling standards + +### SDK Documentation (Load During Phase 1/2) +- **Python SDK**: Fetch from `https://raw.githubusercontent.com/modelcontextprotocol/python-sdk/main/README.md` +- **TypeScript SDK**: Fetch from `https://raw.githubusercontent.com/modelcontextprotocol/typescript-sdk/main/README.md` + +### Language-Specific Implementation Guides (Load During Phase 2) +- [🐍 Python Implementation Guide](./reference/python_mcp_server.md) - Complete Python/FastMCP guide with: + - Server initialization patterns + - Pydantic model examples + - Tool registration with `@mcp.tool` + - Complete working examples + - Quality checklist + +- [⚡ TypeScript Implementation Guide](./reference/node_mcp_server.md) - Complete TypeScript guide with: + - Project structure + - Zod schema patterns + - Tool registration with `server.registerTool` + - Complete working examples + - Quality checklist + +### Evaluation Guide (Load During Phase 4) +- [✅ Evaluation Guide](./reference/evaluation.md) - Complete evaluation creation guide with: + - Question creation guidelines + - Answer verification strategies + - XML format specifications + - Example questions and answers + - Running an evaluation with the provided scripts diff --git a/.claude/skills/mcp-builder/reference/evaluation.md b/.claude/skills/mcp-builder/reference/evaluation.md new file mode 100644 index 0000000..87e9bb7 --- /dev/null +++ b/.claude/skills/mcp-builder/reference/evaluation.md @@ -0,0 +1,602 @@ +# MCP Server Evaluation Guide + +## Overview + +This document provides guidance on creating comprehensive evaluations for MCP servers. Evaluations test whether LLMs can effectively use your MCP server to answer realistic, complex questions using only the tools provided.
+ +--- + +## Quick Reference + +### Evaluation Requirements +- Create 10 human-readable questions +- Questions must be READ-ONLY, INDEPENDENT, NON-DESTRUCTIVE +- Each question requires multiple tool calls (potentially dozens) +- Answers must be single, verifiable values +- Answers must be STABLE (won't change over time) + +### Output Format +```xml +<evaluation> + <qa_pair> + <question>Your question here</question> + <answer>Single verifiable answer</answer> + </qa_pair> +</evaluation> +``` + +--- + +## Purpose of Evaluations + +The measure of quality of an MCP server is NOT how well or comprehensively the server implements tools, but how well these implementations (input/output schemas, docstrings/descriptions, functionality) enable LLMs with no other context and access ONLY to the MCP servers to answer realistic and difficult questions. + +## Evaluation Overview + +Create 10 human-readable questions requiring ONLY READ-ONLY, INDEPENDENT, NON-DESTRUCTIVE, and IDEMPOTENT operations to answer. Each question should be: +- Realistic +- Clear and concise +- Unambiguous +- Complex, requiring potentially dozens of tool calls or steps +- Answerable with a single, verifiable value that you identify in advance + +## Question Guidelines + +### Core Requirements + +1. **Questions MUST be independent** + - Each question should NOT depend on the answer to any other question + - Should not assume prior write operations from processing another question + +2. **Questions MUST require ONLY NON-DESTRUCTIVE AND IDEMPOTENT tool use** + - Should not instruct or require modifying state to arrive at the correct answer + +3. **Questions must be REALISTIC, CLEAR, CONCISE, and COMPLEX** + - Must require another LLM to use multiple (potentially dozens of) tools or steps to answer + +### Complexity and Depth + +4. **Questions must require deep exploration** + - Consider multi-hop questions requiring multiple sub-questions and sequential tool calls + - Each step should benefit from information found in previous steps + +5. **Questions may require extensive paging** + - May need paging through multiple pages of results + - May require querying old data (1-2 years out-of-date) to find niche information + - The questions must be DIFFICULT + +6. **Questions must require deep understanding** + - Rather than surface-level knowledge + - May pose complex ideas as True/False questions requiring evidence + - May use multiple-choice format where LLM must search different hypotheses + +7. **Questions must not be solvable with straightforward keyword search** + - Do not include specific keywords from the target content + - Use synonyms, related concepts, or paraphrases + - Require multiple searches, analyzing multiple related items, extracting context, then deriving the answer + +### Tool Testing + +8. **Questions should stress-test tool return values** + - May elicit tools returning large JSON objects or lists, overwhelming the LLM + - Should require understanding multiple modalities of data: + - IDs and names + - Timestamps and datetimes (months, days, years, seconds) + - File IDs, names, extensions, and mimetypes + - URLs, GIDs, etc. + - Should probe the tool's ability to return all useful forms of data + +9. **Questions should MOSTLY reflect real human use cases** + - The kinds of information retrieval tasks that HUMANS assisted by an LLM would care about + +10. **Questions may require dozens of tool calls** + - This challenges LLMs with limited context + - Encourages MCP server tools to reduce information returned + +11.
**Include ambiguous questions** + - May be ambiguous OR require difficult decisions on which tools to call + - Force the LLM to potentially make mistakes or misinterpret + - Ensure that despite AMBIGUITY, there is STILL A SINGLE VERIFIABLE ANSWER + +### Stability + +12. **Questions must be designed so the answer DOES NOT CHANGE** + - Do not ask questions that rely on "current state" which is dynamic + - For example, do not count: + - Number of reactions to a post + - Number of replies to a thread + - Number of members in a channel + +13. **DO NOT let the MCP server RESTRICT the kinds of questions you create** + - Create challenging and complex questions + - Some may not be solvable with the available MCP server tools + - Questions may require specific output formats (datetime vs. epoch time, JSON vs. MARKDOWN) + - Questions may require dozens of tool calls to complete + +## Answer Guidelines + +### Verification + +1. **Answers must be VERIFIABLE via direct string comparison** + - If the answer can be re-written in many formats, clearly specify the output format in the QUESTION + - Examples: "Use YYYY/MM/DD.", "Respond True or False.", "Answer A, B, C, or D and nothing else." + - Answer should be a single VERIFIABLE value such as: + - User ID, user name, display name, first name, last name + - Channel ID, channel name + - Message ID, string + - URL, title + - Numerical quantity + - Timestamp, datetime + - Boolean (for True/False questions) + - Email address, phone number + - File ID, file name, file extension + - Multiple choice answer + - Answers must not require special formatting or complex, structured output + - Answer will be verified using DIRECT STRING COMPARISON + +### Readability + +2. **Answers should generally prefer HUMAN-READABLE formats** + - Examples: names, first name, last name, datetime, file name, message string, URL, yes/no, true/false, a/b/c/d + - Rather than opaque IDs (though IDs are acceptable) + - The VAST MAJORITY of answers should be human-readable + +### Stability + +3. **Answers must be STABLE/STATIONARY** + - Look at old content (e.g., conversations that have ended, projects that have launched, questions answered) + - Create QUESTIONS based on "closed" concepts that will always return the same answer + - Questions may ask to consider a fixed time window to insulate from non-stationary answers + - Rely on context UNLIKELY to change + - Example: if finding a paper name, be SPECIFIC enough so answer is not confused with papers published later + +4. **Answers must be CLEAR and UNAMBIGUOUS** + - Questions must be designed so there is a single, clear answer + - Answer can be derived from using the MCP server tools + +### Diversity + +5. **Answers must be DIVERSE** + - Answer should be a single VERIFIABLE value in diverse modalities and formats + - User concept: user ID, user name, display name, first name, last name, email address, phone number + - Channel concept: channel ID, channel name, channel topic + - Message concept: message ID, message string, timestamp, month, day, year + +6. 
**Answers must NOT be complex structures** + - Not a list of values + - Not a complex object + - Not a list of IDs or strings + - Not natural language text + - UNLESS the answer can be straightforwardly verified using DIRECT STRING COMPARISON + - And can be realistically reproduced + - It should be unlikely that an LLM would return the same list in any other order or format + +## Evaluation Process + +### Step 1: Documentation Inspection + +Read the documentation of the target API to understand: +- Available endpoints and functionality +- If ambiguity exists, fetch additional information from the web +- Parallelize this step AS MUCH AS POSSIBLE +- Ensure each subagent is ONLY examining documentation from the file system or on the web + +### Step 2: Tool Inspection + +List the tools available in the MCP server: +- Inspect the MCP server directly +- Understand input/output schemas, docstrings, and descriptions +- WITHOUT calling the tools themselves at this stage + +### Step 3: Developing Understanding + +Repeat steps 1 & 2 until you have a good understanding: +- Iterate multiple times +- Think about the kinds of tasks you want to create +- Refine your understanding +- At NO stage should you READ the code of the MCP server implementation itself +- Use your intuition and understanding to create reasonable, realistic, but VERY challenging tasks + +### Step 4: Read-Only Content Inspection + +After understanding the API and tools, USE the MCP server tools: +- Inspect content using READ-ONLY and NON-DESTRUCTIVE operations ONLY +- Goal: identify specific content (e.g., users, channels, messages, projects, tasks) for creating realistic questions +- Should NOT call any tools that modify state +- Will NOT read the code of the MCP server implementation itself +- Parallelize this step with individual sub-agents pursuing independent explorations +- Ensure each subagent is only performing READ-ONLY, NON-DESTRUCTIVE, and IDEMPOTENT operations +- BE CAREFUL: SOME TOOLS may return LOTS OF DATA which would cause you to run out of CONTEXT +- Make INCREMENTAL, SMALL, AND TARGETED tool calls for exploration +- In all tool call requests, use the `limit` parameter to limit results (<10) +- Use pagination + +### Step 5: Task Generation + +After inspecting the content, create 10 human-readable questions: +- An LLM should be able to answer these with the MCP server +- Follow all question and answer guidelines above + +## Output Format + +Each QA pair consists of a question and an answer. The output should be an XML file with this structure: + +```xml +<evaluation> + <qa_pair> + <question>Find the project created in Q2 2024 with the highest number of completed tasks. What is the project name?</question> + <answer>Website Redesign</answer> + </qa_pair> + <qa_pair> + <question>Search for issues labeled as "bug" that were closed in March 2024. Which user closed the most issues? Provide their username.</question> + <answer>sarah_dev</answer> + </qa_pair> + <qa_pair> + <question>Look for pull requests that modified files in the /api directory and were merged between January 1 and January 31, 2024. How many different contributors worked on these PRs?</question> + <answer>7</answer> + </qa_pair> + <qa_pair> + <question>Find the repository with the most stars that was created before 2023. What is the repository name?</question> + <answer>data-pipeline</answer> + </qa_pair> +</evaluation> +``` + +## Evaluation Examples + +### Good Questions + +**Example 1: Multi-hop question requiring deep exploration (GitHub MCP)** +```xml +<qa_pair> + <question>Find the repository that was archived in Q3 2023 and had previously been the most forked project in the organization. What was the primary programming language used in that repository?</question>
+ <answer>Python</answer> +</qa_pair> +``` + +This question is good because: +- Requires multiple searches to find archived repositories +- Needs to identify which had the most forks before archival +- Requires examining repository details for the language +- Answer is a simple, verifiable value +- Based on historical (closed) data that won't change + +**Example 2: Requires understanding context without keyword matching (Project Management MCP)** +```xml +<qa_pair> + <question>Locate the initiative focused on improving customer onboarding that was completed in late 2023. The project lead created a retrospective document after completion. What was the lead's role title at that time?</question> + <answer>Product Manager</answer> +</qa_pair> +``` + +This question is good because: +- Doesn't use specific project name ("initiative focused on improving customer onboarding") +- Requires finding completed projects from specific timeframe +- Needs to identify the project lead and their role +- Requires understanding context from retrospective documents +- Answer is human-readable and stable +- Based on completed work (won't change) + +**Example 3: Complex aggregation requiring multiple steps (Issue Tracker MCP)** +```xml +<qa_pair> + <question>Among all bugs reported in January 2024 that were marked as critical priority, which assignee resolved the highest percentage of their assigned bugs within 48 hours? Provide the assignee's username.</question> + <answer>alex_eng</answer> +</qa_pair> +``` + +This question is good because: +- Requires filtering bugs by date, priority, and status +- Needs to group by assignee and calculate resolution rates +- Requires understanding timestamps to determine 48-hour windows +- Tests pagination (potentially many bugs to process) +- Answer is a single username +- Based on historical data from specific time period + +**Example 4: Requires synthesis across multiple data types (CRM MCP)** +```xml +<qa_pair> + <question>Find the account that upgraded from the Starter to Enterprise plan in Q4 2023 and had the highest annual contract value. What industry does this account operate in?</question> + <answer>Healthcare</answer> +</qa_pair> +``` + +This question is good because: +- Requires understanding subscription tier changes +- Needs to identify upgrade events in specific timeframe +- Requires comparing contract values +- Must access account industry information +- Answer is simple and verifiable +- Based on completed historical transactions + +### Poor Questions + +**Example 1: Answer changes over time** +```xml +<qa_pair> + <question>How many open issues are currently assigned to the engineering team?</question> + <answer>47</answer> +</qa_pair> +``` + +This question is poor because: +- The answer will change as issues are created, closed, or reassigned +- Not based on stable/stationary data +- Relies on "current state" which is dynamic + +**Example 2: Too easy with keyword search** +```xml +<qa_pair> + <question>Find the pull request with title "Add authentication feature" and tell me who created it.</question> + <answer>developer123</answer> +</qa_pair> +``` + +This question is poor because: +- Can be solved with a straightforward keyword search for exact title +- Doesn't require deep exploration or understanding +- No synthesis or analysis needed + +**Example 3: Ambiguous answer format** +```xml +<qa_pair> + <question>List all the repositories that have Python as their primary language.</question>
+ <answer>repo1, repo2, repo3, data-pipeline, ml-tools</answer> +</qa_pair> +``` + +This question is poor because: +- Answer is a list that could be returned in any order +- Difficult to verify with direct string comparison +- LLM might format differently (JSON array, comma-separated, newline-separated) +- Better to ask for a specific aggregate (count) or superlative (most stars) + +## Verification Process + +After creating evaluations: + +1. **Examine the XML file** to understand the schema +2. **Load each task instruction** and, in parallel, use the MCP server and tools to identify the correct answer by attempting to solve the task YOURSELF +3. **Flag any operations** that require WRITE or DESTRUCTIVE operations +4. **Accumulate all CORRECT answers** and replace any incorrect answers in the document +5. **Remove any `<qa_pair>`** that require WRITE or DESTRUCTIVE operations + +Remember to parallelize solving tasks to avoid running out of context, then accumulate all answers and make changes to the file at the end. + +## Tips for Creating Quality Evaluations + +1. **Think Hard and Plan Ahead** before generating tasks +2. **Parallelize Where Opportunity Arises** to speed up the process and manage context +3. **Focus on Realistic Use Cases** that humans would actually want to accomplish +4. **Create Challenging Questions** that test the limits of the MCP server's capabilities +5. **Ensure Stability** by using historical data and closed concepts +6. **Verify Answers** by solving the questions yourself using the MCP server tools +7. **Iterate and Refine** based on what you learn during the process + +--- + +# Running Evaluations + +After creating your evaluation file, you can use the provided evaluation harness to test your MCP server. + +## Setup + +1. **Install Dependencies** + + ```bash + pip install -r scripts/requirements.txt + ``` + + Or install manually: + ```bash + pip install anthropic mcp + ``` + +2. **Set API Key** + + ```bash + export ANTHROPIC_API_KEY=your_api_key_here + ``` + +## Evaluation File Format + +Evaluation files use XML format with `<qa_pair>` elements: + +```xml +<evaluation> + <qa_pair> + <question>Find the project created in Q2 2024 with the highest number of completed tasks. What is the project name?</question> + <answer>Website Redesign</answer> + </qa_pair> + <qa_pair> + <question>Search for issues labeled as "bug" that were closed in March 2024. Which user closed the most issues? Provide their username.</question> + <answer>sarah_dev</answer> + </qa_pair> +</evaluation> +``` + +## Running Evaluations + +The evaluation script (`scripts/evaluation.py`) supports three transport types: + +**Important:** +- **stdio transport**: The evaluation script automatically launches and manages the MCP server process for you. Do not run the server manually. +- **sse/http transports**: You must start the MCP server separately before running the evaluation. The script connects to the already-running server at the specified URL. + +### 1. Local STDIO Server + +For locally-run MCP servers (script launches the server automatically): + +```bash +python scripts/evaluation.py \ + -t stdio \ + -c python \ + -a my_mcp_server.py \ + evaluation.xml +``` + +With environment variables: +```bash +python scripts/evaluation.py \ + -t stdio \ + -c python \ + -a my_mcp_server.py \ + -e API_KEY=abc123 \ + -e DEBUG=true \ + evaluation.xml +``` + +### 2. Server-Sent Events (SSE) + +For SSE-based MCP servers (you must start the server first): + +```bash +python scripts/evaluation.py \ + -t sse \ + -u https://example.com/mcp \ + -H "Authorization: Bearer token123" \ + -H "X-Custom-Header: value" \ + evaluation.xml +``` + +### 3.
HTTP (Streamable HTTP)

For HTTP-based MCP servers (you must start the server first):

```bash
python scripts/evaluation.py \
  -t http \
  -u https://example.com/mcp \
  -H "Authorization: Bearer token123" \
  evaluation.xml
```

## Command-Line Options

```
usage: evaluation.py [-h] [-t {stdio,sse,http}] [-m MODEL] [-c COMMAND]
                     [-a ARGS [ARGS ...]] [-e ENV [ENV ...]] [-u URL]
                     [-H HEADERS [HEADERS ...]] [-o OUTPUT]
                     eval_file

positional arguments:
  eval_file        Path to evaluation XML file

optional arguments:
  -h, --help       Show help message
  -t, --transport  Transport type: stdio, sse, or http (default: stdio)
  -m, --model      Claude model to use (default: claude-3-7-sonnet-20250219)
  -o, --output     Output file for report (default: print to stdout)

stdio options:
  -c, --command    Command to run MCP server (e.g., python, node)
  -a, --args       Arguments for the command (e.g., server.py)
  -e, --env        Environment variables in KEY=VALUE format

sse/http options:
  -u, --url        MCP server URL
  -H, --header     HTTP headers in 'Key: Value' format
```

## Output

The evaluation script generates a detailed report including:

- **Summary Statistics**:
  - Accuracy (correct/total)
  - Average task duration
  - Average tool calls per task
  - Total tool calls

- **Per-Task Results**:
  - Prompt and expected response
  - Actual response from the agent
  - Whether the answer was correct (✅/❌)
  - Duration and tool call details
  - Agent's summary of its approach
  - Agent's feedback on the tools

### Save Report to File

```bash
python scripts/evaluation.py \
  -t stdio \
  -c python \
  -a my_server.py \
  -o evaluation_report.md \
  evaluation.xml
```

## Complete Example Workflow

Here's a complete example of creating and running an evaluation:

1. **Create your evaluation file** (`my_evaluation.xml`):

```xml
<evaluation>
  <task>
    <prompt>Find the user who created the most issues in January 2024. What is their username?</prompt>
    <answer>alice_developer</answer>
  </task>
  <task>
    <prompt>Among all pull requests merged in Q1 2024, which repository had the highest number? Provide the repository name.</prompt>
    <answer>backend-api</answer>
  </task>
  <task>
    <prompt>Find the project that was completed in December 2023 and had the longest duration from start to finish. How many days did it take?</prompt>
    <answer>127</answer>
  </task>
</evaluation>
```

2. **Install dependencies**:

```bash
pip install -r scripts/requirements.txt
export ANTHROPIC_API_KEY=your_api_key
```

3. **Run evaluation**:

```bash
python scripts/evaluation.py \
  -t stdio \
  -c python \
  -a github_mcp_server.py \
  -e GITHUB_TOKEN=ghp_xxx \
  -o github_eval_report.md \
  my_evaluation.xml
```

4. 
**Review the report** in `github_eval_report.md` to: + - See which questions passed/failed + - Read the agent's feedback on your tools + - Identify areas for improvement + - Iterate on your MCP server design + +## Troubleshooting + +### Connection Errors + +If you get connection errors: +- **STDIO**: Verify the command and arguments are correct +- **SSE/HTTP**: Check the URL is accessible and headers are correct +- Ensure any required API keys are set in environment variables or headers + +### Low Accuracy + +If many evaluations fail: +- Review the agent's feedback for each task +- Check if tool descriptions are clear and comprehensive +- Verify input parameters are well-documented +- Consider whether tools return too much or too little data +- Ensure error messages are actionable + +### Timeout Issues + +If tasks are timing out: +- Use a more capable model (e.g., `claude-3-7-sonnet-20250219`) +- Check if tools are returning too much data +- Verify pagination is working correctly +- Consider simplifying complex questions \ No newline at end of file diff --git a/.claude/skills/mcp-builder/reference/mcp_best_practices.md b/.claude/skills/mcp-builder/reference/mcp_best_practices.md new file mode 100644 index 0000000..b9d343c --- /dev/null +++ b/.claude/skills/mcp-builder/reference/mcp_best_practices.md @@ -0,0 +1,249 @@ +# MCP Server Best Practices + +## Quick Reference + +### Server Naming +- **Python**: `{service}_mcp` (e.g., `slack_mcp`) +- **Node/TypeScript**: `{service}-mcp-server` (e.g., `slack-mcp-server`) + +### Tool Naming +- Use snake_case with service prefix +- Format: `{service}_{action}_{resource}` +- Example: `slack_send_message`, `github_create_issue` + +### Response Formats +- Support both JSON and Markdown formats +- JSON for programmatic processing +- Markdown for human readability + +### Pagination +- Always respect `limit` parameter +- Return `has_more`, `next_offset`, `total_count` +- Default to 20-50 items + +### Transport +- **Streamable HTTP**: For remote servers, multi-client scenarios +- **stdio**: For local integrations, command-line tools +- Avoid SSE (deprecated in favor of streamable HTTP) + +--- + +## Server Naming Conventions + +Follow these standardized naming patterns: + +**Python**: Use format `{service}_mcp` (lowercase with underscores) +- Examples: `slack_mcp`, `github_mcp`, `jira_mcp` + +**Node/TypeScript**: Use format `{service}-mcp-server` (lowercase with hyphens) +- Examples: `slack-mcp-server`, `github-mcp-server`, `jira-mcp-server` + +The name should be general, descriptive of the service being integrated, easy to infer from the task description, and without version numbers. + +--- + +## Tool Naming and Design + +### Tool Naming + +1. **Use snake_case**: `search_users`, `create_project`, `get_channel_info` +2. **Include service prefix**: Anticipate that your MCP server may be used alongside other MCP servers + - Use `slack_send_message` instead of just `send_message` + - Use `github_create_issue` instead of just `create_issue` +3. **Be action-oriented**: Start with verbs (get, list, search, create, etc.) +4. 
**Be specific**: Avoid generic names that could conflict with other servers + +### Tool Design + +- Tool descriptions must narrowly and unambiguously describe functionality +- Descriptions must precisely match actual functionality +- Provide tool annotations (readOnlyHint, destructiveHint, idempotentHint, openWorldHint) +- Keep tool operations focused and atomic + +--- + +## Response Formats + +All tools that return data should support multiple formats: + +### JSON Format (`response_format="json"`) +- Machine-readable structured data +- Include all available fields and metadata +- Consistent field names and types +- Use for programmatic processing + +### Markdown Format (`response_format="markdown"`, typically default) +- Human-readable formatted text +- Use headers, lists, and formatting for clarity +- Convert timestamps to human-readable format +- Show display names with IDs in parentheses +- Omit verbose metadata + +--- + +## Pagination + +For tools that list resources: + +- **Always respect the `limit` parameter** +- **Implement pagination**: Use `offset` or cursor-based pagination +- **Return pagination metadata**: Include `has_more`, `next_offset`/`next_cursor`, `total_count` +- **Never load all results into memory**: Especially important for large datasets +- **Default to reasonable limits**: 20-50 items is typical + +Example pagination response: +```json +{ + "total": 150, + "count": 20, + "offset": 0, + "items": [...], + "has_more": true, + "next_offset": 20 +} +``` + +--- + +## Transport Options + +### Streamable HTTP + +**Best for**: Remote servers, web services, multi-client scenarios + +**Characteristics**: +- Bidirectional communication over HTTP +- Supports multiple simultaneous clients +- Can be deployed as a web service +- Enables server-to-client notifications + +**Use when**: +- Serving multiple clients simultaneously +- Deploying as a cloud service +- Integration with web applications + +### stdio + +**Best for**: Local integrations, command-line tools + +**Characteristics**: +- Standard input/output stream communication +- Simple setup, no network configuration needed +- Runs as a subprocess of the client + +**Use when**: +- Building tools for local development environments +- Integrating with desktop applications +- Single-user, single-session scenarios + +**Note**: stdio servers should NOT log to stdout (use stderr for logging) + +### Transport Selection + +| Criterion | stdio | Streamable HTTP | +|-----------|-------|-----------------| +| **Deployment** | Local | Remote | +| **Clients** | Single | Multiple | +| **Complexity** | Low | Medium | +| **Real-time** | No | Yes | + +--- + +## Security Best Practices + +### Authentication and Authorization + +**OAuth 2.1**: +- Use secure OAuth 2.1 with certificates from recognized authorities +- Validate access tokens before processing requests +- Only accept tokens specifically intended for your server + +**API Keys**: +- Store API keys in environment variables, never in code +- Validate keys on server startup +- Provide clear error messages when authentication fails + +### Input Validation + +- Sanitize file paths to prevent directory traversal +- Validate URLs and external identifiers +- Check parameter sizes and ranges +- Prevent command injection in system calls +- Use schema validation (Pydantic/Zod) for all inputs + +### Error Handling + +- Don't expose internal errors to clients +- Log security-relevant errors server-side +- Provide helpful but not revealing error messages +- Clean up resources after errors + +### 
DNS Rebinding Protection + +For streamable HTTP servers running locally: +- Enable DNS rebinding protection +- Validate the `Origin` header on all incoming connections +- Bind to `127.0.0.1` rather than `0.0.0.0` + +--- + +## Tool Annotations + +Provide annotations to help clients understand tool behavior: + +| Annotation | Type | Default | Description | +|-----------|------|---------|-------------| +| `readOnlyHint` | boolean | false | Tool does not modify its environment | +| `destructiveHint` | boolean | true | Tool may perform destructive updates | +| `idempotentHint` | boolean | false | Repeated calls with same args have no additional effect | +| `openWorldHint` | boolean | true | Tool interacts with external entities | + +**Important**: Annotations are hints, not security guarantees. Clients should not make security-critical decisions based solely on annotations. + +--- + +## Error Handling + +- Use standard JSON-RPC error codes +- Report tool errors within result objects (not protocol-level errors) +- Provide helpful, specific error messages with suggested next steps +- Don't expose internal implementation details +- Clean up resources properly on errors + +Example error handling: +```typescript +try { + const result = performOperation(); + return { content: [{ type: "text", text: result }] }; +} catch (error) { + return { + isError: true, + content: [{ + type: "text", + text: `Error: ${error.message}. Try using filter='active_only' to reduce results.` + }] + }; +} +``` + +--- + +## Testing Requirements + +Comprehensive testing should cover: + +- **Functional testing**: Verify correct execution with valid/invalid inputs +- **Integration testing**: Test interaction with external systems +- **Security testing**: Validate auth, input sanitization, rate limiting +- **Performance testing**: Check behavior under load, timeouts +- **Error handling**: Ensure proper error reporting and cleanup + +--- + +## Documentation Requirements + +- Provide clear documentation of all tools and capabilities +- Include working examples (at least 3 per major feature) +- Document security considerations +- Specify required permissions and access levels +- Document rate limits and performance characteristics diff --git a/.claude/skills/mcp-builder/reference/node_mcp_server.md b/.claude/skills/mcp-builder/reference/node_mcp_server.md new file mode 100644 index 0000000..f6e5df9 --- /dev/null +++ b/.claude/skills/mcp-builder/reference/node_mcp_server.md @@ -0,0 +1,970 @@ +# Node/TypeScript MCP Server Implementation Guide + +## Overview + +This document provides Node/TypeScript-specific best practices and examples for implementing MCP servers using the MCP TypeScript SDK. It covers project structure, server setup, tool registration patterns, input validation with Zod, error handling, and complete working examples. 
+ +--- + +## Quick Reference + +### Key Imports +```typescript +import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; +import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js"; +import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; +import express from "express"; +import { z } from "zod"; +``` + +### Server Initialization +```typescript +const server = new McpServer({ + name: "service-mcp-server", + version: "1.0.0" +}); +``` + +### Tool Registration Pattern +```typescript +server.registerTool( + "tool_name", + { + title: "Tool Display Name", + description: "What the tool does", + inputSchema: { param: z.string() }, + outputSchema: { result: z.string() } + }, + async ({ param }) => { + const output = { result: `Processed: ${param}` }; + return { + content: [{ type: "text", text: JSON.stringify(output) }], + structuredContent: output // Modern pattern for structured data + }; + } +); +``` + +--- + +## MCP TypeScript SDK + +The official MCP TypeScript SDK provides: +- `McpServer` class for server initialization +- `registerTool` method for tool registration +- Zod schema integration for runtime input validation +- Type-safe tool handler implementations + +**IMPORTANT - Use Modern APIs Only:** +- **DO use**: `server.registerTool()`, `server.registerResource()`, `server.registerPrompt()` +- **DO NOT use**: Old deprecated APIs such as `server.tool()`, `server.setRequestHandler(ListToolsRequestSchema, ...)`, or manual handler registration +- The `register*` methods provide better type safety, automatic schema handling, and are the recommended approach + +See the MCP SDK documentation in the references for complete details. + +## Server Naming Convention + +Node/TypeScript MCP servers must follow this naming pattern: +- **Format**: `{service}-mcp-server` (lowercase with hyphens) +- **Examples**: `github-mcp-server`, `jira-mcp-server`, `stripe-mcp-server` + +The name should be: +- General (not tied to specific features) +- Descriptive of the service/API being integrated +- Easy to infer from the task description +- Without version numbers or dates + +## Project Structure + +Create the following structure for Node/TypeScript MCP servers: + +``` +{service}-mcp-server/ +├── package.json +├── tsconfig.json +├── README.md +├── src/ +│ ├── index.ts # Main entry point with McpServer initialization +│ ├── types.ts # TypeScript type definitions and interfaces +│ ├── tools/ # Tool implementations (one file per domain) +│ ├── services/ # API clients and shared utilities +│ ├── schemas/ # Zod validation schemas +│ └── constants.ts # Shared constants (API_URL, CHARACTER_LIMIT, etc.) +└── dist/ # Built JavaScript files (entry point: dist/index.js) +``` + +## Tool Implementation + +### Tool Naming + +Use snake_case for tool names (e.g., "search_users", "create_project", "get_channel_info") with clear, action-oriented names. 

**Avoid Naming Conflicts**: Include the service context to prevent overlaps:
- Use "slack_send_message" instead of just "send_message"
- Use "github_create_issue" instead of just "create_issue"
- Use "asana_list_tasks" instead of just "list_tasks"

### Tool Structure

Tools are registered using the `registerTool` method with the following requirements:
- Use Zod schemas for runtime input validation and type safety
- The `description` field must be explicitly provided - JSDoc comments are NOT automatically extracted
- Explicitly provide `title`, `description`, `inputSchema`, and `annotations`
- The `inputSchema` must be a Zod schema object (not a JSON schema)
- Type all parameters and return values explicitly

```typescript
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { z } from "zod";

const server = new McpServer({
  name: "example-mcp",
  version: "1.0.0"
});

// Zod schema for input validation
const UserSearchInputSchema = z.object({
  query: z.string()
    .min(2, "Query must be at least 2 characters")
    .max(200, "Query must not exceed 200 characters")
    .describe("Search string to match against names/emails"),
  limit: z.number()
    .int()
    .min(1)
    .max(100)
    .default(20)
    .describe("Maximum results to return"),
  offset: z.number()
    .int()
    .min(0)
    .default(0)
    .describe("Number of results to skip for pagination"),
  response_format: z.nativeEnum(ResponseFormat)
    .default(ResponseFormat.MARKDOWN)
    .describe("Output format: 'markdown' for human-readable or 'json' for machine-readable")
}).strict();

// Type definition from Zod schema
type UserSearchInput = z.infer<typeof UserSearchInputSchema>;

server.registerTool(
  "example_search_users",
  {
    title: "Search Example Users",
    description: `Search for users in the Example system by name, email, or team.

This tool searches across all user profiles in the Example platform, supporting partial matches and various search filters. It does NOT create or modify users, only searches existing ones.
+ +Args: + - query (string): Search string to match against names/emails + - limit (number): Maximum results to return, between 1-100 (default: 20) + - offset (number): Number of results to skip for pagination (default: 0) + - response_format ('markdown' | 'json'): Output format (default: 'markdown') + +Returns: + For JSON format: Structured data with schema: + { + "total": number, // Total number of matches found + "count": number, // Number of results in this response + "offset": number, // Current pagination offset + "users": [ + { + "id": string, // User ID (e.g., "U123456789") + "name": string, // Full name (e.g., "John Doe") + "email": string, // Email address + "team": string, // Team name (optional) + "active": boolean // Whether user is active + } + ], + "has_more": boolean, // Whether more results are available + "next_offset": number // Offset for next page (if has_more is true) + } + +Examples: + - Use when: "Find all marketing team members" -> params with query="team:marketing" + - Use when: "Search for John's account" -> params with query="john" + - Don't use when: You need to create a user (use example_create_user instead) + +Error Handling: + - Returns "Error: Rate limit exceeded" if too many requests (429 status) + - Returns "No users found matching ''" if search returns empty`, + inputSchema: UserSearchInputSchema, + annotations: { + readOnlyHint: true, + destructiveHint: false, + idempotentHint: true, + openWorldHint: true + } + }, + async (params: UserSearchInput) => { + try { + // Input validation is handled by Zod schema + // Make API request using validated parameters + const data = await makeApiRequest( + "users/search", + "GET", + undefined, + { + q: params.query, + limit: params.limit, + offset: params.offset + } + ); + + const users = data.users || []; + const total = data.total || 0; + + if (!users.length) { + return { + content: [{ + type: "text", + text: `No users found matching '${params.query}'` + }] + }; + } + + // Prepare structured output + const output = { + total, + count: users.length, + offset: params.offset, + users: users.map((user: any) => ({ + id: user.id, + name: user.name, + email: user.email, + ...(user.team ? { team: user.team } : {}), + active: user.active ?? true + })), + has_more: total > params.offset + users.length, + ...(total > params.offset + users.length ? 
{
            next_offset: params.offset + users.length
          } : {})
      };

      // Format text representation based on requested format
      let textContent: string;
      if (params.response_format === ResponseFormat.MARKDOWN) {
        const lines = [`# User Search Results: '${params.query}'`, "",
          `Found ${total} users (showing ${users.length})`, ""];
        for (const user of users) {
          lines.push(`## ${user.name} (${user.id})`);
          lines.push(`- **Email**: ${user.email}`);
          if (user.team) lines.push(`- **Team**: ${user.team}`);
          lines.push("");
        }
        textContent = lines.join("\n");
      } else {
        textContent = JSON.stringify(output, null, 2);
      }

      return {
        content: [{ type: "text", text: textContent }],
        structuredContent: output // Modern pattern for structured data
      };
    } catch (error) {
      return {
        content: [{
          type: "text",
          text: handleApiError(error)
        }]
      };
    }
  }
);
```

## Zod Schemas for Input Validation

Zod provides runtime type validation:

```typescript
import { z } from "zod";

// Basic schema with validation
const CreateUserSchema = z.object({
  name: z.string()
    .min(1, "Name is required")
    .max(100, "Name must not exceed 100 characters"),
  email: z.string()
    .email("Invalid email format"),
  age: z.number()
    .int("Age must be a whole number")
    .min(0, "Age cannot be negative")
    .max(150, "Age cannot be greater than 150")
}).strict(); // Use .strict() to forbid extra fields

// Enums
enum ResponseFormat {
  MARKDOWN = "markdown",
  JSON = "json"
}

const SearchSchema = z.object({
  response_format: z.nativeEnum(ResponseFormat)
    .default(ResponseFormat.MARKDOWN)
    .describe("Output format")
});

// Optional fields with defaults
const PaginationSchema = z.object({
  limit: z.number()
    .int()
    .min(1)
    .max(100)
    .default(20)
    .describe("Maximum results to return"),
  offset: z.number()
    .int()
    .min(0)
    .default(0)
    .describe("Number of results to skip")
});
```

## Response Format Options

Support multiple output formats for flexibility:

```typescript
enum ResponseFormat {
  MARKDOWN = "markdown",
  JSON = "json"
}

const inputSchema = z.object({
  query: z.string(),
  response_format: z.nativeEnum(ResponseFormat)
    .default(ResponseFormat.MARKDOWN)
    .describe("Output format: 'markdown' for human-readable or 'json' for machine-readable")
});
```

**Markdown format**:
- Use headers, lists, and formatting for clarity
- Convert timestamps to human-readable format
- Show display names with IDs in parentheses
- Omit verbose metadata
- Group related information logically

**JSON format**:
- Return complete, structured data suitable for programmatic processing
- Include all available fields and metadata
- Use consistent field names and types

## Pagination Implementation

For tools that list resources:

```typescript
const ListSchema = z.object({
  limit: z.number().int().min(1).max(100).default(20),
  offset: z.number().int().min(0).default(0)
});

async function listItems(params: z.infer<typeof ListSchema>) {
  const data = await apiRequest(params.limit, params.offset);

  const response = {
    total: data.total,
    count: data.items.length,
    offset: params.offset,
    items: data.items,
    has_more: data.total > params.offset + data.items.length,
    next_offset: data.total > params.offset + data.items.length
      ? params.offset + data.items.length
      : undefined
  };

  return JSON.stringify(response, null, 2);
}
```
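
Offset pagination can skip or duplicate items when the underlying data changes between pages. Cursor-based pagination (mentioned in the best-practices reference) follows the same shape; here is a minimal sketch, assuming a hypothetical `apiRequest` client that forwards the cursor upstream and echoes back a `next_cursor` token whenever more pages exist:

```typescript
const CursorListSchema = z.object({
  limit: z.number().int().min(1).max(100).default(20),
  cursor: z.string().optional().describe("Opaque cursor from a previous response")
});

// Sketch only: `apiRequest` is assumed to accept a cursor and return
// `{ items, next_cursor }`, with `next_cursor` absent on the last page.
async function listItemsByCursor(params: z.infer<typeof CursorListSchema>) {
  const data = await apiRequest(params.limit, params.cursor);

  const response = {
    count: data.items.length,
    items: data.items,
    has_more: Boolean(data.next_cursor),
    next_cursor: data.next_cursor ?? undefined
  };

  return JSON.stringify(response, null, 2);
}
```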

## Character Limits and Truncation

Add a CHARACTER_LIMIT constant to prevent overwhelming responses:

```typescript
// At module level in constants.ts
export const CHARACTER_LIMIT = 25000; // Maximum response size in characters

async function searchTool(params: SearchInput) {
  const data = await fetchResults(params); // e.g., an array of matching items
  const response: any = { total: data.length, data };
  let result = JSON.stringify(response, null, 2);

  // Check character limit and truncate if needed
  if (result.length > CHARACTER_LIMIT) {
    const truncatedData = data.slice(0, Math.max(1, Math.floor(data.length / 2)));
    response.data = truncatedData;
    response.truncated = true;
    response.truncation_message =
      `Response truncated from ${data.length} to ${truncatedData.length} items. ` +
      `Use 'offset' parameter or add filters to see more results.`;
    result = JSON.stringify(response, null, 2);
  }

  return result;
}
```

## Error Handling

Provide clear, actionable error messages:

```typescript
import axios, { AxiosError } from "axios";

function handleApiError(error: unknown): string {
  if (error instanceof AxiosError) {
    if (error.response) {
      switch (error.response.status) {
        case 404:
          return "Error: Resource not found. Please check the ID is correct.";
        case 403:
          return "Error: Permission denied. You don't have access to this resource.";
        case 429:
          return "Error: Rate limit exceeded. Please wait before making more requests.";
        default:
          return `Error: API request failed with status ${error.response.status}`;
      }
    } else if (error.code === "ECONNABORTED") {
      return "Error: Request timed out. Please try again.";
    }
  }
  return `Error: Unexpected error occurred: ${error instanceof Error ? error.message : String(error)}`;
}
```

## Shared Utilities

Extract common functionality into reusable functions:

```typescript
// Shared API request function
async function makeApiRequest(
  endpoint: string,
  method: "GET" | "POST" | "PUT" | "DELETE" = "GET",
  data?: any,
  params?: any
): Promise<any> {
  const response = await axios({
    method,
    url: `${API_BASE_URL}/${endpoint}`,
    data,
    params,
    timeout: 30000,
    headers: {
      "Content-Type": "application/json",
      "Accept": "application/json"
    }
  });
  return response.data;
}
```

## Async/Await Best Practices

Always use async/await for network requests and I/O operations:

```typescript
// Good: Async network request
async function fetchData(resourceId: string): Promise<any> {
  const response = await axios.get(`${API_URL}/resource/${resourceId}`);
  return response.data;
}

// Bad: Promise chains
function fetchData(resourceId: string): Promise<any> {
  return axios.get(`${API_URL}/resource/${resourceId}`)
    .then(response => response.data); // Harder to read and maintain
}
```
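
Async/await also makes it straightforward to run independent requests concurrently instead of awaiting them one at a time. A minimal sketch, assuming the same placeholder `API_URL` endpoints as above:

```typescript
// Good: independent requests run concurrently with Promise.all
async function fetchUserWithProjects(userId: string): Promise<any> {
  const [user, projects] = await Promise.all([
    axios.get(`${API_URL}/users/${userId}`),
    axios.get(`${API_URL}/users/${userId}/projects`)
  ]);
  return { user: user.data, projects: projects.data };
}
```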

## TypeScript Best Practices

1. **Use Strict TypeScript**: Enable strict mode in tsconfig.json
2. **Define Interfaces**: Create clear interface definitions for all data structures
3. **Avoid `any`**: Use proper types or `unknown` instead of `any`
4. **Zod for Runtime Validation**: Use Zod schemas to validate external data
5. **Type Guards**: Create type guard functions for complex type checking
6. **Error Handling**: Always use try-catch with proper error type checking
7. **Null Safety**: Use optional chaining (`?.`) and nullish coalescing (`??`)

```typescript
// Good: Type-safe with Zod and interfaces
interface UserResponse {
  id: string;
  name: string;
  email: string;
  team?: string;
  active: boolean;
}

const UserSchema = z.object({
  id: z.string(),
  name: z.string(),
  email: z.string().email(),
  team: z.string().optional(),
  active: z.boolean()
});

type User = z.infer<typeof UserSchema>;

async function getUser(id: string): Promise<User> {
  const data = await apiCall(`/users/${id}`);
  return UserSchema.parse(data); // Runtime validation
}

// Bad: Using any
async function getUser(id: string): Promise<any> {
  return await apiCall(`/users/${id}`); // No type safety
}
```

## Package Configuration

### package.json

```json
{
  "name": "{service}-mcp-server",
  "version": "1.0.0",
  "description": "MCP server for {Service} API integration",
  "type": "module",
  "main": "dist/index.js",
  "scripts": {
    "start": "node dist/index.js",
    "dev": "tsx watch src/index.ts",
    "build": "tsc",
    "clean": "rm -rf dist"
  },
  "engines": {
    "node": ">=18"
  },
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.6.1",
    "axios": "^1.7.9",
    "zod": "^3.23.8"
  },
  "devDependencies": {
    "@types/node": "^22.10.0",
    "tsx": "^4.19.2",
    "typescript": "^5.7.2"
  }
}
```

### tsconfig.json

```json
{
  "compilerOptions": {
    "target": "ES2022",
    "module": "Node16",
    "moduleResolution": "Node16",
    "lib": ["ES2022"],
    "outDir": "./dist",
    "rootDir": "./src",
    "strict": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "forceConsistentCasingInFileNames": true,
    "declaration": true,
    "declarationMap": true,
    "sourceMap": true,
    "allowSyntheticDefaultImports": true
  },
  "include": ["src/**/*"],
  "exclude": ["node_modules", "dist"]
}
```

## Complete Example

```typescript
#!/usr/bin/env node
/**
 * MCP Server for Example Service.
 *
 * This server provides tools to interact with Example API, including user search,
 * project management, and data export capabilities.
 */

import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
import express from "express";
import { z } from "zod";
import axios, { AxiosError } from "axios";

// Constants
const API_BASE_URL = "https://api.example.com/v1";
const CHARACTER_LIMIT = 25000;

// Enums
enum ResponseFormat {
  MARKDOWN = "markdown",
  JSON = "json"
}

// Zod schemas
const UserSearchInputSchema = z.object({
  query: z.string()
    .min(2, "Query must be at least 2 characters")
    .max(200, "Query must not exceed 200 characters")
    .describe("Search string to match against names/emails"),
  limit: z.number()
    .int()
    .min(1)
    .max(100)
    .default(20)
    .describe("Maximum results to return"),
  offset: z.number()
    .int()
    .min(0)
    .default(0)
    .describe("Number of results to skip for pagination"),
  response_format: z.nativeEnum(ResponseFormat)
    .default(ResponseFormat.MARKDOWN)
    .describe("Output format: 'markdown' for human-readable or 'json' for machine-readable")
}).strict();

type UserSearchInput = z.infer<typeof UserSearchInputSchema>;

// Shared utility functions
async function makeApiRequest(
  endpoint: string,
  method: "GET" | "POST" | "PUT" | "DELETE" = "GET",
  data?: any,
  params?: any
): Promise<any> {
  const response = await axios({
    method,
    url: `${API_BASE_URL}/${endpoint}`,
    data,
    params,
    timeout: 30000,
    headers: {
      "Content-Type": "application/json",
      "Accept": "application/json"
    }
  });
  return response.data;
}

function handleApiError(error: unknown): string {
  if (error instanceof AxiosError) {
    if (error.response) {
      switch (error.response.status) {
        case 404:
          return "Error: Resource not found. Please check the ID is correct.";
        case 403:
          return "Error: Permission denied. You don't have access to this resource.";
        case 429:
          return "Error: Rate limit exceeded. Please wait before making more requests.";
        default:
          return `Error: API request failed with status ${error.response.status}`;
      }
    } else if (error.code === "ECONNABORTED") {
      return "Error: Request timed out. Please try again.";
    }
  }
  return `Error: Unexpected error occurred: ${error instanceof Error ? 
error.message : String(error)}`; +} + +// Create MCP server instance +const server = new McpServer({ + name: "example-mcp", + version: "1.0.0" +}); + +// Register tools +server.registerTool( + "example_search_users", + { + title: "Search Example Users", + description: `[Full description as shown above]`, + inputSchema: UserSearchInputSchema, + annotations: { + readOnlyHint: true, + destructiveHint: false, + idempotentHint: true, + openWorldHint: true + } + }, + async (params: UserSearchInput) => { + // Implementation as shown above + } +); + +// Main function +// For stdio (local): +async function runStdio() { + if (!process.env.EXAMPLE_API_KEY) { + console.error("ERROR: EXAMPLE_API_KEY environment variable is required"); + process.exit(1); + } + + const transport = new StdioServerTransport(); + await server.connect(transport); + console.error("MCP server running via stdio"); +} + +// For streamable HTTP (remote): +async function runHTTP() { + if (!process.env.EXAMPLE_API_KEY) { + console.error("ERROR: EXAMPLE_API_KEY environment variable is required"); + process.exit(1); + } + + const app = express(); + app.use(express.json()); + + app.post('/mcp', async (req, res) => { + const transport = new StreamableHTTPServerTransport({ + sessionIdGenerator: undefined, + enableJsonResponse: true + }); + res.on('close', () => transport.close()); + await server.connect(transport); + await transport.handleRequest(req, res, req.body); + }); + + const port = parseInt(process.env.PORT || '3000'); + app.listen(port, () => { + console.error(`MCP server running on http://localhost:${port}/mcp`); + }); +} + +// Choose transport based on environment +const transport = process.env.TRANSPORT || 'stdio'; +if (transport === 'http') { + runHTTP().catch(error => { + console.error("Server error:", error); + process.exit(1); + }); +} else { + runStdio().catch(error => { + console.error("Server error:", error); + process.exit(1); + }); +} +``` + +--- + +## Advanced MCP Features + +### Resource Registration + +Expose data as resources for efficient, URI-based access: + +```typescript +import { ResourceTemplate } from "@modelcontextprotocol/sdk/types.js"; + +// Register a resource with URI template +server.registerResource( + { + uri: "file://documents/{name}", + name: "Document Resource", + description: "Access documents by name", + mimeType: "text/plain" + }, + async (uri: string) => { + // Extract parameter from URI + const match = uri.match(/^file:\/\/documents\/(.+)$/); + if (!match) { + throw new Error("Invalid URI format"); + } + + const documentName = match[1]; + const content = await loadDocument(documentName); + + return { + contents: [{ + uri, + mimeType: "text/plain", + text: content + }] + }; + } +); + +// List available resources dynamically +server.registerResourceList(async () => { + const documents = await getAvailableDocuments(); + return { + resources: documents.map(doc => ({ + uri: `file://documents/${doc.name}`, + name: doc.name, + mimeType: "text/plain", + description: doc.description + })) + }; +}); +``` + +**When to use Resources vs Tools:** +- **Resources**: For data access with simple URI-based parameters +- **Tools**: For complex operations requiring validation and business logic +- **Resources**: When data is relatively static or template-based +- **Tools**: When operations have side effects or complex workflows + +### Transport Options + +The TypeScript SDK supports two main transport mechanisms: + +#### Streamable HTTP (Recommended for Remote Servers) + +```typescript +import { 
StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js"; +import express from "express"; + +const app = express(); +app.use(express.json()); + +app.post('/mcp', async (req, res) => { + // Create new transport for each request (stateless, prevents request ID collisions) + const transport = new StreamableHTTPServerTransport({ + sessionIdGenerator: undefined, + enableJsonResponse: true + }); + + res.on('close', () => transport.close()); + + await server.connect(transport); + await transport.handleRequest(req, res, req.body); +}); + +app.listen(3000); +``` + +#### stdio (For Local Integrations) + +```typescript +import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; + +const transport = new StdioServerTransport(); +await server.connect(transport); +``` + +**Transport selection:** +- **Streamable HTTP**: Web services, remote access, multiple clients +- **stdio**: Command-line tools, local development, subprocess integration + +### Notification Support + +Notify clients when server state changes: + +```typescript +// Notify when tools list changes +server.notification({ + method: "notifications/tools/list_changed" +}); + +// Notify when resources change +server.notification({ + method: "notifications/resources/list_changed" +}); +``` + +Use notifications sparingly - only when server capabilities genuinely change. + +--- + +## Code Best Practices + +### Code Composability and Reusability + +Your implementation MUST prioritize composability and code reuse: + +1. **Extract Common Functionality**: + - Create reusable helper functions for operations used across multiple tools + - Build shared API clients for HTTP requests instead of duplicating code + - Centralize error handling logic in utility functions + - Extract business logic into dedicated functions that can be composed + - Extract shared markdown or JSON field selection & formatting functionality + +2. **Avoid Duplication**: + - NEVER copy-paste similar code between tools + - If you find yourself writing similar logic twice, extract it into a function + - Common operations like pagination, filtering, field selection, and formatting should be shared + - Authentication/authorization logic should be centralized + +## Building and Running + +Always build your TypeScript code before running: + +```bash +# Build the project +npm run build + +# Run the server +npm start + +# Development with auto-reload +npm run dev +``` + +Always ensure `npm run build` completes successfully before considering the implementation complete. 
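
For a quick end-to-end check of the built server, the SDK's client classes can drive `dist/index.js` over stdio and list its tools. A minimal smoke-test sketch (the server path and logged expectations are placeholders for your own project):

```typescript
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

async function smokeTest() {
  // Launch the built server as a subprocess and connect over stdio
  const transport = new StdioClientTransport({
    command: "node",
    args: ["dist/index.js"]
  });
  const client = new Client({ name: "smoke-test", version: "1.0.0" });
  await client.connect(transport);

  // Verify the expected tools are registered
  const { tools } = await client.listTools();
  console.log(tools.map(t => t.name));

  await client.close();
}

smokeTest().catch(error => {
  console.error("Smoke test failed:", error);
  process.exit(1);
});
```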
+ +## Quality Checklist + +Before finalizing your Node/TypeScript MCP server implementation, ensure: + +### Strategic Design +- [ ] Tools enable complete workflows, not just API endpoint wrappers +- [ ] Tool names reflect natural task subdivisions +- [ ] Response formats optimize for agent context efficiency +- [ ] Human-readable identifiers used where appropriate +- [ ] Error messages guide agents toward correct usage + +### Implementation Quality +- [ ] FOCUSED IMPLEMENTATION: Most important and valuable tools implemented +- [ ] All tools registered using `registerTool` with complete configuration +- [ ] All tools include `title`, `description`, `inputSchema`, and `annotations` +- [ ] Annotations correctly set (readOnlyHint, destructiveHint, idempotentHint, openWorldHint) +- [ ] All tools use Zod schemas for runtime input validation with `.strict()` enforcement +- [ ] All Zod schemas have proper constraints and descriptive error messages +- [ ] All tools have comprehensive descriptions with explicit input/output types +- [ ] Descriptions include return value examples and complete schema documentation +- [ ] Error messages are clear, actionable, and educational + +### TypeScript Quality +- [ ] TypeScript interfaces are defined for all data structures +- [ ] Strict TypeScript is enabled in tsconfig.json +- [ ] No use of `any` type - use `unknown` or proper types instead +- [ ] All async functions have explicit Promise return types +- [ ] Error handling uses proper type guards (e.g., `axios.isAxiosError`, `z.ZodError`) + +### Advanced Features (where applicable) +- [ ] Resources registered for appropriate data endpoints +- [ ] Appropriate transport configured (stdio or streamable HTTP) +- [ ] Notifications implemented for dynamic server capabilities +- [ ] Type-safe with SDK interfaces + +### Project Configuration +- [ ] Package.json includes all necessary dependencies +- [ ] Build script produces working JavaScript in dist/ directory +- [ ] Main entry point is properly configured as dist/index.js +- [ ] Server name follows format: `{service}-mcp-server` +- [ ] tsconfig.json properly configured with strict mode + +### Code Quality +- [ ] Pagination is properly implemented where applicable +- [ ] Large responses check CHARACTER_LIMIT constant and truncate with clear messages +- [ ] Filtering options are provided for potentially large result sets +- [ ] All network operations handle timeouts and connection errors gracefully +- [ ] Common functionality is extracted into reusable functions +- [ ] Return types are consistent across similar operations + +### Testing and Build +- [ ] `npm run build` completes successfully without errors +- [ ] dist/index.js created and executable +- [ ] Server runs: `node dist/index.js --help` +- [ ] All imports resolve correctly +- [ ] Sample tool calls work as expected \ No newline at end of file diff --git a/.claude/skills/mcp-builder/reference/python_mcp_server.md b/.claude/skills/mcp-builder/reference/python_mcp_server.md new file mode 100644 index 0000000..cf7ec99 --- /dev/null +++ b/.claude/skills/mcp-builder/reference/python_mcp_server.md @@ -0,0 +1,719 @@ +# Python MCP Server Implementation Guide + +## Overview + +This document provides Python-specific best practices and examples for implementing MCP servers using the MCP Python SDK. It covers server setup, tool registration patterns, input validation with Pydantic, error handling, and complete working examples. 
+ +--- + +## Quick Reference + +### Key Imports +```python +from mcp.server.fastmcp import FastMCP +from pydantic import BaseModel, Field, field_validator, ConfigDict +from typing import Optional, List, Dict, Any +from enum import Enum +import httpx +``` + +### Server Initialization +```python +mcp = FastMCP("service_mcp") +``` + +### Tool Registration Pattern +```python +@mcp.tool(name="tool_name", annotations={...}) +async def tool_function(params: InputModel) -> str: + # Implementation + pass +``` + +--- + +## MCP Python SDK and FastMCP + +The official MCP Python SDK provides FastMCP, a high-level framework for building MCP servers. It provides: +- Automatic description and inputSchema generation from function signatures and docstrings +- Pydantic model integration for input validation +- Decorator-based tool registration with `@mcp.tool` + +**For complete SDK documentation, use WebFetch to load:** +`https://raw.githubusercontent.com/modelcontextprotocol/python-sdk/main/README.md` + +## Server Naming Convention + +Python MCP servers must follow this naming pattern: +- **Format**: `{service}_mcp` (lowercase with underscores) +- **Examples**: `github_mcp`, `jira_mcp`, `stripe_mcp` + +The name should be: +- General (not tied to specific features) +- Descriptive of the service/API being integrated +- Easy to infer from the task description +- Without version numbers or dates + +## Tool Implementation + +### Tool Naming + +Use snake_case for tool names (e.g., "search_users", "create_project", "get_channel_info") with clear, action-oriented names. + +**Avoid Naming Conflicts**: Include the service context to prevent overlaps: +- Use "slack_send_message" instead of just "send_message" +- Use "github_create_issue" instead of just "create_issue" +- Use "asana_list_tasks" instead of just "list_tasks" + +### Tool Structure with FastMCP + +Tools are defined using the `@mcp.tool` decorator with Pydantic models for input validation: + +```python +from pydantic import BaseModel, Field, ConfigDict +from mcp.server.fastmcp import FastMCP + +# Initialize the MCP server +mcp = FastMCP("example_mcp") + +# Define Pydantic model for input validation +class ServiceToolInput(BaseModel): + '''Input model for service tool operation.''' + model_config = ConfigDict( + str_strip_whitespace=True, # Auto-strip whitespace from strings + validate_assignment=True, # Validate on assignment + extra='forbid' # Forbid extra fields + ) + + param1: str = Field(..., description="First parameter description (e.g., 'user123', 'project-abc')", min_length=1, max_length=100) + param2: Optional[int] = Field(default=None, description="Optional integer parameter with constraints", ge=0, le=1000) + tags: Optional[List[str]] = Field(default_factory=list, description="List of tags to apply", max_items=10) + +@mcp.tool( + name="service_tool_name", + annotations={ + "title": "Human-Readable Tool Title", + "readOnlyHint": True, # Tool does not modify environment + "destructiveHint": False, # Tool does not perform destructive operations + "idempotentHint": True, # Repeated calls have no additional effect + "openWorldHint": False # Tool does not interact with external entities + } +) +async def service_tool_name(params: ServiceToolInput) -> str: + '''Tool description automatically becomes the 'description' field. + + This tool performs a specific operation on the service. It validates all inputs + using the ServiceToolInput Pydantic model before processing. 
+ + Args: + params (ServiceToolInput): Validated input parameters containing: + - param1 (str): First parameter description + - param2 (Optional[int]): Optional parameter with default + - tags (Optional[List[str]]): List of tags + + Returns: + str: JSON-formatted response containing operation results + ''' + # Implementation here + pass +``` + +## Pydantic v2 Key Features + +- Use `model_config` instead of nested `Config` class +- Use `field_validator` instead of deprecated `validator` +- Use `model_dump()` instead of deprecated `dict()` +- Validators require `@classmethod` decorator +- Type hints are required for validator methods + +```python +from pydantic import BaseModel, Field, field_validator, ConfigDict + +class CreateUserInput(BaseModel): + model_config = ConfigDict( + str_strip_whitespace=True, + validate_assignment=True + ) + + name: str = Field(..., description="User's full name", min_length=1, max_length=100) + email: str = Field(..., description="User's email address", pattern=r'^[\w\.-]+@[\w\.-]+\.\w+$') + age: int = Field(..., description="User's age", ge=0, le=150) + + @field_validator('email') + @classmethod + def validate_email(cls, v: str) -> str: + if not v.strip(): + raise ValueError("Email cannot be empty") + return v.lower() +``` + +## Response Format Options + +Support multiple output formats for flexibility: + +```python +from enum import Enum + +class ResponseFormat(str, Enum): + '''Output format for tool responses.''' + MARKDOWN = "markdown" + JSON = "json" + +class UserSearchInput(BaseModel): + query: str = Field(..., description="Search query") + response_format: ResponseFormat = Field( + default=ResponseFormat.MARKDOWN, + description="Output format: 'markdown' for human-readable or 'json' for machine-readable" + ) +``` + +**Markdown format**: +- Use headers, lists, and formatting for clarity +- Convert timestamps to human-readable format (e.g., "2024-01-15 10:30:00 UTC" instead of epoch) +- Show display names with IDs in parentheses (e.g., "@john.doe (U123456)") +- Omit verbose metadata (e.g., show only one profile image URL, not all sizes) +- Group related information logically + +**JSON format**: +- Return complete, structured data suitable for programmatic processing +- Include all available fields and metadata +- Use consistent field names and types + +## Pagination Implementation + +For tools that list resources: + +```python +class ListInput(BaseModel): + limit: Optional[int] = Field(default=20, description="Maximum results to return", ge=1, le=100) + offset: Optional[int] = Field(default=0, description="Number of results to skip for pagination", ge=0) + +async def list_items(params: ListInput) -> str: + # Make API request with pagination + data = await api_request(limit=params.limit, offset=params.offset) + + # Return pagination info + response = { + "total": data["total"], + "count": len(data["items"]), + "offset": params.offset, + "items": data["items"], + "has_more": data["total"] > params.offset + len(data["items"]), + "next_offset": params.offset + len(data["items"]) if data["total"] > params.offset + len(data["items"]) else None + } + return json.dumps(response, indent=2) +``` + +## Error Handling + +Provide clear, actionable error messages: + +```python +def _handle_api_error(e: Exception) -> str: + '''Consistent error formatting across all tools.''' + if isinstance(e, httpx.HTTPStatusError): + if e.response.status_code == 404: + return "Error: Resource not found. Please check the ID is correct." 
+ elif e.response.status_code == 403: + return "Error: Permission denied. You don't have access to this resource." + elif e.response.status_code == 429: + return "Error: Rate limit exceeded. Please wait before making more requests." + return f"Error: API request failed with status {e.response.status_code}" + elif isinstance(e, httpx.TimeoutException): + return "Error: Request timed out. Please try again." + return f"Error: Unexpected error occurred: {type(e).__name__}" +``` + +## Shared Utilities + +Extract common functionality into reusable functions: + +```python +# Shared API request function +async def _make_api_request(endpoint: str, method: str = "GET", **kwargs) -> dict: + '''Reusable function for all API calls.''' + async with httpx.AsyncClient() as client: + response = await client.request( + method, + f"{API_BASE_URL}/{endpoint}", + timeout=30.0, + **kwargs + ) + response.raise_for_status() + return response.json() +``` + +## Async/Await Best Practices + +Always use async/await for network requests and I/O operations: + +```python +# Good: Async network request +async def fetch_data(resource_id: str) -> dict: + async with httpx.AsyncClient() as client: + response = await client.get(f"{API_URL}/resource/{resource_id}") + response.raise_for_status() + return response.json() + +# Bad: Synchronous request +def fetch_data(resource_id: str) -> dict: + response = requests.get(f"{API_URL}/resource/{resource_id}") # Blocks + return response.json() +``` + +## Type Hints + +Use type hints throughout: + +```python +from typing import Optional, List, Dict, Any + +async def get_user(user_id: str) -> Dict[str, Any]: + data = await fetch_user(user_id) + return {"id": data["id"], "name": data["name"]} +``` + +## Tool Docstrings + +Every tool must have comprehensive docstrings with explicit type information: + +```python +async def search_users(params: UserSearchInput) -> str: + ''' + Search for users in the Example system by name, email, or team. + + This tool searches across all user profiles in the Example platform, + supporting partial matches and various search filters. It does NOT + create or modify users, only searches existing ones. 
+ + Args: + params (UserSearchInput): Validated input parameters containing: + - query (str): Search string to match against names/emails (e.g., "john", "@example.com", "team:marketing") + - limit (Optional[int]): Maximum results to return, between 1-100 (default: 20) + - offset (Optional[int]): Number of results to skip for pagination (default: 0) + + Returns: + str: JSON-formatted string containing search results with the following schema: + + Success response: + { + "total": int, # Total number of matches found + "count": int, # Number of results in this response + "offset": int, # Current pagination offset + "users": [ + { + "id": str, # User ID (e.g., "U123456789") + "name": str, # Full name (e.g., "John Doe") + "email": str, # Email address (e.g., "john@example.com") + "team": str # Team name (e.g., "Marketing") - optional + } + ] + } + + Error response: + "Error: " or "No users found matching ''" + + Examples: + - Use when: "Find all marketing team members" -> params with query="team:marketing" + - Use when: "Search for John's account" -> params with query="john" + - Don't use when: You need to create a user (use example_create_user instead) + - Don't use when: You have a user ID and need full details (use example_get_user instead) + + Error Handling: + - Input validation errors are handled by Pydantic model + - Returns "Error: Rate limit exceeded" if too many requests (429 status) + - Returns "Error: Invalid API authentication" if API key is invalid (401 status) + - Returns formatted list of results or "No users found matching 'query'" + ''' +``` + +## Complete Example + +See below for a complete Python MCP server example: + +```python +#!/usr/bin/env python3 +''' +MCP Server for Example Service. + +This server provides tools to interact with Example API, including user search, +project management, and data export capabilities. 
+''' + +from typing import Optional, List, Dict, Any +from enum import Enum +import httpx +from pydantic import BaseModel, Field, field_validator, ConfigDict +from mcp.server.fastmcp import FastMCP + +# Initialize the MCP server +mcp = FastMCP("example_mcp") + +# Constants +API_BASE_URL = "https://api.example.com/v1" + +# Enums +class ResponseFormat(str, Enum): + '''Output format for tool responses.''' + MARKDOWN = "markdown" + JSON = "json" + +# Pydantic Models for Input Validation +class UserSearchInput(BaseModel): + '''Input model for user search operations.''' + model_config = ConfigDict( + str_strip_whitespace=True, + validate_assignment=True + ) + + query: str = Field(..., description="Search string to match against names/emails", min_length=2, max_length=200) + limit: Optional[int] = Field(default=20, description="Maximum results to return", ge=1, le=100) + offset: Optional[int] = Field(default=0, description="Number of results to skip for pagination", ge=0) + response_format: ResponseFormat = Field(default=ResponseFormat.MARKDOWN, description="Output format") + + @field_validator('query') + @classmethod + def validate_query(cls, v: str) -> str: + if not v.strip(): + raise ValueError("Query cannot be empty or whitespace only") + return v.strip() + +# Shared utility functions +async def _make_api_request(endpoint: str, method: str = "GET", **kwargs) -> dict: + '''Reusable function for all API calls.''' + async with httpx.AsyncClient() as client: + response = await client.request( + method, + f"{API_BASE_URL}/{endpoint}", + timeout=30.0, + **kwargs + ) + response.raise_for_status() + return response.json() + +def _handle_api_error(e: Exception) -> str: + '''Consistent error formatting across all tools.''' + if isinstance(e, httpx.HTTPStatusError): + if e.response.status_code == 404: + return "Error: Resource not found. Please check the ID is correct." + elif e.response.status_code == 403: + return "Error: Permission denied. You don't have access to this resource." + elif e.response.status_code == 429: + return "Error: Rate limit exceeded. Please wait before making more requests." + return f"Error: API request failed with status {e.response.status_code}" + elif isinstance(e, httpx.TimeoutException): + return "Error: Request timed out. Please try again." + return f"Error: Unexpected error occurred: {type(e).__name__}" + +# Tool definitions +@mcp.tool( + name="example_search_users", + annotations={ + "title": "Search Example Users", + "readOnlyHint": True, + "destructiveHint": False, + "idempotentHint": True, + "openWorldHint": True + } +) +async def example_search_users(params: UserSearchInput) -> str: + '''Search for users in the Example system by name, email, or team. 
+ + [Full docstring as shown above] + ''' + try: + # Make API request using validated parameters + data = await _make_api_request( + "users/search", + params={ + "q": params.query, + "limit": params.limit, + "offset": params.offset + } + ) + + users = data.get("users", []) + total = data.get("total", 0) + + if not users: + return f"No users found matching '{params.query}'" + + # Format response based on requested format + if params.response_format == ResponseFormat.MARKDOWN: + lines = [f"# User Search Results: '{params.query}'", ""] + lines.append(f"Found {total} users (showing {len(users)})") + lines.append("") + + for user in users: + lines.append(f"## {user['name']} ({user['id']})") + lines.append(f"- **Email**: {user['email']}") + if user.get('team'): + lines.append(f"- **Team**: {user['team']}") + lines.append("") + + return "\n".join(lines) + + else: + # Machine-readable JSON format + import json + response = { + "total": total, + "count": len(users), + "offset": params.offset, + "users": users + } + return json.dumps(response, indent=2) + + except Exception as e: + return _handle_api_error(e) + +if __name__ == "__main__": + mcp.run() +``` + +--- + +## Advanced FastMCP Features + +### Context Parameter Injection + +FastMCP can automatically inject a `Context` parameter into tools for advanced capabilities like logging, progress reporting, resource reading, and user interaction: + +```python +from mcp.server.fastmcp import FastMCP, Context + +mcp = FastMCP("example_mcp") + +@mcp.tool() +async def advanced_search(query: str, ctx: Context) -> str: + '''Advanced tool with context access for logging and progress.''' + + # Report progress for long operations + await ctx.report_progress(0.25, "Starting search...") + + # Log information for debugging + await ctx.log_info("Processing query", {"query": query, "timestamp": datetime.now()}) + + # Perform search + results = await search_api(query) + await ctx.report_progress(0.75, "Formatting results...") + + # Access server configuration + server_name = ctx.fastmcp.name + + return format_results(results) + +@mcp.tool() +async def interactive_tool(resource_id: str, ctx: Context) -> str: + '''Tool that can request additional input from users.''' + + # Request sensitive information when needed + api_key = await ctx.elicit( + prompt="Please provide your API key:", + input_type="password" + ) + + # Use the provided key + return await api_call(resource_id, api_key) +``` + +**Context capabilities:** +- `ctx.report_progress(progress, message)` - Report progress for long operations +- `ctx.log_info(message, data)` / `ctx.log_error()` / `ctx.log_debug()` - Logging +- `ctx.elicit(prompt, input_type)` - Request input from users +- `ctx.fastmcp.name` - Access server configuration +- `ctx.read_resource(uri)` - Read MCP resources + +### Resource Registration + +Expose data as resources for efficient, template-based access: + +```python +@mcp.resource("file://documents/{name}") +async def get_document(name: str) -> str: + '''Expose documents as MCP resources. + + Resources are useful for static or semi-static data that doesn't + require complex parameters. They use URI templates for flexible access. 
+    '''
+    document_path = f"./docs/{name}"
+    with open(document_path, "r") as f:
+        return f.read()
+
+@mcp.resource("config://settings/{key}")
+async def get_setting(key: str, ctx: Context) -> str:
+    '''Expose configuration as resources with context.'''
+    settings = await load_settings()
+    return json.dumps(settings.get(key, {}))
+```
+
+**When to use Resources vs Tools:**
+- **Resources**: For data access with simple parameters (URI templates)
+- **Tools**: For complex operations with validation and business logic
+
+### Structured Output Types
+
+FastMCP supports multiple return types beyond strings:
+
+```python
+from datetime import datetime
+from typing import Any, Dict, TypedDict
+from dataclasses import dataclass
+from pydantic import BaseModel
+
+# TypedDict for structured returns
+class UserData(TypedDict):
+    id: str
+    name: str
+    email: str
+
+@mcp.tool()
+async def get_user_typed(user_id: str) -> UserData:
+    '''Returns structured data - FastMCP handles serialization.'''
+    return {"id": user_id, "name": "John Doe", "email": "john@example.com"}
+
+# Pydantic models for complex validation
+class DetailedUser(BaseModel):
+    id: str
+    name: str
+    email: str
+    created_at: datetime
+    metadata: Dict[str, Any]
+
+@mcp.tool()
+async def get_user_detailed(user_id: str) -> DetailedUser:
+    '''Returns Pydantic model - automatically generates schema.'''
+    user = await fetch_user(user_id)
+    return DetailedUser(**user)
+```
+
+### Lifespan Management
+
+Initialize resources that persist across requests:
+
+```python
+from contextlib import asynccontextmanager
+
+@asynccontextmanager
+async def app_lifespan(server: FastMCP):
+    '''Manage resources that live for the server's lifetime.'''
+    # Initialize connections, load config, etc.
+    db = await connect_to_database()
+    config = load_configuration()
+
+    # Make available to all tools
+    yield {"db": db, "config": config}
+
+    # Cleanup on shutdown
+    await db.close()
+
+mcp = FastMCP("example_mcp", lifespan=app_lifespan)
+
+@mcp.tool()
+async def query_data(query: str, ctx: Context) -> str:
+    '''Access lifespan resources through context.'''
+    db = ctx.request_context.lifespan_context["db"]
+    results = await db.query(query)
+    return format_results(results)
+```
+
+### Transport Options
+
+FastMCP supports two main transport mechanisms:
+
+```python
+# stdio transport (for local tools) - default
+if __name__ == "__main__":
+    mcp.run()
+
+# Streamable HTTP transport (for remote servers)
+if __name__ == "__main__":
+    mcp.run(transport="streamable_http", port=8000)
+```
+
+**Transport selection:**
+- **stdio**: Command-line tools, local integrations, subprocess execution
+- **Streamable HTTP**: Web services, remote access, multiple clients
+
+---
+
+## Code Best Practices
+
+### Code Composability and Reusability
+
+Your implementation MUST prioritize composability and code reuse:
+
+1. **Extract Common Functionality**:
+   - Create reusable helper functions for operations used across multiple tools
+   - Build shared API clients for HTTP requests instead of duplicating code
+   - Centralize error handling logic in utility functions
+   - Extract business logic into dedicated functions that can be composed
+   - Extract shared markdown or JSON field selection & formatting functionality
+
+2.
**Avoid Duplication**: + - NEVER copy-paste similar code between tools + - If you find yourself writing similar logic twice, extract it into a function + - Common operations like pagination, filtering, field selection, and formatting should be shared + - Authentication/authorization logic should be centralized + +### Python-Specific Best Practices + +1. **Use Type Hints**: Always include type annotations for function parameters and return values +2. **Pydantic Models**: Define clear Pydantic models for all input validation +3. **Avoid Manual Validation**: Let Pydantic handle input validation with constraints +4. **Proper Imports**: Group imports (standard library, third-party, local) +5. **Error Handling**: Use specific exception types (httpx.HTTPStatusError, not generic Exception) +6. **Async Context Managers**: Use `async with` for resources that need cleanup +7. **Constants**: Define module-level constants in UPPER_CASE + +## Quality Checklist + +Before finalizing your Python MCP server implementation, ensure: + +### Strategic Design +- [ ] Tools enable complete workflows, not just API endpoint wrappers +- [ ] Tool names reflect natural task subdivisions +- [ ] Response formats optimize for agent context efficiency +- [ ] Human-readable identifiers used where appropriate +- [ ] Error messages guide agents toward correct usage + +### Implementation Quality +- [ ] FOCUSED IMPLEMENTATION: Most important and valuable tools implemented +- [ ] All tools have descriptive names and documentation +- [ ] Return types are consistent across similar operations +- [ ] Error handling is implemented for all external calls +- [ ] Server name follows format: `{service}_mcp` +- [ ] All network operations use async/await +- [ ] Common functionality is extracted into reusable functions +- [ ] Error messages are clear, actionable, and educational +- [ ] Outputs are properly validated and formatted + +### Tool Configuration +- [ ] All tools implement 'name' and 'annotations' in the decorator +- [ ] Annotations correctly set (readOnlyHint, destructiveHint, idempotentHint, openWorldHint) +- [ ] All tools use Pydantic BaseModel for input validation with Field() definitions +- [ ] All Pydantic Fields have explicit types and descriptions with constraints +- [ ] All tools have comprehensive docstrings with explicit input/output types +- [ ] Docstrings include complete schema structure for dict/JSON returns +- [ ] Pydantic models handle input validation (no manual validation needed) + +### Advanced Features (where applicable) +- [ ] Context injection used for logging, progress, or elicitation +- [ ] Resources registered for appropriate data endpoints +- [ ] Lifespan management implemented for persistent connections +- [ ] Structured output types used (TypedDict, Pydantic models) +- [ ] Appropriate transport configured (stdio or streamable HTTP) + +### Code Quality +- [ ] File includes proper imports including Pydantic imports +- [ ] Pagination is properly implemented where applicable +- [ ] Filtering options are provided for potentially large result sets +- [ ] All async functions are properly defined with `async def` +- [ ] HTTP client usage follows async patterns with proper context managers +- [ ] Type hints are used throughout the code +- [ ] Constants are defined at module level in UPPER_CASE + +### Testing +- [ ] Server runs successfully: `python your_server.py --help` +- [ ] All imports resolve correctly +- [ ] Sample tool calls work as expected +- [ ] Error scenarios handled gracefully \ No newline at end of 
file diff --git a/.claude/skills/mcp-builder/scripts/connections.py b/.claude/skills/mcp-builder/scripts/connections.py new file mode 100644 index 0000000..ffcd0da --- /dev/null +++ b/.claude/skills/mcp-builder/scripts/connections.py @@ -0,0 +1,151 @@ +"""Lightweight connection handling for MCP servers.""" + +from abc import ABC, abstractmethod +from contextlib import AsyncExitStack +from typing import Any + +from mcp import ClientSession, StdioServerParameters +from mcp.client.sse import sse_client +from mcp.client.stdio import stdio_client +from mcp.client.streamable_http import streamablehttp_client + + +class MCPConnection(ABC): + """Base class for MCP server connections.""" + + def __init__(self): + self.session = None + self._stack = None + + @abstractmethod + def _create_context(self): + """Create the connection context based on connection type.""" + + async def __aenter__(self): + """Initialize MCP server connection.""" + self._stack = AsyncExitStack() + await self._stack.__aenter__() + + try: + ctx = self._create_context() + result = await self._stack.enter_async_context(ctx) + + if len(result) == 2: + read, write = result + elif len(result) == 3: + read, write, _ = result + else: + raise ValueError(f"Unexpected context result: {result}") + + session_ctx = ClientSession(read, write) + self.session = await self._stack.enter_async_context(session_ctx) + await self.session.initialize() + return self + except BaseException: + await self._stack.__aexit__(None, None, None) + raise + + async def __aexit__(self, exc_type, exc_val, exc_tb): + """Clean up MCP server connection resources.""" + if self._stack: + await self._stack.__aexit__(exc_type, exc_val, exc_tb) + self.session = None + self._stack = None + + async def list_tools(self) -> list[dict[str, Any]]: + """Retrieve available tools from the MCP server.""" + response = await self.session.list_tools() + return [ + { + "name": tool.name, + "description": tool.description, + "input_schema": tool.inputSchema, + } + for tool in response.tools + ] + + async def call_tool(self, tool_name: str, arguments: dict[str, Any]) -> Any: + """Call a tool on the MCP server with provided arguments.""" + result = await self.session.call_tool(tool_name, arguments=arguments) + return result.content + + +class MCPConnectionStdio(MCPConnection): + """MCP connection using standard input/output.""" + + def __init__(self, command: str, args: list[str] = None, env: dict[str, str] = None): + super().__init__() + self.command = command + self.args = args or [] + self.env = env + + def _create_context(self): + return stdio_client( + StdioServerParameters(command=self.command, args=self.args, env=self.env) + ) + + +class MCPConnectionSSE(MCPConnection): + """MCP connection using Server-Sent Events.""" + + def __init__(self, url: str, headers: dict[str, str] = None): + super().__init__() + self.url = url + self.headers = headers or {} + + def _create_context(self): + return sse_client(url=self.url, headers=self.headers) + + +class MCPConnectionHTTP(MCPConnection): + """MCP connection using Streamable HTTP.""" + + def __init__(self, url: str, headers: dict[str, str] = None): + super().__init__() + self.url = url + self.headers = headers or {} + + def _create_context(self): + return streamablehttp_client(url=self.url, headers=self.headers) + + +def create_connection( + transport: str, + command: str = None, + args: list[str] = None, + env: dict[str, str] = None, + url: str = None, + headers: dict[str, str] = None, +) -> MCPConnection: + """Factory function to create 
the appropriate MCP connection.
+
+    Args:
+        transport: Connection type ("stdio", "sse", or "http")
+        command: Command to run (stdio only)
+        args: Command arguments (stdio only)
+        env: Environment variables (stdio only)
+        url: Server URL (sse and http only)
+        headers: HTTP headers (sse and http only)
+
+    Returns:
+        MCPConnection instance
+    """
+    transport = transport.lower()
+
+    if transport == "stdio":
+        if not command:
+            raise ValueError("Command is required for stdio transport")
+        return MCPConnectionStdio(command=command, args=args, env=env)
+
+    elif transport == "sse":
+        if not url:
+            raise ValueError("URL is required for sse transport")
+        return MCPConnectionSSE(url=url, headers=headers)
+
+    elif transport in ["http", "streamable_http", "streamable-http"]:
+        if not url:
+            raise ValueError("URL is required for http transport")
+        return MCPConnectionHTTP(url=url, headers=headers)
+
+    else:
+        raise ValueError(f"Unsupported transport type: {transport}. Use 'stdio', 'sse', or 'http'")
diff --git a/.claude/skills/mcp-builder/scripts/evaluation.py b/.claude/skills/mcp-builder/scripts/evaluation.py
new file mode 100644
index 0000000..4177856
--- /dev/null
+++ b/.claude/skills/mcp-builder/scripts/evaluation.py
@@ -0,0 +1,373 @@
+"""MCP Server Evaluation Harness
+
+This script evaluates MCP servers by running test questions against them using Claude.
+"""
+
+import argparse
+import asyncio
+import json
+import re
+import sys
+import time
+import traceback
+import xml.etree.ElementTree as ET
+from pathlib import Path
+from typing import Any
+
+from anthropic import Anthropic
+
+from connections import create_connection
+
+EVALUATION_PROMPT = """You are an AI assistant with access to tools.
+
+When given a task, you MUST:
+1. Use the available tools to complete the task
+2. Provide a summary of each step in your approach, wrapped in <summary> tags
+3. Provide feedback on the tools provided, wrapped in <feedback> tags
+4. Provide your final response, wrapped in <response> tags
+
+Summary Requirements:
+- In your <summary> tags, you must explain:
+  - The steps you took to complete the task
+  - Which tools you used, in what order, and why
+  - The inputs you provided to each tool
+  - The outputs you received from each tool
+  - A summary of how you arrived at the response
+
+Feedback Requirements:
+- In your <feedback> tags, provide constructive feedback on the tools:
+  - Comment on tool names: Are they clear and descriptive?
+  - Comment on input parameters: Are they well-documented? Are required vs optional parameters clear?
+  - Comment on descriptions: Do they accurately describe what the tool does?
+  - Comment on any errors encountered during tool usage: Did the tool fail to execute? Did the tool return too many tokens?
+  - Identify specific areas for improvement and explain WHY they would help
+  - Be specific and actionable in your suggestions
+
+Response Requirements:
+- Your response should be concise and directly address what was asked
+- Always wrap your final response in <response> tags
+- If you cannot solve the task, return NOT_FOUND
+- For numeric responses, provide just the number
+- For IDs, provide just the ID
+- For names or text, provide the exact text requested
+- Your response should go last"""
+
+
+def parse_evaluation_file(file_path: Path) -> list[dict[str, Any]]:
+    """Parse XML evaluation file with qa_pair elements."""
+    try:
+        tree = ET.parse(file_path)
+        root = tree.getroot()
+        evaluations = []
+
+        for qa_pair in root.findall(".//qa_pair"):
+            question_elem = qa_pair.find("question")
+            answer_elem = qa_pair.find("answer")
+
+            if question_elem is not None and answer_elem is not None:
+                evaluations.append({
+                    "question": (question_elem.text or "").strip(),
+                    "answer": (answer_elem.text or "").strip(),
+                })
+
+        return evaluations
+    except Exception as e:
+        print(f"Error parsing evaluation file {file_path}: {e}")
+        return []
+
+
+def extract_xml_content(text: str, tag: str) -> str | None:
+    """Extract content from XML tags."""
+    pattern = rf"<{tag}>(.*?)</{tag}>"
+    matches = re.findall(pattern, text, re.DOTALL)
+    return matches[-1].strip() if matches else None
+
+
+async def agent_loop(
+    client: Anthropic,
+    model: str,
+    question: str,
+    tools: list[dict[str, Any]],
+    connection: Any,
+) -> tuple[str | None, dict[str, Any]]:
+    """Run the agent loop with MCP tools."""
+    messages = [{"role": "user", "content": question}]
+
+    response = await asyncio.to_thread(
+        client.messages.create,
+        model=model,
+        max_tokens=4096,
+        system=EVALUATION_PROMPT,
+        messages=messages,
+        tools=tools,
+    )
+
+    messages.append({"role": "assistant", "content": response.content})
+
+    tool_metrics = {}
+
+    while response.stop_reason == "tool_use":
+        tool_use = next(block for block in response.content if block.type == "tool_use")
+        tool_name = tool_use.name
+        tool_input = tool_use.input
+
+        tool_start_ts = time.time()
+        try:
+            tool_result = await connection.call_tool(tool_name, tool_input)
+            tool_response = json.dumps(tool_result) if isinstance(tool_result, (dict, list)) else str(tool_result)
+        except Exception as e:
+            tool_response = f"Error executing tool {tool_name}: {str(e)}\n"
+            tool_response += traceback.format_exc()
+        tool_duration = time.time() - tool_start_ts
+
+        if tool_name not in tool_metrics:
+            tool_metrics[tool_name] = {"count": 0, "durations": []}
+        tool_metrics[tool_name]["count"] += 1
+        tool_metrics[tool_name]["durations"].append(tool_duration)
+
+        messages.append({
+            "role": "user",
+            "content": [{
+                "type": "tool_result",
+                "tool_use_id": tool_use.id,
+                "content": tool_response,
+            }]
+        })
+
+        response = await asyncio.to_thread(
+            client.messages.create,
+            model=model,
+            max_tokens=4096,
+            system=EVALUATION_PROMPT,
+            messages=messages,
+            tools=tools,
+        )
+        messages.append({"role": "assistant", "content": response.content})
+
+    response_text = next(
+        (block.text for block in response.content if hasattr(block, "text")),
+        None,
+    )
+    return response_text, tool_metrics
+
+
+async def evaluate_single_task(
+    client: Anthropic,
+    model: str,
+    qa_pair: dict[str, Any],
+    tools: list[dict[str, Any]],
+    connection: Any,
+    task_index: int,
+) -> dict[str, Any]:
+    """Evaluate a single QA pair with the given tools."""
+    start_time = time.time()
+
+    print(f"Task {task_index + 1}: Running task with question: {qa_pair['question']}")
+
response, tool_metrics = await agent_loop(client, model, qa_pair["question"], tools, connection) + + response_value = extract_xml_content(response, "response") + summary = extract_xml_content(response, "summary") + feedback = extract_xml_content(response, "feedback") + + duration_seconds = time.time() - start_time + + return { + "question": qa_pair["question"], + "expected": qa_pair["answer"], + "actual": response_value, + "score": int(response_value == qa_pair["answer"]) if response_value else 0, + "total_duration": duration_seconds, + "tool_calls": tool_metrics, + "num_tool_calls": sum(len(metrics["durations"]) for metrics in tool_metrics.values()), + "summary": summary, + "feedback": feedback, + } + + +REPORT_HEADER = """ +# Evaluation Report + +## Summary + +- **Accuracy**: {correct}/{total} ({accuracy:.1f}%) +- **Average Task Duration**: {average_duration_s:.2f}s +- **Average Tool Calls per Task**: {average_tool_calls:.2f} +- **Total Tool Calls**: {total_tool_calls} + +--- +""" + +TASK_TEMPLATE = """ +### Task {task_num} + +**Question**: {question} +**Ground Truth Answer**: `{expected_answer}` +**Actual Answer**: `{actual_answer}` +**Correct**: {correct_indicator} +**Duration**: {total_duration:.2f}s +**Tool Calls**: {tool_calls} + +**Summary** +{summary} + +**Feedback** +{feedback} + +--- +""" + + +async def run_evaluation( + eval_path: Path, + connection: Any, + model: str = "claude-3-7-sonnet-20250219", +) -> str: + """Run evaluation with MCP server tools.""" + print("🚀 Starting Evaluation") + + client = Anthropic() + + tools = await connection.list_tools() + print(f"📋 Loaded {len(tools)} tools from MCP server") + + qa_pairs = parse_evaluation_file(eval_path) + print(f"📋 Loaded {len(qa_pairs)} evaluation tasks") + + results = [] + for i, qa_pair in enumerate(qa_pairs): + print(f"Processing task {i + 1}/{len(qa_pairs)}") + result = await evaluate_single_task(client, model, qa_pair, tools, connection, i) + results.append(result) + + correct = sum(r["score"] for r in results) + accuracy = (correct / len(results)) * 100 if results else 0 + average_duration_s = sum(r["total_duration"] for r in results) / len(results) if results else 0 + average_tool_calls = sum(r["num_tool_calls"] for r in results) / len(results) if results else 0 + total_tool_calls = sum(r["num_tool_calls"] for r in results) + + report = REPORT_HEADER.format( + correct=correct, + total=len(results), + accuracy=accuracy, + average_duration_s=average_duration_s, + average_tool_calls=average_tool_calls, + total_tool_calls=total_tool_calls, + ) + + report += "".join([ + TASK_TEMPLATE.format( + task_num=i + 1, + question=qa_pair["question"], + expected_answer=qa_pair["answer"], + actual_answer=result["actual"] or "N/A", + correct_indicator="✅" if result["score"] else "❌", + total_duration=result["total_duration"], + tool_calls=json.dumps(result["tool_calls"], indent=2), + summary=result["summary"] or "N/A", + feedback=result["feedback"] or "N/A", + ) + for i, (qa_pair, result) in enumerate(zip(qa_pairs, results)) + ]) + + return report + + +def parse_headers(header_list: list[str]) -> dict[str, str]: + """Parse header strings in format 'Key: Value' into a dictionary.""" + headers = {} + if not header_list: + return headers + + for header in header_list: + if ":" in header: + key, value = header.split(":", 1) + headers[key.strip()] = value.strip() + else: + print(f"Warning: Ignoring malformed header: {header}") + return headers + + +def parse_env_vars(env_list: list[str]) -> dict[str, str]: + """Parse environment variable 
strings in format 'KEY=VALUE' into a dictionary."""
+    env = {}
+    if not env_list:
+        return env
+
+    for env_var in env_list:
+        if "=" in env_var:
+            key, value = env_var.split("=", 1)
+            env[key.strip()] = value.strip()
+        else:
+            print(f"Warning: Ignoring malformed environment variable: {env_var}")
+    return env
+
+
+async def main():
+    parser = argparse.ArgumentParser(
+        description="Evaluate MCP servers using test questions",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Examples:
+  # Evaluate a local stdio MCP server
+  python evaluation.py -t stdio -c python -a my_server.py eval.xml
+
+  # Evaluate an SSE MCP server
+  python evaluation.py -t sse -u https://example.com/mcp -H "Authorization: Bearer token" eval.xml
+
+  # Evaluate an HTTP MCP server with custom model
+  python evaluation.py -t http -u https://example.com/mcp -m claude-3-5-sonnet-20241022 eval.xml
+        """,
+    )
+
+    parser.add_argument("eval_file", type=Path, help="Path to evaluation XML file")
+    parser.add_argument("-t", "--transport", choices=["stdio", "sse", "http"], default="stdio", help="Transport type (default: stdio)")
+    parser.add_argument("-m", "--model", default="claude-3-7-sonnet-20250219", help="Claude model to use (default: claude-3-7-sonnet-20250219)")
+
+    stdio_group = parser.add_argument_group("stdio options")
+    stdio_group.add_argument("-c", "--command", help="Command to run MCP server (stdio only)")
+    stdio_group.add_argument("-a", "--args", nargs="+", help="Arguments for the command (stdio only)")
+    stdio_group.add_argument("-e", "--env", nargs="+", help="Environment variables in KEY=VALUE format (stdio only)")
+
+    remote_group = parser.add_argument_group("sse/http options")
+    remote_group.add_argument("-u", "--url", help="MCP server URL (sse/http only)")
+    remote_group.add_argument("-H", "--header", nargs="+", dest="headers", help="HTTP headers in 'Key: Value' format (sse/http only)")
+
+    parser.add_argument("-o", "--output", type=Path, help="Output file for evaluation report (default: stdout)")
+
+    args = parser.parse_args()
+
+    if not args.eval_file.exists():
+        print(f"Error: Evaluation file not found: {args.eval_file}")
+        sys.exit(1)
+
+    headers = parse_headers(args.headers) if args.headers else None
+    env_vars = parse_env_vars(args.env) if args.env else None
+
+    try:
+        connection = create_connection(
+            transport=args.transport,
+            command=args.command,
+            args=args.args,
+            env=env_vars,
+            url=args.url,
+            headers=headers,
+        )
+    except ValueError as e:
+        print(f"Error: {e}")
+        sys.exit(1)
+
+    print(f"🔗 Connecting to MCP server via {args.transport}...")
+
+    async with connection:
+        print("✅ Connected successfully")
+        report = await run_evaluation(args.eval_file, connection, args.model)
+
+    if args.output:
+        args.output.write_text(report)
+        print(f"\n✅ Report saved to {args.output}")
+    else:
+        print("\n" + report)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/.claude/skills/mcp-builder/scripts/example_evaluation.xml b/.claude/skills/mcp-builder/scripts/example_evaluation.xml
new file mode 100644
index 0000000..41e4459
--- /dev/null
+++ b/.claude/skills/mcp-builder/scripts/example_evaluation.xml
@@ -0,0 +1,22 @@
+<evaluations>
+  <qa_pair>
+    <question>Calculate the compound interest on $10,000 invested at 5% annual interest rate, compounded monthly for 3 years. What is the final amount in dollars (rounded to 2 decimal places)?</question>
+    <answer>11614.72</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>A projectile is launched at a 45-degree angle with an initial velocity of 50 m/s.
Calculate the total distance (in meters) it has traveled from the launch point after 2 seconds, assuming g=9.8 m/s². Round to 2 decimal places.</question>
+    <answer>87.25</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>A sphere has a volume of 500 cubic meters. Calculate its surface area in square meters. Round to 2 decimal places.</question>
+    <answer>304.65</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>Calculate the population standard deviation of this dataset: [12, 15, 18, 22, 25, 30, 35]. Round to 2 decimal places.</question>
+    <answer>7.61</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>Calculate the pH of a solution with a hydrogen ion concentration of 3.5 × 10^-5 M. Round to 2 decimal places.</question>
+    <answer>4.46</answer>
+  </qa_pair>
+</evaluations>
diff --git a/.claude/skills/mcp-builder/scripts/requirements.txt b/.claude/skills/mcp-builder/scripts/requirements.txt
new file mode 100644
index 0000000..e73e5d1
--- /dev/null
+++ b/.claude/skills/mcp-builder/scripts/requirements.txt
@@ -0,0 +1,2 @@
+anthropic>=0.39.0
+mcp>=1.1.0
diff --git a/.claude/skills/pdf/LICENSE.txt b/.claude/skills/pdf/LICENSE.txt
new file mode 100644
index 0000000..c55ab42
--- /dev/null
+++ b/.claude/skills/pdf/LICENSE.txt
@@ -0,0 +1,30 @@
+© 2025 Anthropic, PBC. All rights reserved.
+
+LICENSE: Use of these materials (including all code, prompts, assets, files,
+and other components of this Skill) is governed by your agreement with
+Anthropic regarding use of Anthropic's services. If no separate agreement
+exists, use is governed by Anthropic's Consumer Terms of Service or
+Commercial Terms of Service, as applicable:
+https://www.anthropic.com/legal/consumer-terms
+https://www.anthropic.com/legal/commercial-terms
+Your applicable agreement is referred to as the "Agreement." "Services" are
+as defined in the Agreement.
+
+ADDITIONAL RESTRICTIONS: Notwithstanding anything in the Agreement to the
+contrary, users may not:
+
+- Extract these materials from the Services or retain copies of these
+  materials outside the Services
+- Reproduce or copy these materials, except for temporary copies created
+  automatically during authorized use of the Services
+- Create derivative works based on these materials
+- Distribute, sublicense, or transfer these materials to any third party
+- Make, offer to sell, sell, or import any inventions embodied in these
+  materials
+- Reverse engineer, decompile, or disassemble these materials
+
+The receipt, viewing, or possession of these materials does not convey or
+imply any license or right beyond those expressly granted above.
+
+Anthropic retains all right, title, and interest in these materials,
+including all copyrights, patents, and other intellectual property rights.
diff --git a/.claude/skills/pdf/SKILL.md b/.claude/skills/pdf/SKILL.md
new file mode 100644
index 0000000..f6a22dd
--- /dev/null
+++ b/.claude/skills/pdf/SKILL.md
@@ -0,0 +1,294 @@
+---
+name: pdf
+description: Comprehensive PDF manipulation toolkit for extracting text and tables, creating new PDFs, merging/splitting documents, and handling forms. When Claude needs to fill in a PDF form or programmatically process, generate, or analyze PDF documents at scale.
+license: Proprietary. LICENSE.txt has complete terms
+---
+
+# PDF Processing Guide
+
+## Overview
+
+This guide covers essential PDF processing operations using Python libraries and command-line tools. For advanced features, JavaScript libraries, and detailed examples, see reference.md. If you need to fill out a PDF form, read forms.md and follow its instructions.
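+
+The examples below assume the core Python libraries are already installed; a typical setup (package names as published on PyPI) might be:
+
+```bash
+pip install pypdf pdfplumber reportlab
+```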
+ +## Quick Start + +```python +from pypdf import PdfReader, PdfWriter + +# Read a PDF +reader = PdfReader("document.pdf") +print(f"Pages: {len(reader.pages)}") + +# Extract text +text = "" +for page in reader.pages: + text += page.extract_text() +``` + +## Python Libraries + +### pypdf - Basic Operations + +#### Merge PDFs +```python +from pypdf import PdfWriter, PdfReader + +writer = PdfWriter() +for pdf_file in ["doc1.pdf", "doc2.pdf", "doc3.pdf"]: + reader = PdfReader(pdf_file) + for page in reader.pages: + writer.add_page(page) + +with open("merged.pdf", "wb") as output: + writer.write(output) +``` + +#### Split PDF +```python +reader = PdfReader("input.pdf") +for i, page in enumerate(reader.pages): + writer = PdfWriter() + writer.add_page(page) + with open(f"page_{i+1}.pdf", "wb") as output: + writer.write(output) +``` + +#### Extract Metadata +```python +reader = PdfReader("document.pdf") +meta = reader.metadata +print(f"Title: {meta.title}") +print(f"Author: {meta.author}") +print(f"Subject: {meta.subject}") +print(f"Creator: {meta.creator}") +``` + +#### Rotate Pages +```python +reader = PdfReader("input.pdf") +writer = PdfWriter() + +page = reader.pages[0] +page.rotate(90) # Rotate 90 degrees clockwise +writer.add_page(page) + +with open("rotated.pdf", "wb") as output: + writer.write(output) +``` + +### pdfplumber - Text and Table Extraction + +#### Extract Text with Layout +```python +import pdfplumber + +with pdfplumber.open("document.pdf") as pdf: + for page in pdf.pages: + text = page.extract_text() + print(text) +``` + +#### Extract Tables +```python +with pdfplumber.open("document.pdf") as pdf: + for i, page in enumerate(pdf.pages): + tables = page.extract_tables() + for j, table in enumerate(tables): + print(f"Table {j+1} on page {i+1}:") + for row in table: + print(row) +``` + +#### Advanced Table Extraction +```python +import pandas as pd + +with pdfplumber.open("document.pdf") as pdf: + all_tables = [] + for page in pdf.pages: + tables = page.extract_tables() + for table in tables: + if table: # Check if table is not empty + df = pd.DataFrame(table[1:], columns=table[0]) + all_tables.append(df) + +# Combine all tables +if all_tables: + combined_df = pd.concat(all_tables, ignore_index=True) + combined_df.to_excel("extracted_tables.xlsx", index=False) +``` + +### reportlab - Create PDFs + +#### Basic PDF Creation +```python +from reportlab.lib.pagesizes import letter +from reportlab.pdfgen import canvas + +c = canvas.Canvas("hello.pdf", pagesize=letter) +width, height = letter + +# Add text +c.drawString(100, height - 100, "Hello World!") +c.drawString(100, height - 120, "This is a PDF created with reportlab") + +# Add a line +c.line(100, height - 140, 400, height - 140) + +# Save +c.save() +``` + +#### Create PDF with Multiple Pages +```python +from reportlab.lib.pagesizes import letter +from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, PageBreak +from reportlab.lib.styles import getSampleStyleSheet + +doc = SimpleDocTemplate("report.pdf", pagesize=letter) +styles = getSampleStyleSheet() +story = [] + +# Add content +title = Paragraph("Report Title", styles['Title']) +story.append(title) +story.append(Spacer(1, 12)) + +body = Paragraph("This is the body of the report. 
" * 20, styles['Normal']) +story.append(body) +story.append(PageBreak()) + +# Page 2 +story.append(Paragraph("Page 2", styles['Heading1'])) +story.append(Paragraph("Content for page 2", styles['Normal'])) + +# Build PDF +doc.build(story) +``` + +## Command-Line Tools + +### pdftotext (poppler-utils) +```bash +# Extract text +pdftotext input.pdf output.txt + +# Extract text preserving layout +pdftotext -layout input.pdf output.txt + +# Extract specific pages +pdftotext -f 1 -l 5 input.pdf output.txt # Pages 1-5 +``` + +### qpdf +```bash +# Merge PDFs +qpdf --empty --pages file1.pdf file2.pdf -- merged.pdf + +# Split pages +qpdf input.pdf --pages . 1-5 -- pages1-5.pdf +qpdf input.pdf --pages . 6-10 -- pages6-10.pdf + +# Rotate pages +qpdf input.pdf output.pdf --rotate=+90:1 # Rotate page 1 by 90 degrees + +# Remove password +qpdf --password=mypassword --decrypt encrypted.pdf decrypted.pdf +``` + +### pdftk (if available) +```bash +# Merge +pdftk file1.pdf file2.pdf cat output merged.pdf + +# Split +pdftk input.pdf burst + +# Rotate +pdftk input.pdf rotate 1east output rotated.pdf +``` + +## Common Tasks + +### Extract Text from Scanned PDFs +```python +# Requires: pip install pytesseract pdf2image +import pytesseract +from pdf2image import convert_from_path + +# Convert PDF to images +images = convert_from_path('scanned.pdf') + +# OCR each page +text = "" +for i, image in enumerate(images): + text += f"Page {i+1}:\n" + text += pytesseract.image_to_string(image) + text += "\n\n" + +print(text) +``` + +### Add Watermark +```python +from pypdf import PdfReader, PdfWriter + +# Create watermark (or load existing) +watermark = PdfReader("watermark.pdf").pages[0] + +# Apply to all pages +reader = PdfReader("document.pdf") +writer = PdfWriter() + +for page in reader.pages: + page.merge_page(watermark) + writer.add_page(page) + +with open("watermarked.pdf", "wb") as output: + writer.write(output) +``` + +### Extract Images +```bash +# Using pdfimages (poppler-utils) +pdfimages -j input.pdf output_prefix + +# This extracts all images as output_prefix-000.jpg, output_prefix-001.jpg, etc. +``` + +### Password Protection +```python +from pypdf import PdfReader, PdfWriter + +reader = PdfReader("input.pdf") +writer = PdfWriter() + +for page in reader.pages: + writer.add_page(page) + +# Add password +writer.encrypt("userpassword", "ownerpassword") + +with open("encrypted.pdf", "wb") as output: + writer.write(output) +``` + +## Quick Reference + +| Task | Best Tool | Command/Code | +|------|-----------|--------------| +| Merge PDFs | pypdf | `writer.add_page(page)` | +| Split PDFs | pypdf | One page per file | +| Extract text | pdfplumber | `page.extract_text()` | +| Extract tables | pdfplumber | `page.extract_tables()` | +| Create PDFs | reportlab | Canvas or Platypus | +| Command line merge | qpdf | `qpdf --empty --pages ...` | +| OCR scanned PDFs | pytesseract | Convert to image first | +| Fill PDF forms | pdf-lib or pypdf (see forms.md) | See forms.md | + +## Next Steps + +- For advanced pypdfium2 usage, see reference.md +- For JavaScript libraries (pdf-lib), see reference.md +- If you need to fill out a PDF form, follow the instructions in forms.md +- For troubleshooting guides, see reference.md diff --git a/.claude/skills/pdf/forms.md b/.claude/skills/pdf/forms.md new file mode 100644 index 0000000..4e23450 --- /dev/null +++ b/.claude/skills/pdf/forms.md @@ -0,0 +1,205 @@ +**CRITICAL: You MUST complete these steps in order. 
Do not skip ahead to writing code.**
+
+If you need to fill out a PDF form, first check to see if the PDF has fillable form fields. Run this script from this file's directory:
+`python scripts/check_fillable_fields <pdf_path>`, and depending on the result go to either the "Fillable fields" or "Non-fillable fields" section below and follow those instructions.
+
+# Fillable fields
+If the PDF has fillable form fields:
+- Run this script from this file's directory: `python scripts/extract_form_field_info.py <pdf_path>`. It will create a JSON file with a list of fields in this format:
+```
+[
+  {
+    "field_id": (unique ID for the field),
+    "page": (page number, 1-based),
+    "rect": ([left, bottom, right, top] bounding box in PDF coordinates, y=0 is the bottom of the page),
+    "type": ("text", "checkbox", "radio_group", or "choice"),
+  },
+  // Checkboxes have "checked_value" and "unchecked_value" properties:
+  {
+    "field_id": (unique ID for the field),
+    "page": (page number, 1-based),
+    "type": "checkbox",
+    "checked_value": (Set the field to this value to check the checkbox),
+    "unchecked_value": (Set the field to this value to uncheck the checkbox),
+  },
+  // Radio groups have a "radio_options" list with the possible choices.
+  {
+    "field_id": (unique ID for the field),
+    "page": (page number, 1-based),
+    "type": "radio_group",
+    "radio_options": [
+      {
+        "value": (set the field to this value to select this radio option),
+        "rect": (bounding box for the radio button for this option)
+      },
+      // Other radio options
+    ]
+  },
+  // Multiple choice fields have a "choice_options" list with the possible choices:
+  {
+    "field_id": (unique ID for the field),
+    "page": (page number, 1-based),
+    "type": "choice",
+    "choice_options": [
+      {
+        "value": (set the field to this value to select this option),
+        "text": (display text of the option)
+      },
+      // Other choice options
+    ],
+  }
+]
+```
+- Convert the PDF to PNGs (one image for each page) with this script (run from this file's directory):
+`python scripts/convert_pdf_to_images.py <pdf_path>`
+Then analyze the images to determine the purpose of each form field (make sure to convert the bounding box PDF coordinates to image coordinates).
+- Create a `field_values.json` file in this format with the values to be entered for each field:
+```
+[
+  {
+    "field_id": "last_name", // Must match the field_id from `extract_form_field_info.py`
+    "description": "The user's last name",
+    "page": 1, // Must match the "page" value in field_info.json
+    "value": "Simpson"
+  },
+  {
+    "field_id": "Checkbox12",
+    "description": "Checkbox to be checked if the user is 18 or over",
+    "page": 1,
+    "value": "/On" // If this is a checkbox, use its "checked_value" value to check it. If it's a radio button group, use one of the "value" values in "radio_options".
+  },
+  // more fields
+]
+```
+- Run the `fill_fillable_fields.py` script from this file's directory to create a filled-in PDF:
+`python scripts/fill_fillable_fields.py <pdf_path> <field_values.json> <output_pdf_path>`
+This script will verify that the field IDs and values you provide are valid; if it prints error messages, correct the appropriate fields and try again.
+
+# Non-fillable fields
+If the PDF doesn't have fillable form fields, you'll need to visually determine where the data should be added and create text annotations. Follow the steps below *exactly*. You MUST perform all of these steps to ensure that the form is accurately completed. Details for each step are below.
+- Convert the PDF to PNG images and determine field bounding boxes.
+- Create a JSON file with field information and validation images showing the bounding boxes.
+- Validate the bounding boxes.
+- Use the bounding boxes to fill in the form.
+
+## Step 1: Visual Analysis (REQUIRED)
+- Convert the PDF to PNG images. Run this script from this file's directory:
+`python scripts/convert_pdf_to_images.py <pdf_path>`
+The script will create a PNG image for each page in the PDF.
+- Carefully examine each PNG image and identify all form fields and areas where the user should enter data. For each form field where the user should enter text, determine bounding boxes for both the form field label, and the area where the user should enter text. The label and entry bounding boxes MUST NOT INTERSECT; the text entry box should only include the area where data should be entered. Usually this area will be immediately to the side, above, or below its label. Entry bounding boxes must be tall and wide enough to contain their text.
+
+These are some examples of form structures that you might see:
+
+*Label inside box*
+```
+┌────────────────────────┐
+│ Name:                  │
+└────────────────────────┘
+```
+The input area should be to the right of the "Name" label and extend to the edge of the box.
+
+*Label before line*
+```
+Email: _______________________
+```
+The input area should be above the line and include its entire width.
+
+*Label under line*
+```
+_________________________
+Name
+```
+The input area should be above the line and include the entire width of the line. This is common for signature and date fields.
+
+*Label above line*
+```
+Please enter any special requests:
+________________________________________________
+```
+The input area should extend from the bottom of the label to the line, and should include the entire width of the line.
+
+*Checkboxes*
+```
+Are you a US citizen? Yes □ No □
+```
+For checkboxes:
+- Look for small square boxes (□) - these are the actual checkboxes to target. They may be to the left or right of their labels.
+- Distinguish between label text ("Yes", "No") and the clickable checkbox squares.
+- The entry bounding box should cover ONLY the small square, not the text label.
+
+## Step 2: Create fields.json and validation images (REQUIRED)
+- Create a file named `fields.json` with information for the form fields and bounding boxes in this format:
+```
+{
+  "pages": [
+    {
+      "page_number": 1,
+      "image_width": (first page image width in pixels),
+      "image_height": (first page image height in pixels),
+    },
+    {
+      "page_number": 2,
+      "image_width": (second page image width in pixels),
+      "image_height": (second page image height in pixels),
+    }
+    // additional pages
+  ],
+  "form_fields": [
+    // Example for a text field.
+    {
+      "page_number": 1,
+      "description": "The user's last name should be entered here",
+      // Bounding boxes are [left, top, right, bottom]. The bounding boxes for the label and text entry should not overlap.
+      "field_label": "Last name",
+      "label_bounding_box": [30, 125, 95, 142],
+      "entry_bounding_box": [100, 125, 280, 142],
+      "entry_text": {
+        "text": "Johnson", // This text will be added as an annotation at the entry_bounding_box location
+        "font_size": 14, // optional, defaults to 14
+        "font_color": "000000", // optional, RRGGBB format, defaults to 000000 (black)
+      }
+    },
+    // Example for a checkbox.
TARGET THE SQUARE for the entry bounding box, NOT THE TEXT
+    {
+      "page_number": 2,
+      "description": "Checkbox that should be checked if the user is over 18",
+      "entry_bounding_box": [140, 525, 155, 540], // Small box over checkbox square
+      "field_label": "Yes",
+      "label_bounding_box": [100, 525, 132, 540], // Box containing "Yes" text
+      // Use "X" to check a checkbox.
+      "entry_text": {
+        "text": "X",
+      }
+    }
+    // additional form field entries
+  ]
+}
+```
+
+Create validation images by running this script from this file's directory for each page:
+`python scripts/create_validation_image.py <page_image> <fields.json>`
+
+The validation images will have red rectangles where text should be entered, and blue rectangles covering label text.
+
+## Step 3: Validate Bounding Boxes (REQUIRED)
+### Automated intersection check
+- Verify that none of the bounding boxes intersect and that the entry bounding boxes are tall enough by checking the fields.json file with the `check_bounding_boxes.py` script (run from this file's directory):
+`python scripts/check_bounding_boxes.py <fields.json>`
+
+If there are errors, reanalyze the relevant fields, adjust the bounding boxes, and iterate until there are no remaining errors. Remember: label (blue) bounding boxes should contain text labels; entry (red) boxes should not.
+
+### Manual image inspection
+**CRITICAL: Do not proceed without visually inspecting validation images**
+- Red rectangles must ONLY cover input areas
+- Red rectangles MUST NOT contain any text
+- Blue rectangles should contain label text
+- For checkboxes:
+  - Red rectangle MUST be centered on the checkbox square
+  - Blue rectangle should cover the text label for the checkbox
+
+- If any rectangles look wrong, fix fields.json, regenerate the validation images, and verify again. Repeat this process until the bounding boxes are fully accurate.
+
+
+## Step 4: Add annotations to the PDF
+Run this script from this file's directory to create a filled-out PDF using the information in fields.json:
+`python scripts/fill_pdf_form_with_annotations.py <pdf_path> <fields.json> <output_pdf_path>`
diff --git a/.claude/skills/pdf/reference.md b/.claude/skills/pdf/reference.md
new file mode 100644
index 0000000..41400bf
--- /dev/null
+++ b/.claude/skills/pdf/reference.md
@@ -0,0 +1,612 @@
+# PDF Processing Advanced Reference
+
+This document contains advanced PDF processing features, detailed examples, and additional libraries not covered in the main skill instructions.
+
+## pypdfium2 Library (Apache/BSD License)
+
+### Overview
+pypdfium2 is a Python binding for PDFium (Chromium's PDF library). It's excellent for fast PDF rendering and image generation, and can serve as a PyMuPDF replacement.
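+
+As a quick orientation (a minimal sketch assuming a local `document.pdf`), opening a document and inspecting it looks like this:
+
+```python
+import pypdfium2 as pdfium
+
+pdf = pdfium.PdfDocument("document.pdf")  # open from a file path
+print(f"Pages: {len(pdf)}")               # page count
+width, height = pdf[0].get_size()         # first page's size in PDF points
+```
+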
+### Render PDF to Images
+```python
+import pypdfium2 as pdfium
+from PIL import Image
+
+# Load PDF
+pdf = pdfium.PdfDocument("document.pdf")
+
+# Render page to image
+page = pdf[0]  # First page
+bitmap = page.render(
+    scale=2.0,   # Higher resolution
+    rotation=0   # No rotation
+)
+
+# Convert to PIL Image
+img = bitmap.to_pil()
+img.save("page_1.png", "PNG")
+
+# Process multiple pages
+for i, page in enumerate(pdf):
+    bitmap = page.render(scale=1.5)
+    img = bitmap.to_pil()
+    img.save(f"page_{i+1}.jpg", "JPEG", quality=90)
+```
+
+### Extract Text with pypdfium2
+```python
+import pypdfium2 as pdfium
+
+pdf = pdfium.PdfDocument("document.pdf")
+for i, page in enumerate(pdf):
+    textpage = page.get_textpage()
+    text = textpage.get_text_range()  # extract the page's full text
+    print(f"Page {i+1} text length: {len(text)} chars")
+```
+
+## JavaScript Libraries
+
+### pdf-lib (MIT License)
+
+pdf-lib is a powerful JavaScript library for creating and modifying PDF documents in any JavaScript environment.
+
+#### Load and Manipulate Existing PDF
+```javascript
+import { PDFDocument } from 'pdf-lib';
+import fs from 'fs';
+
+async function manipulatePDF() {
+  // Load existing PDF
+  const existingPdfBytes = fs.readFileSync('input.pdf');
+  const pdfDoc = await PDFDocument.load(existingPdfBytes);
+
+  // Get page count
+  const pageCount = pdfDoc.getPageCount();
+  console.log(`Document has ${pageCount} pages`);
+
+  // Add new page
+  const newPage = pdfDoc.addPage([600, 400]);
+  newPage.drawText('Added by pdf-lib', {
+    x: 100,
+    y: 300,
+    size: 16
+  });
+
+  // Save modified PDF
+  const pdfBytes = await pdfDoc.save();
+  fs.writeFileSync('modified.pdf', pdfBytes);
+}
+```
+
+#### Create Complex PDFs from Scratch
+```javascript
+import { PDFDocument, rgb, StandardFonts } from 'pdf-lib';
+import fs from 'fs';
+
+async function createPDF() {
+  const pdfDoc = await PDFDocument.create();
+
+  // Add fonts
+  const helveticaFont = await pdfDoc.embedFont(StandardFonts.Helvetica);
+  const helveticaBold = await pdfDoc.embedFont(StandardFonts.HelveticaBold);
+
+  // Add page
+  const page = pdfDoc.addPage([595, 842]); // A4 size
+  const { width, height } = page.getSize();
+
+  // Add text with styling
+  page.drawText('Invoice #12345', {
+    x: 50,
+    y: height - 50,
+    size: 18,
+    font: helveticaBold,
+    color: rgb(0.2, 0.2, 0.8)
+  });
+
+  // Add rectangle (header background)
+  page.drawRectangle({
+    x: 40,
+    y: height - 100,
+    width: width - 80,
+    height: 30,
+    color: rgb(0.9, 0.9, 0.9)
+  });
+
+  // Add table-like content
+  const items = [
+    ['Item', 'Qty', 'Price', 'Total'],
+    ['Widget', '2', '$50', '$100'],
+    ['Gadget', '1', '$75', '$75']
+  ];
+
+  let yPos = height - 150;
+  items.forEach(row => {
+    let xPos = 50;
+    row.forEach(cell => {
+      page.drawText(cell, {
+        x: xPos,
+        y: yPos,
+        size: 12,
+        font: helveticaFont
+      });
+      xPos += 120;
+    });
+    yPos -= 25;
+  });
+
+  const pdfBytes = await pdfDoc.save();
+  fs.writeFileSync('created.pdf', pdfBytes);
+}
+```
+
+#### Advanced Merge and Split Operations
+```javascript
+import { PDFDocument } from 'pdf-lib';
+import fs from 'fs';
+
+async function mergePDFs() {
+  // Create new document
+  const mergedPdf = await PDFDocument.create();
+
+  // Load source PDFs
+  const pdf1Bytes = fs.readFileSync('doc1.pdf');
+  const pdf2Bytes = fs.readFileSync('doc2.pdf');
+
+  const pdf1 = await PDFDocument.load(pdf1Bytes);
+  const pdf2 = await PDFDocument.load(pdf2Bytes);
+
+  // Copy pages from first PDF
+  const pdf1Pages = await mergedPdf.copyPages(pdf1, pdf1.getPageIndices());
+  pdf1Pages.forEach(page => mergedPdf.addPage(page));
+
+  // Copy specific pages from
second PDF (pages 0, 2, 4) + const pdf2Pages = await mergedPdf.copyPages(pdf2, [0, 2, 4]); + pdf2Pages.forEach(page => mergedPdf.addPage(page)); + + const mergedPdfBytes = await mergedPdf.save(); + fs.writeFileSync('merged.pdf', mergedPdfBytes); +} +``` + +### pdfjs-dist (Apache License) + +PDF.js is Mozilla's JavaScript library for rendering PDFs in the browser. + +#### Basic PDF Loading and Rendering +```javascript +import * as pdfjsLib from 'pdfjs-dist'; + +// Configure worker (important for performance) +pdfjsLib.GlobalWorkerOptions.workerSrc = './pdf.worker.js'; + +async function renderPDF() { + // Load PDF + const loadingTask = pdfjsLib.getDocument('document.pdf'); + const pdf = await loadingTask.promise; + + console.log(`Loaded PDF with ${pdf.numPages} pages`); + + // Get first page + const page = await pdf.getPage(1); + const viewport = page.getViewport({ scale: 1.5 }); + + // Render to canvas + const canvas = document.createElement('canvas'); + const context = canvas.getContext('2d'); + canvas.height = viewport.height; + canvas.width = viewport.width; + + const renderContext = { + canvasContext: context, + viewport: viewport + }; + + await page.render(renderContext).promise; + document.body.appendChild(canvas); +} +``` + +#### Extract Text with Coordinates +```javascript +import * as pdfjsLib from 'pdfjs-dist'; + +async function extractText() { + const loadingTask = pdfjsLib.getDocument('document.pdf'); + const pdf = await loadingTask.promise; + + let fullText = ''; + + // Extract text from all pages + for (let i = 1; i <= pdf.numPages; i++) { + const page = await pdf.getPage(i); + const textContent = await page.getTextContent(); + + const pageText = textContent.items + .map(item => item.str) + .join(' '); + + fullText += `\n--- Page ${i} ---\n${pageText}`; + + // Get text with coordinates for advanced processing + const textWithCoords = textContent.items.map(item => ({ + text: item.str, + x: item.transform[4], + y: item.transform[5], + width: item.width, + height: item.height + })); + } + + console.log(fullText); + return fullText; +} +``` + +#### Extract Annotations and Forms +```javascript +import * as pdfjsLib from 'pdfjs-dist'; + +async function extractAnnotations() { + const loadingTask = pdfjsLib.getDocument('annotated.pdf'); + const pdf = await loadingTask.promise; + + for (let i = 1; i <= pdf.numPages; i++) { + const page = await pdf.getPage(i); + const annotations = await page.getAnnotations(); + + annotations.forEach(annotation => { + console.log(`Annotation type: ${annotation.subtype}`); + console.log(`Content: ${annotation.contents}`); + console.log(`Coordinates: ${JSON.stringify(annotation.rect)}`); + }); + } +} +``` + +## Advanced Command-Line Operations + +### poppler-utils Advanced Features + +#### Extract Text with Bounding Box Coordinates +```bash +# Extract text with bounding box coordinates (essential for structured data) +pdftotext -bbox-layout document.pdf output.xml + +# The XML output contains precise coordinates for each text element +``` + +#### Advanced Image Conversion +```bash +# Convert to PNG images with specific resolution +pdftoppm -png -r 300 document.pdf output_prefix + +# Convert specific page range with high resolution +pdftoppm -png -r 600 -f 1 -l 3 document.pdf high_res_pages + +# Convert to JPEG with quality setting +pdftoppm -jpeg -jpegopt quality=85 -r 200 document.pdf jpeg_output +``` + +#### Extract Embedded Images +```bash +# Extract all embedded images with metadata +pdfimages -j -p document.pdf page_images + +# List image info 
without extracting
+pdfimages -list document.pdf
+
+# Extract images in their original format
+pdfimages -all document.pdf images/img
+```
+
+### qpdf Advanced Features
+
+#### Complex Page Manipulation
+```bash
+# Split PDF into groups of pages
+qpdf --split-pages=3 input.pdf output_group_%d.pdf
+
+# Extract specific pages with complex ranges ("z" means the last page)
+qpdf input.pdf --pages input.pdf 1,3-5,8,10-z -- extracted.pdf
+
+# Merge specific pages from multiple PDFs
+qpdf --empty --pages doc1.pdf 1-3 doc2.pdf 5-7 doc3.pdf 2,4 -- combined.pdf
+```
+
+#### PDF Optimization and Repair
+```bash
+# Optimize PDF for web (linearize for streaming)
+qpdf --linearize input.pdf optimized.pdf
+
+# Recompress streams and pack objects into object streams
+qpdf --compress-streams=y --object-streams=generate input.pdf compressed.pdf
+
+# Attempt to repair corrupted PDF structure
+qpdf --check input.pdf
+qpdf damaged.pdf repaired.pdf  # rewriting the file recovers from many structural problems
+
+# Show page/object structure for debugging
+qpdf --show-pages input.pdf > structure.txt
+```
+
+#### Advanced Encryption
+```bash
+# Add password protection with specific permissions
+qpdf --encrypt user_pass owner_pass 256 --print=none --modify=none -- input.pdf encrypted.pdf
+
+# Check encryption status
+qpdf --show-encryption encrypted.pdf
+
+# Remove password protection (requires password)
+qpdf --password=secret123 --decrypt encrypted.pdf decrypted.pdf
+```
+
+## Advanced Python Techniques
+
+### pdfplumber Advanced Features
+
+#### Extract Text with Precise Coordinates
+```python
+import pdfplumber
+
+with pdfplumber.open("document.pdf") as pdf:
+    page = pdf.pages[0]
+
+    # Extract all text with coordinates
+    chars = page.chars
+    for char in chars[:10]:  # First 10 characters
+        print(f"Char: '{char['text']}' at x:{char['x0']:.1f} y:{char['y0']:.1f}")
+
+    # Extract text by bounding box (left, top, right, bottom)
+    bbox_text = page.within_bbox((100, 100, 400, 200)).extract_text()
+```
+
+#### Advanced Table Extraction with Custom Settings
+```python
+import pdfplumber
+import pandas as pd
+
+with pdfplumber.open("complex_table.pdf") as pdf:
+    page = pdf.pages[0]
+
+    # Extract tables with custom settings for complex layouts
+    table_settings = {
+        "vertical_strategy": "lines",
+        "horizontal_strategy": "lines",
+        "snap_tolerance": 3,
+        "intersection_tolerance": 15
+    }
+    tables = page.extract_tables(table_settings)
+
+    # Visual debugging for table extraction
+    img = page.to_image(resolution=150)
+    img.save("debug_layout.png")
+```
+
+### reportlab Advanced Features
+
+#### Create Professional Reports with Tables
+```python
+from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph
+from reportlab.lib.styles import getSampleStyleSheet
+from reportlab.lib import colors
+
+# Sample data
+data = [
+    ['Product', 'Q1', 'Q2', 'Q3', 'Q4'],
+    ['Widgets', '120', '135', '142', '158'],
+    ['Gadgets', '85', '92', '98', '105']
+]
+
+# Create PDF with table
+doc = SimpleDocTemplate("report.pdf")
+elements = []
+
+# Add title
+styles = getSampleStyleSheet()
+title = Paragraph("Quarterly Sales Report", styles['Title'])
+elements.append(title)
+
+# Add table with advanced styling
+table = Table(data)
+table.setStyle(TableStyle([
+    ('BACKGROUND', (0, 0), (-1, 0), colors.grey),
+    ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
+    ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
+    ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
+    ('FONTSIZE', (0, 0), (-1, 0), 14),
+    ('BOTTOMPADDING', (0, 0), (-1, 0), 12),
+    ('BACKGROUND', (0, 1), (-1, -1), colors.beige),
+    ('GRID', (0, 0), (-1, -1), 1, colors.black)
+]))
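+
+# Note: TableStyle commands address cells as (column, row) pairs; (0, 0) is
+# the top-left cell and negative indices count back from the end, so
+# (-1, -1) extends a command to the bottom-right corner.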
+elements.append(table) + +doc.build(elements) +``` + +## Complex Workflows + +### Extract Figures/Images from PDF + +#### Method 1: Using pdfimages (fastest) +```bash +# Extract all images with original quality +pdfimages -all document.pdf images/img +``` + +#### Method 2: Using pypdfium2 + Image Processing +```python +import pypdfium2 as pdfium +from PIL import Image +import numpy as np + +def extract_figures(pdf_path, output_dir): + pdf = pdfium.PdfDocument(pdf_path) + + for page_num, page in enumerate(pdf): + # Render high-resolution page + bitmap = page.render(scale=3.0) + img = bitmap.to_pil() + + # Convert to numpy for processing + img_array = np.array(img) + + # Simple figure detection (non-white regions) + mask = np.any(img_array != [255, 255, 255], axis=2) + + # Find contours and extract bounding boxes + # (This is simplified - real implementation would need more sophisticated detection) + + # Save detected figures + # ... implementation depends on specific needs +``` + +### Batch PDF Processing with Error Handling +```python +import os +import glob +from pypdf import PdfReader, PdfWriter +import logging + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +def batch_process_pdfs(input_dir, operation='merge'): + pdf_files = glob.glob(os.path.join(input_dir, "*.pdf")) + + if operation == 'merge': + writer = PdfWriter() + for pdf_file in pdf_files: + try: + reader = PdfReader(pdf_file) + for page in reader.pages: + writer.add_page(page) + logger.info(f"Processed: {pdf_file}") + except Exception as e: + logger.error(f"Failed to process {pdf_file}: {e}") + continue + + with open("batch_merged.pdf", "wb") as output: + writer.write(output) + + elif operation == 'extract_text': + for pdf_file in pdf_files: + try: + reader = PdfReader(pdf_file) + text = "" + for page in reader.pages: + text += page.extract_text() + + output_file = pdf_file.replace('.pdf', '.txt') + with open(output_file, 'w', encoding='utf-8') as f: + f.write(text) + logger.info(f"Extracted text from: {pdf_file}") + + except Exception as e: + logger.error(f"Failed to extract text from {pdf_file}: {e}") + continue +``` + +### Advanced PDF Cropping +```python +from pypdf import PdfWriter, PdfReader + +reader = PdfReader("input.pdf") +writer = PdfWriter() + +# Crop page (left, bottom, right, top in points) +page = reader.pages[0] +page.mediabox.left = 50 +page.mediabox.bottom = 50 +page.mediabox.right = 550 +page.mediabox.top = 750 + +writer.add_page(page) +with open("cropped.pdf", "wb") as output: + writer.write(output) +``` + +## Performance Optimization Tips + +### 1. For Large PDFs +- Use streaming approaches instead of loading entire PDF in memory +- Use `qpdf --split-pages` for splitting large files +- Process pages individually with pypdfium2 + +### 2. For Text Extraction +- `pdftotext -bbox-layout` is fastest for plain text extraction +- Use pdfplumber for structured data and tables +- Avoid `pypdf.extract_text()` for very large documents + +### 3. For Image Extraction +- `pdfimages` is much faster than rendering pages +- Use low resolution for previews, high resolution for final output + +### 4. For Form Filling +- pdf-lib maintains form structure better than most alternatives +- Pre-validate form fields before processing + +### 5. 
+### 5. Memory Management
+```python
+# Process PDFs in chunks
+def process_large_pdf(pdf_path, chunk_size=10):
+    reader = PdfReader(pdf_path)
+    total_pages = len(reader.pages)
+
+    for start_idx in range(0, total_pages, chunk_size):
+        end_idx = min(start_idx + chunk_size, total_pages)
+        writer = PdfWriter()
+
+        for i in range(start_idx, end_idx):
+            writer.add_page(reader.pages[i])
+
+        # Process chunk
+        with open(f"chunk_{start_idx//chunk_size}.pdf", "wb") as output:
+            writer.write(output)
+```
+
+## Troubleshooting Common Issues
+
+### Encrypted PDFs
+```python
+# Handle password-protected PDFs
+from pypdf import PdfReader
+
+try:
+    reader = PdfReader("encrypted.pdf")
+    if reader.is_encrypted:
+        reader.decrypt("password")
+except Exception as e:
+    print(f"Failed to decrypt: {e}")
+```
+
+### Corrupted PDFs
+```bash
+# Use qpdf to repair (rewrites the file in place)
+qpdf --check corrupted.pdf
+qpdf --replace-input corrupted.pdf
+```
+
+### Text Extraction Issues
+```python
+# Fallback to OCR for scanned PDFs
+import pytesseract
+from pdf2image import convert_from_path
+
+def extract_text_with_ocr(pdf_path):
+    images = convert_from_path(pdf_path)
+    text = ""
+    for image in images:
+        text += pytesseract.image_to_string(image)
+    return text
+```
+
+## License Information
+
+- **pypdf**: BSD License
+- **pdfplumber**: MIT License
+- **pypdfium2**: Apache/BSD License
+- **reportlab**: BSD License
+- **poppler-utils**: GPL-2 License
+- **qpdf**: Apache License
+- **pdf-lib**: MIT License
+- **pdfjs-dist**: Apache License
\ No newline at end of file
diff --git a/.claude/skills/pdf/scripts/check_bounding_boxes.py b/.claude/skills/pdf/scripts/check_bounding_boxes.py
new file mode 100644
index 0000000..7443660
--- /dev/null
+++ b/.claude/skills/pdf/scripts/check_bounding_boxes.py
@@ -0,0 +1,70 @@
+from dataclasses import dataclass
+import json
+import sys
+
+
+# Script to check that the `fields.json` file that Claude creates when analyzing PDFs
+# does not have overlapping bounding boxes. See forms.md.
+
+
+@dataclass
+class RectAndField:
+    rect: list[float]
+    rect_type: str
+    field: dict
+
+
+# Returns a list of messages that are printed to stdout for Claude to read.
+def get_bounding_box_messages(fields_json_stream) -> list[str]:
+    messages = []
+    fields = json.load(fields_json_stream)
+    messages.append(f"Read {len(fields['form_fields'])} fields")
+
+    def rects_intersect(r1, r2):
+        disjoint_horizontal = r1[0] >= r2[2] or r1[2] <= r2[0]
+        disjoint_vertical = r1[1] >= r2[3] or r1[3] <= r2[1]
+        return not (disjoint_horizontal or disjoint_vertical)
+
+    rects_and_fields = []
+    for f in fields["form_fields"]:
+        rects_and_fields.append(RectAndField(f["label_bounding_box"], "label", f))
+        rects_and_fields.append(RectAndField(f["entry_bounding_box"], "entry", f))
+
+    has_error = False
+    for i, ri in enumerate(rects_and_fields):
+        # This is O(N^2); we can optimize if it becomes a problem.
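+        # Compare against every later rect; only same-page overlaps count as
+        # errors, and a field's own label and entry boxes must not overlap either.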
+ for j in range(i + 1, len(rects_and_fields)): + rj = rects_and_fields[j] + if ri.field["page_number"] == rj.field["page_number"] and rects_intersect(ri.rect, rj.rect): + has_error = True + if ri.field is rj.field: + messages.append(f"FAILURE: intersection between label and entry bounding boxes for `{ri.field['description']}` ({ri.rect}, {rj.rect})") + else: + messages.append(f"FAILURE: intersection between {ri.rect_type} bounding box for `{ri.field['description']}` ({ri.rect}) and {rj.rect_type} bounding box for `{rj.field['description']}` ({rj.rect})") + if len(messages) >= 20: + messages.append("Aborting further checks; fix bounding boxes and try again") + return messages + if ri.rect_type == "entry": + if "entry_text" in ri.field: + font_size = ri.field["entry_text"].get("font_size", 14) + entry_height = ri.rect[3] - ri.rect[1] + if entry_height < font_size: + has_error = True + messages.append(f"FAILURE: entry bounding box height ({entry_height}) for `{ri.field['description']}` is too short for the text content (font size: {font_size}). Increase the box height or decrease the font size.") + if len(messages) >= 20: + messages.append("Aborting further checks; fix bounding boxes and try again") + return messages + + if not has_error: + messages.append("SUCCESS: All bounding boxes are valid") + return messages + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: check_bounding_boxes.py [fields.json]") + sys.exit(1) + # Input file should be in the `fields.json` format described in forms.md. + with open(sys.argv[1]) as f: + messages = get_bounding_box_messages(f) + for msg in messages: + print(msg) diff --git a/.claude/skills/pdf/scripts/check_bounding_boxes_test.py b/.claude/skills/pdf/scripts/check_bounding_boxes_test.py new file mode 100644 index 0000000..1dbb463 --- /dev/null +++ b/.claude/skills/pdf/scripts/check_bounding_boxes_test.py @@ -0,0 +1,226 @@ +import unittest +import json +import io +from check_bounding_boxes import get_bounding_box_messages + + +# Currently this is not run automatically in CI; it's just for documentation and manual checking. 
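+# Run manually from this directory with: python check_bounding_boxes_test.py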
+class TestGetBoundingBoxMessages(unittest.TestCase): + + def create_json_stream(self, data): + """Helper to create a JSON stream from data""" + return io.StringIO(json.dumps(data)) + + def test_no_intersections(self): + """Test case with no bounding box intersections""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 30] + }, + { + "description": "Email", + "page_number": 1, + "label_bounding_box": [10, 40, 50, 60], + "entry_bounding_box": [60, 40, 150, 60] + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("SUCCESS" in msg for msg in messages)) + self.assertFalse(any("FAILURE" in msg for msg in messages)) + + def test_label_entry_intersection_same_field(self): + """Test intersection between label and entry of the same field""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 60, 30], + "entry_bounding_box": [50, 10, 150, 30] # Overlaps with label + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("FAILURE" in msg and "intersection" in msg for msg in messages)) + self.assertFalse(any("SUCCESS" in msg for msg in messages)) + + def test_intersection_between_different_fields(self): + """Test intersection between bounding boxes of different fields""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 30] + }, + { + "description": "Email", + "page_number": 1, + "label_bounding_box": [40, 20, 80, 40], # Overlaps with Name's boxes + "entry_bounding_box": [160, 10, 250, 30] + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("FAILURE" in msg and "intersection" in msg for msg in messages)) + self.assertFalse(any("SUCCESS" in msg for msg in messages)) + + def test_different_pages_no_intersection(self): + """Test that boxes on different pages don't count as intersecting""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 30] + }, + { + "description": "Email", + "page_number": 2, + "label_bounding_box": [10, 10, 50, 30], # Same coordinates but different page + "entry_bounding_box": [60, 10, 150, 30] + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("SUCCESS" in msg for msg in messages)) + self.assertFalse(any("FAILURE" in msg for msg in messages)) + + def test_entry_height_too_small(self): + """Test that entry box height is checked against font size""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 20], # Height is 10 + "entry_text": { + "font_size": 14 # Font size larger than height + } + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("FAILURE" in msg and "height" in msg for msg in messages)) + self.assertFalse(any("SUCCESS" in msg for msg in messages)) + + def test_entry_height_adequate(self): + """Test that adequate entry box height passes""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": 
[10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 30], # Height is 20 + "entry_text": { + "font_size": 14 # Font size smaller than height + } + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("SUCCESS" in msg for msg in messages)) + self.assertFalse(any("FAILURE" in msg for msg in messages)) + + def test_default_font_size(self): + """Test that default font size is used when not specified""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 20], # Height is 10 + "entry_text": {} # No font_size specified, should use default 14 + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("FAILURE" in msg and "height" in msg for msg in messages)) + self.assertFalse(any("SUCCESS" in msg for msg in messages)) + + def test_no_entry_text(self): + """Test that missing entry_text doesn't cause height check""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 20] # Small height but no entry_text + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("SUCCESS" in msg for msg in messages)) + self.assertFalse(any("FAILURE" in msg for msg in messages)) + + def test_multiple_errors_limit(self): + """Test that error messages are limited to prevent excessive output""" + fields = [] + # Create many overlapping fields + for i in range(25): + fields.append({ + "description": f"Field{i}", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], # All overlap + "entry_bounding_box": [20, 15, 60, 35] # All overlap + }) + + data = {"form_fields": fields} + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + # Should abort after ~20 messages + self.assertTrue(any("Aborting" in msg for msg in messages)) + # Should have some FAILURE messages but not hundreds + failure_count = sum(1 for msg in messages if "FAILURE" in msg) + self.assertGreater(failure_count, 0) + self.assertLess(len(messages), 30) # Should be limited + + def test_edge_touching_boxes(self): + """Test that boxes touching at edges don't count as intersecting""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [50, 10, 150, 30] # Touches at x=50 + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("SUCCESS" in msg for msg in messages)) + self.assertFalse(any("FAILURE" in msg for msg in messages)) + + +if __name__ == '__main__': + unittest.main() diff --git a/.claude/skills/pdf/scripts/check_fillable_fields.py b/.claude/skills/pdf/scripts/check_fillable_fields.py new file mode 100644 index 0000000..dc43d18 --- /dev/null +++ b/.claude/skills/pdf/scripts/check_fillable_fields.py @@ -0,0 +1,12 @@ +import sys +from pypdf import PdfReader + + +# Script for Claude to run to determine whether a PDF has fillable form fields. See forms.md. 
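+# pypdf's `get_fields()` returns a dict of AcroForm field objects (or None),
+# so a simple truthiness check distinguishes fillable forms from flat PDFs.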
+ + +reader = PdfReader(sys.argv[1]) +if (reader.get_fields()): + print("This PDF has fillable form fields") +else: + print("This PDF does not have fillable form fields; you will need to visually determine where to enter data") diff --git a/.claude/skills/pdf/scripts/convert_pdf_to_images.py b/.claude/skills/pdf/scripts/convert_pdf_to_images.py new file mode 100644 index 0000000..f8a4ec5 --- /dev/null +++ b/.claude/skills/pdf/scripts/convert_pdf_to_images.py @@ -0,0 +1,35 @@ +import os +import sys + +from pdf2image import convert_from_path + + +# Converts each page of a PDF to a PNG image. + + +def convert(pdf_path, output_dir, max_dim=1000): + images = convert_from_path(pdf_path, dpi=200) + + for i, image in enumerate(images): + # Scale image if needed to keep width/height under `max_dim` + width, height = image.size + if width > max_dim or height > max_dim: + scale_factor = min(max_dim / width, max_dim / height) + new_width = int(width * scale_factor) + new_height = int(height * scale_factor) + image = image.resize((new_width, new_height)) + + image_path = os.path.join(output_dir, f"page_{i+1}.png") + image.save(image_path) + print(f"Saved page {i+1} as {image_path} (size: {image.size})") + + print(f"Converted {len(images)} pages to PNG images") + + +if __name__ == "__main__": + if len(sys.argv) != 3: + print("Usage: convert_pdf_to_images.py [input pdf] [output directory]") + sys.exit(1) + pdf_path = sys.argv[1] + output_directory = sys.argv[2] + convert(pdf_path, output_directory) diff --git a/.claude/skills/pdf/scripts/create_validation_image.py b/.claude/skills/pdf/scripts/create_validation_image.py new file mode 100644 index 0000000..4913f8f --- /dev/null +++ b/.claude/skills/pdf/scripts/create_validation_image.py @@ -0,0 +1,41 @@ +import json +import sys + +from PIL import Image, ImageDraw + + +# Creates "validation" images with rectangles for the bounding box information that +# Claude creates when determining where to add text annotations in PDFs. See forms.md. + + +def create_validation_image(page_number, fields_json_path, input_path, output_path): + # Input file should be in the `fields.json` format described in forms.md. + with open(fields_json_path, 'r') as f: + data = json.load(f) + + img = Image.open(input_path) + draw = ImageDraw.Draw(img) + num_boxes = 0 + + for field in data["form_fields"]: + if field["page_number"] == page_number: + entry_box = field['entry_bounding_box'] + label_box = field['label_bounding_box'] + # Draw red rectangle over entry bounding box and blue rectangle over the label. 
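+            # Boxes are in image pixel coordinates (top-left origin), matching
+            # the page images that the fields.json boxes were drawn against.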
+ draw.rectangle(entry_box, outline='red', width=2) + draw.rectangle(label_box, outline='blue', width=2) + num_boxes += 2 + + img.save(output_path) + print(f"Created validation image at {output_path} with {num_boxes} bounding boxes") + + +if __name__ == "__main__": + if len(sys.argv) != 5: + print("Usage: create_validation_image.py [page number] [fields.json file] [input image path] [output image path]") + sys.exit(1) + page_number = int(sys.argv[1]) + fields_json_path = sys.argv[2] + input_image_path = sys.argv[3] + output_image_path = sys.argv[4] + create_validation_image(page_number, fields_json_path, input_image_path, output_image_path) diff --git a/.claude/skills/pdf/scripts/extract_form_field_info.py b/.claude/skills/pdf/scripts/extract_form_field_info.py new file mode 100644 index 0000000..f42a2df --- /dev/null +++ b/.claude/skills/pdf/scripts/extract_form_field_info.py @@ -0,0 +1,152 @@ +import json +import sys + +from pypdf import PdfReader + + +# Extracts data for the fillable form fields in a PDF and outputs JSON that +# Claude uses to fill the fields. See forms.md. + + +# This matches the format used by PdfReader `get_fields` and `update_page_form_field_values` methods. +def get_full_annotation_field_id(annotation): + components = [] + while annotation: + field_name = annotation.get('/T') + if field_name: + components.append(field_name) + annotation = annotation.get('/Parent') + return ".".join(reversed(components)) if components else None + + +def make_field_dict(field, field_id): + field_dict = {"field_id": field_id} + ft = field.get('/FT') + if ft == "/Tx": + field_dict["type"] = "text" + elif ft == "/Btn": + field_dict["type"] = "checkbox" # radio groups handled separately + states = field.get("/_States_", []) + if len(states) == 2: + # "/Off" seems to always be the unchecked value, as suggested by + # https://opensource.adobe.com/dc-acrobat-sdk-docs/standards/pdfstandards/pdf/PDF32000_2008.pdf#page=448 + # It can be either first or second in the "/_States_" list. + if "/Off" in states: + field_dict["checked_value"] = states[0] if states[0] != "/Off" else states[1] + field_dict["unchecked_value"] = "/Off" + else: + print(f"Unexpected state values for checkbox `${field_id}`. Its checked and unchecked values may not be correct; if you're trying to check it, visually verify the results.") + field_dict["checked_value"] = states[0] + field_dict["unchecked_value"] = states[1] + elif ft == "/Ch": + field_dict["type"] = "choice" + states = field.get("/_States_", []) + field_dict["choice_options"] = [{ + "value": state[0], + "text": state[1], + } for state in states] + else: + field_dict["type"] = f"unknown ({ft})" + return field_dict + + +# Returns a list of fillable PDF fields: +# [ +# { +# "field_id": "name", +# "page": 1, +# "type": ("text", "checkbox", "radio_group", or "choice") +# // Per-type additional fields described in forms.md +# }, +# ] +def get_field_info(reader: PdfReader): + fields = reader.get_fields() + + field_info_by_id = {} + possible_radio_names = set() + + for field_id, field in fields.items(): + # Skip if this is a container field with children, except that it might be + # a parent group for radio button options. + if field.get("/Kids"): + if field.get("/FT") == "/Btn": + possible_radio_names.add(field_id) + continue + field_info_by_id[field_id] = make_field_dict(field, field_id) + + # Bounding rects are stored in annotations in page objects. + + # Radio button options have a separate annotation for each choice; + # all choices have the same field name. 
+ # See https://westhealth.github.io/exploring-fillable-forms-with-pdfrw.html + radio_fields_by_id = {} + + for page_index, page in enumerate(reader.pages): + annotations = page.get('/Annots', []) + for ann in annotations: + field_id = get_full_annotation_field_id(ann) + if field_id in field_info_by_id: + field_info_by_id[field_id]["page"] = page_index + 1 + field_info_by_id[field_id]["rect"] = ann.get('/Rect') + elif field_id in possible_radio_names: + try: + # ann['/AP']['/N'] should have two items. One of them is '/Off', + # the other is the active value. + on_values = [v for v in ann["/AP"]["/N"] if v != "/Off"] + except KeyError: + continue + if len(on_values) == 1: + rect = ann.get("/Rect") + if field_id not in radio_fields_by_id: + radio_fields_by_id[field_id] = { + "field_id": field_id, + "type": "radio_group", + "page": page_index + 1, + "radio_options": [], + } + # Note: at least on macOS 15.7, Preview.app doesn't show selected + # radio buttons correctly. (It does if you remove the leading slash + # from the value, but that causes them not to appear correctly in + # Chrome/Firefox/Acrobat/etc). + radio_fields_by_id[field_id]["radio_options"].append({ + "value": on_values[0], + "rect": rect, + }) + + # Some PDFs have form field definitions without corresponding annotations, + # so we can't tell where they are. Ignore these fields for now. + fields_with_location = [] + for field_info in field_info_by_id.values(): + if "page" in field_info: + fields_with_location.append(field_info) + else: + print(f"Unable to determine location for field id: {field_info.get('field_id')}, ignoring") + + # Sort by page number, then Y position (flipped in PDF coordinate system), then X. + def sort_key(f): + if "radio_options" in f: + rect = f["radio_options"][0]["rect"] or [0, 0, 0, 0] + else: + rect = f.get("rect") or [0, 0, 0, 0] + adjusted_position = [-rect[1], rect[0]] + return [f.get("page"), adjusted_position] + + sorted_fields = fields_with_location + list(radio_fields_by_id.values()) + sorted_fields.sort(key=sort_key) + + return sorted_fields + + +def write_field_info(pdf_path: str, json_output_path: str): + reader = PdfReader(pdf_path) + field_info = get_field_info(reader) + with open(json_output_path, "w") as f: + json.dump(field_info, f, indent=2) + print(f"Wrote {len(field_info)} fields to {json_output_path}") + + +if __name__ == "__main__": + if len(sys.argv) != 3: + print("Usage: extract_form_field_info.py [input pdf] [output json]") + sys.exit(1) + write_field_info(sys.argv[1], sys.argv[2]) diff --git a/.claude/skills/pdf/scripts/fill_fillable_fields.py b/.claude/skills/pdf/scripts/fill_fillable_fields.py new file mode 100644 index 0000000..ac35753 --- /dev/null +++ b/.claude/skills/pdf/scripts/fill_fillable_fields.py @@ -0,0 +1,114 @@ +import json +import sys + +from pypdf import PdfReader, PdfWriter + +from extract_form_field_info import get_field_info + + +# Fills fillable form fields in a PDF. See forms.md. + + +def fill_pdf_fields(input_pdf_path: str, fields_json_path: str, output_pdf_path: str): + with open(fields_json_path) as f: + fields = json.load(f) + # Group by page number. 
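+    # update_page_form_field_values() is applied one page at a time below, so
+    # bucket the requested values by their 1-based page number first.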
+ fields_by_page = {} + for field in fields: + if "value" in field: + field_id = field["field_id"] + page = field["page"] + if page not in fields_by_page: + fields_by_page[page] = {} + fields_by_page[page][field_id] = field["value"] + + reader = PdfReader(input_pdf_path) + + has_error = False + field_info = get_field_info(reader) + fields_by_ids = {f["field_id"]: f for f in field_info} + for field in fields: + existing_field = fields_by_ids.get(field["field_id"]) + if not existing_field: + has_error = True + print(f"ERROR: `{field['field_id']}` is not a valid field ID") + elif field["page"] != existing_field["page"]: + has_error = True + print(f"ERROR: Incorrect page number for `{field['field_id']}` (got {field['page']}, expected {existing_field['page']})") + else: + if "value" in field: + err = validation_error_for_field_value(existing_field, field["value"]) + if err: + print(err) + has_error = True + if has_error: + sys.exit(1) + + writer = PdfWriter(clone_from=reader) + for page, field_values in fields_by_page.items(): + writer.update_page_form_field_values(writer.pages[page - 1], field_values, auto_regenerate=False) + + # This seems to be necessary for many PDF viewers to format the form values correctly. + # It may cause the viewer to show a "save changes" dialog even if the user doesn't make any changes. + writer.set_need_appearances_writer(True) + + with open(output_pdf_path, "wb") as f: + writer.write(f) + + +def validation_error_for_field_value(field_info, field_value): + field_type = field_info["type"] + field_id = field_info["field_id"] + if field_type == "checkbox": + checked_val = field_info["checked_value"] + unchecked_val = field_info["unchecked_value"] + if field_value != checked_val and field_value != unchecked_val: + return f'ERROR: Invalid value "{field_value}" for checkbox field "{field_id}". The checked value is "{checked_val}" and the unchecked value is "{unchecked_val}"' + elif field_type == "radio_group": + option_values = [opt["value"] for opt in field_info["radio_options"]] + if field_value not in option_values: + return f'ERROR: Invalid value "{field_value}" for radio group field "{field_id}". Valid values are: {option_values}' + elif field_type == "choice": + choice_values = [opt["value"] for opt in field_info["choice_options"]] + if field_value not in choice_values: + return f'ERROR: Invalid value "{field_value}" for choice field "{field_id}". Valid values are: {choice_values}' + return None + + +# pypdf (at least version 5.7.0) has a bug when setting the value for a selection list field. +# In _writer.py around line 966: +# +# if field.get(FA.FT, "/Tx") == "/Ch" and field_flags & FA.FfBits.Combo == 0: +# txt = "\n".join(annotation.get_inherited(FA.Opt, [])) +# +# The problem is that for selection lists, `get_inherited` returns a list of two-element lists like +# [["value1", "Text 1"], ["value2", "Text 2"], ...] +# This causes `join` to throw a TypeError because it expects an iterable of strings. +# The horrible workaround is to patch `get_inherited` to return a list of the value strings. +# We call the original method and adjust the return value only if the argument to `get_inherited` +# is `FA.Opt` and if the return value is a list of two-element lists. 
+def monkeypatch_pypdf_method():
+    from pypdf.generic import DictionaryObject
+    from pypdf.constants import FieldDictionaryAttributes
+
+    original_get_inherited = DictionaryObject.get_inherited
+
+    def patched_get_inherited(self, key: str, default=None):
+        result = original_get_inherited(self, key, default)
+        if key == FieldDictionaryAttributes.Opt:
+            if isinstance(result, list) and all(isinstance(v, list) and len(v) == 2 for v in result):
+                result = [r[0] for r in result]
+        return result
+
+    DictionaryObject.get_inherited = patched_get_inherited
+
+
+if __name__ == "__main__":
+    if len(sys.argv) != 4:
+        print("Usage: fill_fillable_fields.py [input pdf] [field_values.json] [output pdf]")
+        sys.exit(1)
+    monkeypatch_pypdf_method()
+    input_pdf = sys.argv[1]
+    fields_json = sys.argv[2]
+    output_pdf = sys.argv[3]
+    fill_pdf_fields(input_pdf, fields_json, output_pdf)
diff --git a/.claude/skills/pdf/scripts/fill_pdf_form_with_annotations.py b/.claude/skills/pdf/scripts/fill_pdf_form_with_annotations.py
new file mode 100644
index 0000000..f980531
--- /dev/null
+++ b/.claude/skills/pdf/scripts/fill_pdf_form_with_annotations.py
@@ -0,0 +1,108 @@
+import json
+import sys
+
+from pypdf import PdfReader, PdfWriter
+from pypdf.annotations import FreeText
+
+
+# Fills a PDF by adding text annotations defined in `fields.json`. See forms.md.
+
+
+def transform_coordinates(bbox, image_width, image_height, pdf_width, pdf_height):
+    """Transform bounding box from image coordinates to PDF coordinates"""
+    # Image coordinates: origin at top-left, y increases downward
+    # PDF coordinates: origin at bottom-left, y increases upward
+    x_scale = pdf_width / image_width
+    y_scale = pdf_height / image_height
+
+    left = bbox[0] * x_scale
+    right = bbox[2] * x_scale
+
+    # Flip Y coordinates for PDF
+    top = pdf_height - (bbox[1] * y_scale)
+    bottom = pdf_height - (bbox[3] * y_scale)
+
+    return left, bottom, right, top
+
+
+def fill_pdf_form(input_pdf_path, fields_json_path, output_pdf_path):
+    """Fill the PDF form with data from fields.json"""
+
+    # `fields.json` format described in forms.md.
+    with open(fields_json_path, "r") as f:
+        fields_data = json.load(f)
+
+    # Open the PDF
+    reader = PdfReader(input_pdf_path)
+    writer = PdfWriter()
+
+    # Copy all pages to writer
+    writer.append(reader)
+
+    # Get PDF dimensions for each page
+    pdf_dimensions = {}
+    for i, page in enumerate(reader.pages):
+        mediabox = page.mediabox
+        pdf_dimensions[i + 1] = [mediabox.width, mediabox.height]
+
+    # Process each form field
+    annotations = []
+    for field in fields_data["form_fields"]:
+        page_num = field["page_number"]
+
+        # Get page dimensions and transform coordinates.
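+        # The `pages` entries in fields.json record each page's rendered image
+        # size; they are needed to scale image coordinates to PDF points.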
+ page_info = next(p for p in fields_data["pages"] if p["page_number"] == page_num) + image_width = page_info["image_width"] + image_height = page_info["image_height"] + pdf_width, pdf_height = pdf_dimensions[page_num] + + transformed_entry_box = transform_coordinates( + field["entry_bounding_box"], + image_width, image_height, + pdf_width, pdf_height + ) + + # Skip empty fields + if "entry_text" not in field or "text" not in field["entry_text"]: + continue + entry_text = field["entry_text"] + text = entry_text["text"] + if not text: + continue + + font_name = entry_text.get("font", "Arial") + font_size = str(entry_text.get("font_size", 14)) + "pt" + font_color = entry_text.get("font_color", "000000") + + # Font size/color seems to not work reliably across viewers: + # https://github.com/py-pdf/pypdf/issues/2084 + annotation = FreeText( + text=text, + rect=transformed_entry_box, + font=font_name, + font_size=font_size, + font_color=font_color, + border_color=None, + background_color=None, + ) + annotations.append(annotation) + # page_number is 0-based for pypdf + writer.add_annotation(page_number=page_num - 1, annotation=annotation) + + # Save the filled PDF + with open(output_pdf_path, "wb") as output: + writer.write(output) + + print(f"Successfully filled PDF form and saved to {output_pdf_path}") + print(f"Added {len(annotations)} text annotations") + + +if __name__ == "__main__": + if len(sys.argv) != 4: + print("Usage: fill_pdf_form_with_annotations.py [input pdf] [fields.json] [output pdf]") + sys.exit(1) + input_pdf = sys.argv[1] + fields_json = sys.argv[2] + output_pdf = sys.argv[3] + + fill_pdf_form(input_pdf, fields_json, output_pdf) \ No newline at end of file diff --git a/.claude/skills/pptx/LICENSE.txt b/.claude/skills/pptx/LICENSE.txt new file mode 100644 index 0000000..c55ab42 --- /dev/null +++ b/.claude/skills/pptx/LICENSE.txt @@ -0,0 +1,30 @@ +© 2025 Anthropic, PBC. All rights reserved. + +LICENSE: Use of these materials (including all code, prompts, assets, files, +and other components of this Skill) is governed by your agreement with +Anthropic regarding use of Anthropic's services. If no separate agreement +exists, use is governed by Anthropic's Consumer Terms of Service or +Commercial Terms of Service, as applicable: +https://www.anthropic.com/legal/consumer-terms +https://www.anthropic.com/legal/commercial-terms +Your applicable agreement is referred to as the "Agreement." "Services" are +as defined in the Agreement. + +ADDITIONAL RESTRICTIONS: Notwithstanding anything in the Agreement to the +contrary, users may not: + +- Extract these materials from the Services or retain copies of these + materials outside the Services +- Reproduce or copy these materials, except for temporary copies created + automatically during authorized use of the Services +- Create derivative works based on these materials +- Distribute, sublicense, or transfer these materials to any third party +- Make, offer to sell, sell, or import any inventions embodied in these + materials +- Reverse engineer, decompile, or disassemble these materials + +The receipt, viewing, or possession of these materials does not convey or +imply any license or right beyond those expressly granted above. + +Anthropic retains all right, title, and interest in these materials, +including all copyrights, patents, and other intellectual property rights. 
diff --git a/.claude/skills/pptx/SKILL.md b/.claude/skills/pptx/SKILL.md
new file mode 100644
index 0000000..b93b875
--- /dev/null
+++ b/.claude/skills/pptx/SKILL.md
@@ -0,0 +1,484 @@
+---
+name: pptx
+description: "Presentation creation, editing, and analysis. When Claude needs to work with presentations (.pptx files) for: (1) Creating new presentations, (2) Modifying or editing content, (3) Working with layouts, (4) Adding comments or speaker notes, or any other presentation tasks"
+license: Proprietary. LICENSE.txt has complete terms
+---
+
+# PPTX creation, editing, and analysis
+
+## Overview
+
+A user may ask you to create, edit, or analyze the contents of a .pptx file. A .pptx file is essentially a ZIP archive containing XML files and other resources that you can read or edit. You have different tools and workflows available for different tasks.
+
+## Reading and analyzing content
+
+### Text extraction
+If you just need to read the text contents of a presentation, you should convert the document to markdown:
+
+```bash
+# Convert document to markdown
+python -m markitdown path-to-file.pptx
+```
+
+### Raw XML access
+You need raw XML access for: comments, speaker notes, slide layouts, animations, design elements, and complex formatting. For any of these features, you'll need to unpack a presentation and read its raw XML contents.
+
+#### Unpacking a file
+`python ooxml/scripts/unpack.py <office_file> <output_directory>`
+
+**Note**: The unpack.py script is located at `skills/pptx/ooxml/scripts/unpack.py` relative to the project root. If the script doesn't exist at this path, use `find . -name "unpack.py"` to locate it.
+
+#### Key file structures
+* `ppt/presentation.xml` - Main presentation metadata and slide references
+* `ppt/slides/slide{N}.xml` - Individual slide contents (slide1.xml, slide2.xml, etc.)
+* `ppt/notesSlides/notesSlide{N}.xml` - Speaker notes for each slide
+* `ppt/comments/modernComment_*.xml` - Comments for specific slides
+* `ppt/slideLayouts/` - Layout templates for slides
+* `ppt/slideMasters/` - Master slide templates
+* `ppt/theme/` - Theme and styling information
+* `ppt/media/` - Images and other media files
+
+#### Typography and color extraction
+**When given an example design to emulate**: Always analyze the presentation's typography and colors first using the methods below:
+1. **Read theme file**: Check `ppt/theme/theme1.xml` for colors (`<a:clrScheme>`) and fonts (`<a:fontScheme>`)
+2. **Sample slide content**: Examine `ppt/slides/slide1.xml` for actual font usage (`<a:latin>`) and colors
+3. **Search for patterns**: Use grep to find color (`<a:solidFill>`, `<a:srgbClr>`) and font references across all XML files
+
+## Creating a new PowerPoint presentation **without a template**
+
+When creating a new PowerPoint presentation from scratch, use the **html2pptx** workflow to convert HTML slides to PowerPoint with accurate positioning.
+
+### Design Principles
+
+**CRITICAL**: Before creating any presentation, analyze the content and choose appropriate design elements:
+1. **Consider the subject matter**: What is this presentation about? What tone, industry, or mood does it suggest?
+2. **Check for branding**: If the user mentions a company/organization, consider their brand colors and identity
+3. **Match palette to content**: Select colors that reflect the subject
+4.
**State your approach**: Explain your design choices before writing code + +**Requirements**: +- ✅ State your content-informed design approach BEFORE writing code +- ✅ Use web-safe fonts only: Arial, Helvetica, Times New Roman, Georgia, Courier New, Verdana, Tahoma, Trebuchet MS, Impact +- ✅ Create clear visual hierarchy through size, weight, and color +- ✅ Ensure readability: strong contrast, appropriately sized text, clean alignment +- ✅ Be consistent: repeat patterns, spacing, and visual language across slides + +#### Color Palette Selection + +**Choosing colors creatively**: +- **Think beyond defaults**: What colors genuinely match this specific topic? Avoid autopilot choices. +- **Consider multiple angles**: Topic, industry, mood, energy level, target audience, brand identity (if mentioned) +- **Be adventurous**: Try unexpected combinations - a healthcare presentation doesn't have to be green, finance doesn't have to be navy +- **Build your palette**: Pick 3-5 colors that work together (dominant colors + supporting tones + accent) +- **Ensure contrast**: Text must be clearly readable on backgrounds + +**Example color palettes** (use these to spark creativity - choose one, adapt it, or create your own): + +1. **Classic Blue**: Deep navy (#1C2833), slate gray (#2E4053), silver (#AAB7B8), off-white (#F4F6F6) +2. **Teal & Coral**: Teal (#5EA8A7), deep teal (#277884), coral (#FE4447), white (#FFFFFF) +3. **Bold Red**: Red (#C0392B), bright red (#E74C3C), orange (#F39C12), yellow (#F1C40F), green (#2ECC71) +4. **Warm Blush**: Mauve (#A49393), blush (#EED6D3), rose (#E8B4B8), cream (#FAF7F2) +5. **Burgundy Luxury**: Burgundy (#5D1D2E), crimson (#951233), rust (#C15937), gold (#997929) +6. **Deep Purple & Emerald**: Purple (#B165FB), dark blue (#181B24), emerald (#40695B), white (#FFFFFF) +7. **Cream & Forest Green**: Cream (#FFE1C7), forest green (#40695B), white (#FCFCFC) +8. **Pink & Purple**: Pink (#F8275B), coral (#FF574A), rose (#FF737D), purple (#3D2F68) +9. **Lime & Plum**: Lime (#C5DE82), plum (#7C3A5F), coral (#FD8C6E), blue-gray (#98ACB5) +10. **Black & Gold**: Gold (#BF9A4A), black (#000000), cream (#F4F6F6) +11. **Sage & Terracotta**: Sage (#87A96B), terracotta (#E07A5F), cream (#F4F1DE), charcoal (#2C2C2C) +12. **Charcoal & Red**: Charcoal (#292929), red (#E33737), light gray (#CCCBCB) +13. **Vibrant Orange**: Orange (#F96D00), light gray (#F2F2F2), charcoal (#222831) +14. **Forest Green**: Black (#191A19), green (#4E9F3D), dark green (#1E5128), white (#FFFFFF) +15. **Retro Rainbow**: Purple (#722880), pink (#D72D51), orange (#EB5C18), amber (#F08800), gold (#DEB600) +16. **Vintage Earthy**: Mustard (#E3B448), sage (#CBD18F), forest green (#3A6B35), cream (#F4F1DE) +17. **Coastal Rose**: Old rose (#AD7670), beaver (#B49886), eggshell (#F3ECDC), ash gray (#BFD5BE) +18. 
**Orange & Turquoise**: Light orange (#FC993E), grayish turquoise (#667C6F), white (#FCFCFC)
+
+#### Visual Details Options
+
+**Geometric Patterns**:
+- Diagonal section dividers instead of horizontal
+- Asymmetric column widths (30/70, 40/60, 25/75)
+- Rotated text headers at 90° or 270°
+- Circular/hexagonal frames for images
+- Triangular accent shapes in corners
+- Overlapping shapes for depth
+
+**Border & Frame Treatments**:
+- Thick single-color borders (10-20pt) on one side only
+- Double-line borders with contrasting colors
+- Corner brackets instead of full frames
+- L-shaped borders (top+left or bottom+right)
+- Underline accents beneath headers (3-5pt thick)
+
+**Typography Treatments**:
+- Extreme size contrast (72pt headlines vs 11pt body)
+- All-caps headers with wide letter spacing
+- Numbered sections in oversized display type
+- Monospace (Courier New) for data/stats/technical content
+- Condensed fonts (Arial Narrow) for dense information
+- Outlined text for emphasis
+
+**Chart & Data Styling**:
+- Monochrome charts with single accent color for key data
+- Horizontal bar charts instead of vertical
+- Dot plots instead of bar charts
+- Minimal gridlines or none at all
+- Data labels directly on elements (no legends)
+- Oversized numbers for key metrics
+
+**Layout Innovations**:
+- Full-bleed images with text overlays
+- Sidebar column (20-30% width) for navigation/context
+- Modular grid systems (3×3, 4×4 blocks)
+- Z-pattern or F-pattern content flow
+- Floating text boxes over colored shapes
+- Magazine-style multi-column layouts
+
+**Background Treatments**:
+- Solid color blocks occupying 40-60% of slide
+- Gradient fills (vertical or diagonal only)
+- Split backgrounds (two colors, diagonal or vertical)
+- Edge-to-edge color bands
+- Negative space as a design element
+
+### Layout Tips
+**When creating slides with charts or tables:**
+- **Two-column layout (PREFERRED)**: Use a header spanning the full width, then two columns below - text/bullets in one column and the featured content in the other. This provides better balance and makes charts/tables more readable. Use flexbox with unequal column widths (e.g., 40%/60% split) to optimize space for each content type.
+- **Full-slide layout**: Let the featured content (chart/table) take up the entire slide for maximum impact and readability
+- **NEVER vertically stack**: Do not place charts/tables below text in a single column - this causes poor readability and layout issues
+
+### Workflow
+1. **MANDATORY - READ ENTIRE FILE**: Read [`html2pptx.md`](html2pptx.md) completely from start to finish. **NEVER set any range limits when reading this file.** Read the full file content for detailed syntax, critical formatting rules, and best practices before proceeding with presentation creation.
+2. Create an HTML file for each slide with proper dimensions (e.g., 720pt × 405pt for 16:9)
+   - Use `<p>`, `<h1>`-`<h6>`, `<ul>`, `<li>` for all text content
+   - Use `class="placeholder"` for areas where charts/tables will be added (render with gray background for visibility)
+   - **CRITICAL**: Rasterize gradients and icons as PNG images FIRST using Sharp, then reference in HTML
+   - **LAYOUT**: For slides with charts/tables/images, use either full-slide layout or two-column layout for better readability
+3. Create and run a JavaScript file using the [`html2pptx.js`](scripts/html2pptx.js) library to convert HTML slides to PowerPoint and save the presentation
+   - Use the `html2pptx()` function to process each HTML file
+   - Add charts and tables to placeholder areas using the PptxGenJS API
+   - Save the presentation using `pptx.writeFile()`
+4. **Visual validation**: Generate thumbnails and inspect for layout issues
+   - Create thumbnail grid: `python scripts/thumbnail.py output.pptx workspace/thumbnails --cols 4`
+   - Read and carefully examine the thumbnail image for:
+     - **Text cutoff**: Text being cut off by header bars, shapes, or slide edges
+     - **Text overlap**: Text overlapping with other text or shapes
+     - **Positioning issues**: Content too close to slide boundaries or other elements
+     - **Contrast issues**: Insufficient contrast between text and backgrounds
+   - If issues found, adjust HTML margins/spacing/colors and regenerate the presentation
+   - Repeat until all slides are visually correct
+
+## Editing an existing PowerPoint presentation
+
+When editing slides in an existing PowerPoint presentation, you need to work with the raw Office Open XML (OOXML) format. This involves unpacking the .pptx file, editing the XML content, and repacking it.
+
+### Workflow
+1. **MANDATORY - READ ENTIRE FILE**: Read [`ooxml.md`](ooxml.md) (~500 lines) completely from start to finish. **NEVER set any range limits when reading this file.** Read the full file content for detailed guidance on OOXML structure and editing workflows before any presentation editing.
+2. Unpack the presentation: `python ooxml/scripts/unpack.py <office_file> <output_directory>`
+3. Edit the XML files (primarily `ppt/slides/slide{N}.xml` and related files)
+4. **CRITICAL**: Validate immediately after each edit and fix any validation errors before proceeding: `python ooxml/scripts/validate.py <unpacked_dir> --original <original_file>`
+5. Pack the final presentation: `python ooxml/scripts/pack.py <unpacked_dir> <office_file>`
+
+## Creating a new PowerPoint presentation **using a template**
+
+When you need to create a presentation that follows an existing template's design, you'll need to duplicate and re-arrange template slides before replacing placeholder content.
+
+### Workflow
+1. **Extract template text AND create visual thumbnail grid**:
+   * Extract text: `python -m markitdown template.pptx > template-content.md`
+   * Read `template-content.md`: Read the entire file to understand the contents of the template presentation. **NEVER set any range limits when reading this file.**
+   * Create thumbnail grids: `python scripts/thumbnail.py template.pptx`
+   * See [Creating Thumbnail Grids](#creating-thumbnail-grids) section for more details
+
+2.
**Analyze template and save inventory to a file**:
+   * **Visual Analysis**: Review thumbnail grid(s) to understand slide layouts, design patterns, and visual structure
+   * Create and save a template inventory file at `template-inventory.md` containing:
+     ```markdown
+     # Template Inventory Analysis
+     **Total Slides: [count]**
+     **IMPORTANT: Slides are 0-indexed (first slide = 0, last slide = count-1)**
+
+     ## [Category Name]
+     - Slide 0: [Layout code if available] - Description/purpose
+     - Slide 1: [Layout code] - Description/purpose
+     - Slide 2: [Layout code] - Description/purpose
+     [... EVERY slide must be listed individually with its index ...]
+     ```
+   * **Using the thumbnail grid**: Reference the visual thumbnails to identify:
+     - Layout patterns (title slides, content layouts, section dividers)
+     - Image placeholder locations and counts
+     - Design consistency across slide groups
+     - Visual hierarchy and structure
+   * This inventory file is REQUIRED for selecting appropriate templates in the next step
+
+3. **Create presentation outline based on template inventory**:
+   * Review available templates from step 2.
+   * Choose an intro or title template for the first slide. This should be one of the first templates.
+   * Choose safe, text-based layouts for the other slides.
+   * **CRITICAL: Match layout structure to actual content**:
+     - Single-column layouts: Use for unified narrative or single topic
+     - Two-column layouts: Use ONLY when you have exactly 2 distinct items/concepts
+     - Three-column layouts: Use ONLY when you have exactly 3 distinct items/concepts
+     - Image + text layouts: Use ONLY when you have actual images to insert
+     - Quote layouts: Use ONLY for actual quotes from people (with attribution), never for emphasis
+     - Never use layouts with more placeholders than you have content
+     - If you have 2 items, don't force them into a 3-column layout
+     - If you have 4+ items, consider breaking into multiple slides or using a list format
+   * Count your actual content pieces BEFORE selecting the layout
+   * Verify each placeholder in the chosen layout will be filled with meaningful content
+   * Select one option representing the **best** layout for each content section.
+   * Save `outline.md` with content AND template mapping that leverages available designs
+   * Example template mapping:
+     ```
+     # Template slides to use (0-based indexing)
+     # WARNING: Verify indices are within range! A template with 73 slides has indices 0-72
+     # Mapping: slide numbers from outline -> template slide indices
+     template_mapping = [
+         0,   # Use slide 0 (Title/Cover)
+         34,  # Use slide 34 (B1: Title and body)
+         34,  # Use slide 34 again (duplicate for second B1)
+         50,  # Use slide 50 (E1: Quote)
+         54,  # Use slide 54 (F2: Closing + Text)
+     ]
+     ```
+
+4. **Duplicate, reorder, and delete slides using `rearrange.py`**:
+   * Use the `scripts/rearrange.py` script to create a new presentation with slides in the desired order:
+     ```bash
+     python scripts/rearrange.py template.pptx working.pptx 0,34,34,50,54
+     ```
+   * The script handles duplicating repeated slides, deleting unused slides, and reordering automatically
+   * Slide indices are 0-based (first slide is 0, second is 1, etc.)
+   * The same slide index can appear multiple times to duplicate that slide
+
+5.
**Extract ALL text using the `inventory.py` script**: + * **Run inventory extraction**: + ```bash + python scripts/inventory.py working.pptx text-inventory.json + ``` + * **Read text-inventory.json**: Read the entire text-inventory.json file to understand all shapes and their properties. **NEVER set any range limits when reading this file.** + + * The inventory JSON structure: + ```json + { + "slide-0": { + "shape-0": { + "placeholder_type": "TITLE", // or null for non-placeholders + "left": 1.5, // position in inches + "top": 2.0, + "width": 7.5, + "height": 1.2, + "paragraphs": [ + { + "text": "Paragraph text", + // Optional properties (only included when non-default): + "bullet": true, // explicit bullet detected + "level": 0, // only included when bullet is true + "alignment": "CENTER", // CENTER, RIGHT (not LEFT) + "space_before": 10.0, // space before paragraph in points + "space_after": 6.0, // space after paragraph in points + "line_spacing": 22.4, // line spacing in points + "font_name": "Arial", // from first run + "font_size": 14.0, // in points + "bold": true, + "italic": false, + "underline": false, + "color": "FF0000" // RGB color + } + ] + } + } + } + ``` + + * Key features: + - **Slides**: Named as "slide-0", "slide-1", etc. + - **Shapes**: Ordered by visual position (top-to-bottom, left-to-right) as "shape-0", "shape-1", etc. + - **Placeholder types**: TITLE, CENTER_TITLE, SUBTITLE, BODY, OBJECT, or null + - **Default font size**: `default_font_size` in points extracted from layout placeholders (when available) + - **Slide numbers are filtered**: Shapes with SLIDE_NUMBER placeholder type are automatically excluded from inventory + - **Bullets**: When `bullet: true`, `level` is always included (even if 0) + - **Spacing**: `space_before`, `space_after`, and `line_spacing` in points (only included when set) + - **Colors**: `color` for RGB (e.g., "FF0000"), `theme_color` for theme colors (e.g., "DARK_1") + - **Properties**: Only non-default values are included in the output + +6. **Generate replacement text and save the data to a JSON file** + Based on the text inventory from the previous step: + - **CRITICAL**: First verify which shapes exist in the inventory - only reference shapes that are actually present + - **VALIDATION**: The replace.py script will validate that all shapes in your replacement JSON exist in the inventory + - If you reference a non-existent shape, you'll get an error showing available shapes + - If you reference a non-existent slide, you'll get an error indicating the slide doesn't exist + - All validation errors are shown at once before the script exits + - **IMPORTANT**: The replace.py script uses inventory.py internally to identify ALL text shapes + - **AUTOMATIC CLEARING**: ALL text shapes from the inventory will be cleared unless you provide "paragraphs" for them + - Add a "paragraphs" field to shapes that need content (not "replacement_paragraphs") + - Shapes without "paragraphs" in the replacement JSON will have their text cleared automatically + - Paragraphs with bullets will be automatically left aligned. 
Don't set the `alignment` property when `"bullet": true`
+   - Generate appropriate replacement content for placeholder text
+   - Use shape size to determine appropriate content length
+   - **CRITICAL**: Include paragraph properties from the original inventory - don't just provide text
+   - **IMPORTANT**: When bullet: true, do NOT include bullet symbols (•, -, *) in text - they're added automatically
+   - **ESSENTIAL FORMATTING RULES**:
+     - Headers/titles should typically have `"bold": true`
+     - List items should have `"bullet": true, "level": 0` (level is required when bullet is true)
+     - Preserve any alignment properties (e.g., `"alignment": "CENTER"` for centered text)
+     - Include font properties when different from default (e.g., `"font_size": 14.0`, `"font_name": "Lora"`)
+     - Colors: Use `"color": "FF0000"` for RGB or `"theme_color": "DARK_1"` for theme colors
+   - The replacement script expects **properly formatted paragraphs**, not just text strings
+   - **Overlapping shapes**: Prefer shapes with larger default_font_size or more appropriate placeholder_type
+   - Save the updated inventory with replacements to `replacement-text.json`
+   - **WARNING**: Different template layouts have different shape counts - always check the actual inventory before creating replacements
+
+   Example paragraphs field showing proper formatting:
+   ```json
+   "paragraphs": [
+     {
+       "text": "New presentation title text",
+       "alignment": "CENTER",
+       "bold": true
+     },
+     {
+       "text": "Section Header",
+       "bold": true
+     },
+     {
+       "text": "First bullet point without bullet symbol",
+       "bullet": true,
+       "level": 0
+     },
+     {
+       "text": "Red colored text",
+       "color": "FF0000"
+     },
+     {
+       "text": "Theme colored text",
+       "theme_color": "DARK_1"
+     },
+     {
+       "text": "Regular paragraph text without special formatting"
+     }
+   ]
+   ```
+
+   **Shapes not listed in the replacement JSON are automatically cleared**:
+   ```json
+   {
+     "slide-0": {
+       "shape-0": {
+         "paragraphs": [...]  // This shape gets new text
+       }
+       // shape-1 and shape-2 from inventory will be cleared automatically
+     }
+   }
+   ```
+
+   **Common formatting patterns for presentations**:
+   - Title slides: Bold text, sometimes centered
+   - Section headers within slides: Bold text
+   - Bullet lists: Each item needs `"bullet": true, "level": 0`
+   - Body text: Usually no special properties needed
+   - Quotes: May have special alignment or font properties
+
+7. **Apply replacements using the `replace.py` script**
+   ```bash
+   python scripts/replace.py working.pptx replacement-text.json output.pptx
+   ```
+
+   The script will:
+   - First extract the inventory of ALL text shapes using functions from inventory.py
+   - Validate that all shapes in the replacement JSON exist in the inventory
+   - Clear text from ALL shapes identified in the inventory
+   - Apply new text only to shapes with "paragraphs" defined in the replacement JSON
+   - Preserve formatting by applying paragraph properties from the JSON
+   - Handle bullets, alignment, font properties, and colors automatically
+   - Save the updated presentation
+
+   Example validation errors:
+   ```
+   ERROR: Invalid shapes in replacement JSON:
+   - Shape 'shape-99' not found on 'slide-0'.
Available shapes: shape-0, shape-1, shape-4 + - Slide 'slide-999' not found in inventory + ``` + + ``` + ERROR: Replacement text made overflow worse in these shapes: + - slide-0/shape-2: overflow worsened by 1.25" (was 0.00", now 1.25") + ``` + +## Creating Thumbnail Grids + +To create visual thumbnail grids of PowerPoint slides for quick analysis and reference: + +```bash +python scripts/thumbnail.py template.pptx [output_prefix] +``` + +**Features**: +- Creates: `thumbnails.jpg` (or `thumbnails-1.jpg`, `thumbnails-2.jpg`, etc. for large decks) +- Default: 5 columns, max 30 slides per grid (5×6) +- Custom prefix: `python scripts/thumbnail.py template.pptx my-grid` + - Note: The output prefix should include the path if you want output in a specific directory (e.g., `workspace/my-grid`) +- Adjust columns: `--cols 4` (range: 3-6, affects slides per grid) +- Grid limits: 3 cols = 12 slides/grid, 4 cols = 20, 5 cols = 30, 6 cols = 42 +- Slides are zero-indexed (Slide 0, Slide 1, etc.) + +**Use cases**: +- Template analysis: Quickly understand slide layouts and design patterns +- Content review: Visual overview of entire presentation +- Navigation reference: Find specific slides by their visual appearance +- Quality check: Verify all slides are properly formatted + +**Examples**: +```bash +# Basic usage +python scripts/thumbnail.py presentation.pptx + +# Combine options: custom name, columns +python scripts/thumbnail.py template.pptx analysis --cols 4 +``` + +## Converting Slides to Images + +To visually analyze PowerPoint slides, convert them to images using a two-step process: + +1. **Convert PPTX to PDF**: + ```bash + soffice --headless --convert-to pdf template.pptx + ``` + +2. **Convert PDF pages to JPEG images**: + ```bash + pdftoppm -jpeg -r 150 template.pdf slide + ``` + This creates files like `slide-1.jpg`, `slide-2.jpg`, etc. 
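+A small Python wrapper for the same two-step pipeline, as a sketch assuming
+`soffice` and `pdftoppm` are on PATH (the function and file names are illustrative):
+
+```python
+import subprocess
+from pathlib import Path
+
+def pptx_to_images(pptx_path, out_dir, dpi=150):
+    out = Path(out_dir)
+    out.mkdir(parents=True, exist_ok=True)
+    # Step 1: PPTX -> PDF, written into out_dir
+    subprocess.run(["soffice", "--headless", "--convert-to", "pdf",
+                    "--outdir", str(out), pptx_path], check=True)
+    pdf_path = out / (Path(pptx_path).stem + ".pdf")
+    # Step 2: PDF -> slide-1.jpg, slide-2.jpg, ...
+    subprocess.run(["pdftoppm", "-jpeg", "-r", str(dpi),
+                    str(pdf_path), str(out / "slide")], check=True)
+```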
+
+Options:
+- `-r 150`: Sets resolution to 150 DPI (adjust for quality/size balance)
+- `-jpeg`: Output JPEG format (use `-png` for PNG if preferred)
+- `-f N`: First page to convert (e.g., `-f 2` starts from page 2)
+- `-l N`: Last page to convert (e.g., `-l 5` stops at page 5)
+- `slide`: Prefix for output files
+
+Example for specific range:
+```bash
+pdftoppm -jpeg -r 150 -f 2 -l 5 template.pdf slide  # Converts only pages 2-5
+```
+
+## Code Style Guidelines
+**IMPORTANT**: When generating code for PPTX operations:
+- Write concise code
+- Avoid verbose variable names and redundant operations
+- Avoid unnecessary print statements
+
+## Dependencies
+
+Required dependencies (should already be installed):
+
+- **markitdown**: `pip install "markitdown[pptx]"` (for text extraction from presentations)
+- **pptxgenjs**: `npm install -g pptxgenjs` (for creating presentations via html2pptx)
+- **playwright**: `npm install -g playwright` (for HTML rendering in html2pptx)
+- **react-icons**: `npm install -g react-icons react react-dom` (for icons)
+- **sharp**: `npm install -g sharp` (for SVG rasterization and image processing)
+- **LibreOffice**: `sudo apt-get install libreoffice` (for PDF conversion)
+- **Poppler**: `sudo apt-get install poppler-utils` (for pdftoppm to convert PDF to images)
+- **defusedxml**: `pip install defusedxml` (for secure XML parsing)
\ No newline at end of file
diff --git a/.claude/skills/pptx/html2pptx.md b/.claude/skills/pptx/html2pptx.md
new file mode 100644
index 0000000..106adf7
--- /dev/null
+++ b/.claude/skills/pptx/html2pptx.md
@@ -0,0 +1,625 @@
+# HTML to PowerPoint Guide
+
+Convert HTML slides to PowerPoint presentations with accurate positioning using the `html2pptx.js` library.
+
+## Table of Contents
+
+1. [Creating HTML Slides](#creating-html-slides)
+2. [Using the html2pptx Library](#using-the-html2pptx-library)
+3. [Using PptxGenJS](#using-pptxgenjs)
+
+---
+
+## Creating HTML Slides
+
+Every HTML slide must include proper body dimensions:
+
+### Layout Dimensions
+
+- **16:9** (default): `width: 720pt; height: 405pt`
+- **4:3**: `width: 720pt; height: 540pt`
+- **16:10**: `width: 720pt; height: 450pt`
+
+### Supported Elements
+
+- `<p>`, `<h1>`-`<h6>` - Text with styling
+- `<ul>`, `<ol>` - Lists (never use manual bullets •, -, *)
+- `<b>`, `<strong>` - Bold text (inline formatting)
+- `<i>`, `<em>` - Italic text (inline formatting)
+- `<u>` - Underlined text (inline formatting)
+- `<span>` - Inline formatting with CSS styles (bold, italic, underline, color)
+- `<br>` - Line breaks
+- `<div>` with bg/border - Becomes shape
+- `<img>` - Images
+- `class="placeholder"` - Reserved space for charts (returns `{ id, x, y, w, h }`)
+
+### Critical Text Rules
+
+**ALL text MUST be inside `<p>`, `<h1>`-`<h6>`, `<ul>`, or `<ol>` tags:**
+- ✅ Correct: `<p>Text here</p>`
+- ❌ Wrong: `<div>Text here</div>` - **Text will NOT appear in PowerPoint**
+- ❌ Wrong: `<span>Text</span>` - **Text will NOT appear in PowerPoint**
+- Text in `<div>` or `<span>` without a text tag will be silently ignored
+
+**NEVER use manual bullet symbols (•, -, *, etc.)** - Use `<ul>` or `<ol>` lists instead
+
+**ONLY use web-safe fonts that are universally available:**
+- ✅ Web-safe fonts: `Arial`, `Helvetica`, `Times New Roman`, `Georgia`, `Courier New`, `Verdana`, `Tahoma`, `Trebuchet MS`, `Impact`, `Comic Sans MS`
+- ❌ Wrong: `'Segoe UI'`, `'SF Pro'`, `'Roboto'`, custom fonts - **Might cause rendering issues**
+
+### Styling
+
+- Use `display: flex` on body to prevent margin collapse from breaking overflow validation
+- Use `margin` for spacing (padding is included in size)
+- Inline formatting: Use `<b>`, `<i>`, `<u>` tags OR `<span>` with CSS styles
+  - `<span>` supports: `font-weight: bold`, `font-style: italic`, `text-decoration: underline`, `color: #rrggbb`
+  - `<span>` does NOT support: `margin`, `padding` (not supported in PowerPoint text runs)
+  - Example: `<span style="font-weight: bold; color: #0066CC">Bold blue text</span>`
+- Flexbox works - positions calculated from rendered layout
+- Use hex colors with `#` prefix in CSS
+- **Text alignment**: Use CSS `text-align` (`center`, `right`, etc.) when needed as a hint to PptxGenJS for text formatting if text lengths are slightly off
+
+### Shape Styling (DIV elements only)
+
+**IMPORTANT: Backgrounds, borders, and shadows only work on `<div>` elements, NOT on text elements (`<p>`, `<h1>`-`<h6>`, `<ul>`, `<ol>`)**
+
+- **Backgrounds**: CSS `background` or `background-color` on `<div>` elements only
+  - Example: `<div style="background-color: #0066CC;">` - Creates a shape with background
+- **Borders**: CSS `border` on `<div>` elements converts to PowerPoint shape borders
+  - Supports uniform borders: `border: 2px solid #333333`
+  - Supports partial borders: `border-left`, `border-right`, `border-top`, `border-bottom` (rendered as line shapes)
+  - Example: `<div style="border: 2px solid #333333;">`
+- **Border radius**: CSS `border-radius` on `<div>` elements for rounded corners
+  - `border-radius: 50%` or higher creates circular shape
+  - Percentages <50% calculated relative to shape's smaller dimension
+  - Supports px and pt units (e.g., `border-radius: 8pt;`, `border-radius: 12px;`)
+  - Example: `<div style="border-radius: 25%">` on 100x200px box = 25% of 100px = 25px radius
+- **Box shadows**: CSS `box-shadow` on `<div>` elements converts to PowerPoint shadows
+  - Supports outer shadows only (inset shadows are ignored to prevent corruption)
+  - Example: `<div style="box-shadow: 2px 2px 8px rgba(0,0,0,0.3);">`
+  - Note: Inset/inner shadows are not supported by PowerPoint and will be skipped
+
+### Icons & Gradients
+
+- **CRITICAL: Never use CSS gradients (`linear-gradient`, `radial-gradient`)** - They don't convert to PowerPoint
+- **ALWAYS create gradient/icon PNGs FIRST using Sharp, then reference in HTML**
+- For gradients: Rasterize SVG to PNG background images
+- For icons: Rasterize react-icons SVG to PNG images
+- All visual effects must be pre-rendered as raster images before HTML rendering
+
+**Rasterizing Icons with Sharp:**
+
+```javascript
+const React = require('react');
+const ReactDOMServer = require('react-dom/server');
+const sharp = require('sharp');
+const { FaHome } = require('react-icons/fa');
+
+async function rasterizeIconPng(IconComponent, color, size = "256", filename) {
+  const svgString = ReactDOMServer.renderToStaticMarkup(
+    React.createElement(IconComponent, { color: `#${color}`, size: size })
+  );
+
+  // Convert SVG to PNG using Sharp
+  await sharp(Buffer.from(svgString))
+    .png()
+    .toFile(filename);
+
+  return filename;
+}
+
+// Usage: Rasterize icon before using in HTML
+const iconPath = await rasterizeIconPng(FaHome, "4472c4", "256", "home-icon.png");
+// Then reference in HTML: <img src="home-icon.png">
+```
+
+**Rasterizing Gradients with Sharp:**
+
+```javascript
+const sharp = require('sharp');
+
+async function createGradientBackground(filename) {
+  // Gradient stops and canvas size are illustrative - adjust to your palette
+  const svg = `
+    <svg width="1280" height="720" xmlns="http://www.w3.org/2000/svg">
+      <defs>
+        <linearGradient id="grad" x1="0%" y1="0%" x2="100%" y2="100%">
+          <stop offset="0%" stop-color="#4472C4"/>
+          <stop offset="100%" stop-color="#2C3E50"/>
+        </linearGradient>
+      </defs>
+      <rect width="1280" height="720" fill="url(#grad)"/>
+    </svg>`;
+
+  await sharp(Buffer.from(svg))
+    .png()
+    .toFile(filename);
+
+  return filename;
+}
+
+// Usage: Create gradient background before HTML
+const bgPath = await createGradientBackground("gradient-bg.png");
+// Then in HTML: <img src="gradient-bg.png" style="width: 100%; height: 100%;">
+```
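+
+Because every icon and gradient must exist as a PNG before the HTML is rendered, it helps to prepare all assets up front in one pass. A minimal sketch combining the two helpers above (the icon choices and file names are illustrative, not prescribed by the library):
+
+```javascript
+const { FaHome, FaChartBar } = require('react-icons/fa');
+
+// Assumes rasterizeIconPng and createGradientBackground from the snippets above
+async function prepareAssets() {
+  // Rasterize everything before any HTML file references it
+  const [homeIcon, chartIcon, background] = await Promise.all([
+    rasterizeIconPng(FaHome, "4472c4", "256", "home-icon.png"),
+    rasterizeIconPng(FaChartBar, "ed7d31", "256", "chart-icon.png"),
+    createGradientBackground("gradient-bg.png")
+  ]);
+  return { homeIcon, chartIcon, background };
+}
+```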
+
+### Example
+
+```html
+<!DOCTYPE html>
+<html>
+<body style="width: 720pt; height: 405pt; margin: 0; display: flex; gap: 40pt; padding: 40pt; box-sizing: border-box; font-family: Arial;">
+    <div>
+        <h1>Recipe Title</h1>
+        <ul>
+            <li><b>Item:</b> Description</li>
+        </ul>
+        <p>Text with <b>bold</b>, <i>italic</i>, <u>underline</u>.</p>
+        <div class="placeholder" id="chart" style="width: 360pt; height: 180pt;"></div>
+    </div>
+    <div style="background-color: #4472C4; border-radius: 50%; width: 60pt; height: 60pt;">
+        <p style="color: #FFFFFF; text-align: center;">5</p>
+    </div>
+</body>
+</html>
                      + + +``` + +## Using the html2pptx Library + +### Dependencies + +These libraries have been globally installed and are available to use: +- `pptxgenjs` +- `playwright` +- `sharp` + +### Basic Usage + +```javascript +const pptxgen = require('pptxgenjs'); +const html2pptx = require('./html2pptx'); + +const pptx = new pptxgen(); +pptx.layout = 'LAYOUT_16x9'; // Must match HTML body dimensions + +const { slide, placeholders } = await html2pptx('slide1.html', pptx); + +// Add chart to placeholder area +if (placeholders.length > 0) { + slide.addChart(pptx.charts.LINE, chartData, placeholders[0]); +} + +await pptx.writeFile('output.pptx'); +``` + +### API Reference + +#### Function Signature +```javascript +await html2pptx(htmlFile, pres, options) +``` + +#### Parameters +- `htmlFile` (string): Path to HTML file (absolute or relative) +- `pres` (pptxgen): PptxGenJS presentation instance with layout already set +- `options` (object, optional): + - `tmpDir` (string): Temporary directory for generated files (default: `process.env.TMPDIR || '/tmp'`) + - `slide` (object): Existing slide to reuse (default: creates new slide) + +#### Returns +```javascript +{ + slide: pptxgenSlide, // The created/updated slide + placeholders: [ // Array of placeholder positions + { id: string, x: number, y: number, w: number, h: number }, + ... + ] +} +``` + +### Validation + +The library automatically validates and collects all errors before throwing: + +1. **HTML dimensions must match presentation layout** - Reports dimension mismatches +2. **Content must not overflow body** - Reports overflow with exact measurements +3. **CSS gradients** - Reports unsupported gradient usage +4. **Text element styling** - Reports backgrounds/borders/shadows on text elements (only allowed on divs) + +**All validation errors are collected and reported together** in a single error message, allowing you to fix all issues at once instead of one at a time. + +### Working with Placeholders + +```javascript +const { slide, placeholders } = await html2pptx('slide.html', pptx); + +// Use first placeholder +slide.addChart(pptx.charts.BAR, data, placeholders[0]); + +// Find by ID +const chartArea = placeholders.find(p => p.id === 'chart-area'); +slide.addChart(pptx.charts.LINE, data, chartArea); +``` + +### Complete Example + +```javascript +const pptxgen = require('pptxgenjs'); +const html2pptx = require('./html2pptx'); + +async function createPresentation() { + const pptx = new pptxgen(); + pptx.layout = 'LAYOUT_16x9'; + pptx.author = 'Your Name'; + pptx.title = 'My Presentation'; + + // Slide 1: Title + const { slide: slide1 } = await html2pptx('slides/title.html', pptx); + + // Slide 2: Content with chart + const { slide: slide2, placeholders } = await html2pptx('slides/data.html', pptx); + + const chartData = [{ + name: 'Sales', + labels: ['Q1', 'Q2', 'Q3', 'Q4'], + values: [4500, 5500, 6200, 7100] + }]; + + slide2.addChart(pptx.charts.BAR, chartData, { + ...placeholders[0], + showTitle: true, + title: 'Quarterly Sales', + showCatAxisTitle: true, + catAxisTitle: 'Quarter', + showValAxisTitle: true, + valAxisTitle: 'Sales ($000s)' + }); + + // Save + await pptx.writeFile({ fileName: 'presentation.pptx' }); + console.log('Presentation created successfully!'); +} + +createPresentation().catch(console.error); +``` + +## Using PptxGenJS + +After converting HTML to slides with `html2pptx`, you'll use PptxGenJS to add dynamic content like charts, images, and additional elements. 
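+
+One recurring pitfall (detailed under Critical Rules below) is the `#` prefix on hex colors. If your palette originates in CSS, consider stripping the prefix before any value reaches PptxGenJS. A minimal sketch - `toPptxColor` is an illustrative helper, not a library API:
+
+```javascript
+// Normalize "#ff0000" or "ff0000" to the "FF0000" form PptxGenJS expects
+function toPptxColor(cssColor) {
+  return cssColor.replace(/^#/, '').toUpperCase();
+}
+
+// Usage: slide.addText("Title", { color: toPptxColor("#0066CC") });
+```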
+ +### ⚠️ Critical Rules + +#### Colors +- **NEVER use `#` prefix** with hex colors in PptxGenJS - causes file corruption +- ✅ Correct: `color: "FF0000"`, `fill: { color: "0066CC" }` +- ❌ Wrong: `color: "#FF0000"` (breaks document) + +### Adding Images + +Always calculate aspect ratios from actual image dimensions: + +```javascript +// Get image dimensions: identify image.png | grep -o '[0-9]* x [0-9]*' +const imgWidth = 1860, imgHeight = 1519; // From actual file +const aspectRatio = imgWidth / imgHeight; + +const h = 3; // Max height +const w = h * aspectRatio; +const x = (10 - w) / 2; // Center on 16:9 slide + +slide.addImage({ path: "chart.png", x, y: 1.5, w, h }); +``` + +### Adding Text + +```javascript +// Rich text with formatting +slide.addText([ + { text: "Bold ", options: { bold: true } }, + { text: "Italic ", options: { italic: true } }, + { text: "Normal" } +], { + x: 1, y: 2, w: 8, h: 1 +}); +``` + +### Adding Shapes + +```javascript +// Rectangle +slide.addShape(pptx.shapes.RECTANGLE, { + x: 1, y: 1, w: 3, h: 2, + fill: { color: "4472C4" }, + line: { color: "000000", width: 2 } +}); + +// Circle +slide.addShape(pptx.shapes.OVAL, { + x: 5, y: 1, w: 2, h: 2, + fill: { color: "ED7D31" } +}); + +// Rounded rectangle +slide.addShape(pptx.shapes.ROUNDED_RECTANGLE, { + x: 1, y: 4, w: 3, h: 1.5, + fill: { color: "70AD47" }, + rectRadius: 0.2 +}); +``` + +### Adding Charts + +**Required for most charts:** Axis labels using `catAxisTitle` (category) and `valAxisTitle` (value). + +**Chart Data Format:** +- Use **single series with all labels** for simple bar/line charts +- Each series creates a separate legend entry +- Labels array defines X-axis values + +**Time Series Data - Choose Correct Granularity:** +- **< 30 days**: Use daily grouping (e.g., "10-01", "10-02") - avoid monthly aggregation that creates single-point charts +- **30-365 days**: Use monthly grouping (e.g., "2024-01", "2024-02") +- **> 365 days**: Use yearly grouping (e.g., "2023", "2024") +- **Validate**: Charts with only 1 data point likely indicate incorrect aggregation for the time period + +```javascript +const { slide, placeholders } = await html2pptx('slide.html', pptx); + +// CORRECT: Single series with all labels +slide.addChart(pptx.charts.BAR, [{ + name: "Sales 2024", + labels: ["Q1", "Q2", "Q3", "Q4"], + values: [4500, 5500, 6200, 7100] +}], { + ...placeholders[0], // Use placeholder position + barDir: 'col', // 'col' = vertical bars, 'bar' = horizontal + showTitle: true, + title: 'Quarterly Sales', + showLegend: false, // No legend needed for single series + // Required axis labels + showCatAxisTitle: true, + catAxisTitle: 'Quarter', + showValAxisTitle: true, + valAxisTitle: 'Sales ($000s)', + // Optional: Control scaling (adjust min based on data range for better visualization) + valAxisMaxVal: 8000, + valAxisMinVal: 0, // Use 0 for counts/amounts; for clustered data (e.g., 4500-7100), consider starting closer to min value + valAxisMajorUnit: 2000, // Control y-axis label spacing to prevent crowding + catAxisLabelRotate: 45, // Rotate labels if crowded + dataLabelPosition: 'outEnd', + dataLabelColor: '000000', + // Use single color for single-series charts + chartColors: ["4472C4"] // All bars same color +}); +``` + +#### Scatter Chart + +**IMPORTANT**: Scatter chart data format is unusual - first series contains X-axis values, subsequent series contain Y-values: + +```javascript +// Prepare data +const data1 = [{ x: 10, y: 20 }, { x: 15, y: 25 }, { x: 20, y: 30 }]; +const data2 = [{ x: 12, y: 18 }, { x: 
18, y: 22 }]; + +const allXValues = [...data1.map(d => d.x), ...data2.map(d => d.x)]; + +slide.addChart(pptx.charts.SCATTER, [ + { name: 'X-Axis', values: allXValues }, // First series = X values + { name: 'Series 1', values: data1.map(d => d.y) }, // Y values only + { name: 'Series 2', values: data2.map(d => d.y) } // Y values only +], { + x: 1, y: 1, w: 8, h: 4, + lineSize: 0, // 0 = no connecting lines + lineDataSymbol: 'circle', + lineDataSymbolSize: 6, + showCatAxisTitle: true, + catAxisTitle: 'X Axis', + showValAxisTitle: true, + valAxisTitle: 'Y Axis', + chartColors: ["4472C4", "ED7D31"] +}); +``` + +#### Line Chart + +```javascript +slide.addChart(pptx.charts.LINE, [{ + name: "Temperature", + labels: ["Jan", "Feb", "Mar", "Apr"], + values: [32, 35, 42, 55] +}], { + x: 1, y: 1, w: 8, h: 4, + lineSize: 4, + lineSmooth: true, + // Required axis labels + showCatAxisTitle: true, + catAxisTitle: 'Month', + showValAxisTitle: true, + valAxisTitle: 'Temperature (°F)', + // Optional: Y-axis range (set min based on data range for better visualization) + valAxisMinVal: 0, // For ranges starting at 0 (counts, percentages, etc.) + valAxisMaxVal: 60, + valAxisMajorUnit: 20, // Control y-axis label spacing to prevent crowding (e.g., 10, 20, 25) + // valAxisMinVal: 30, // PREFERRED: For data clustered in a range (e.g., 32-55 or ratings 3-5), start axis closer to min value to show variation + // Optional: Chart colors + chartColors: ["4472C4", "ED7D31", "A5A5A5"] +}); +``` + +#### Pie Chart (No Axis Labels Required) + +**CRITICAL**: Pie charts require a **single data series** with all categories in the `labels` array and corresponding values in the `values` array. + +```javascript +slide.addChart(pptx.charts.PIE, [{ + name: "Market Share", + labels: ["Product A", "Product B", "Other"], // All categories in one array + values: [35, 45, 20] // All values in one array +}], { + x: 2, y: 1, w: 6, h: 4, + showPercent: true, + showLegend: true, + legendPos: 'r', // right + chartColors: ["4472C4", "ED7D31", "A5A5A5"] +}); +``` + +#### Multiple Data Series + +```javascript +slide.addChart(pptx.charts.LINE, [ + { + name: "Product A", + labels: ["Q1", "Q2", "Q3", "Q4"], + values: [10, 20, 30, 40] + }, + { + name: "Product B", + labels: ["Q1", "Q2", "Q3", "Q4"], + values: [15, 25, 20, 35] + } +], { + x: 1, y: 1, w: 8, h: 4, + showCatAxisTitle: true, + catAxisTitle: 'Quarter', + showValAxisTitle: true, + valAxisTitle: 'Revenue ($M)' +}); +``` + +### Chart Colors + +**CRITICAL**: Use hex colors **without** the `#` prefix - including `#` causes file corruption. + +**Align chart colors with your chosen design palette**, ensuring sufficient contrast and distinctiveness for data visualization. 
Adjust colors for: +- Strong contrast between adjacent series +- Readability against slide backgrounds +- Accessibility (avoid red-green only combinations) + +```javascript +// Example: Ocean palette-inspired chart colors (adjusted for contrast) +const chartColors = ["16A085", "FF6B9D", "2C3E50", "F39C12", "9B59B6"]; + +// Single-series chart: Use one color for all bars/points +slide.addChart(pptx.charts.BAR, [{ + name: "Sales", + labels: ["Q1", "Q2", "Q3", "Q4"], + values: [4500, 5500, 6200, 7100] +}], { + ...placeholders[0], + chartColors: ["16A085"], // All bars same color + showLegend: false +}); + +// Multi-series chart: Each series gets a different color +slide.addChart(pptx.charts.LINE, [ + { name: "Product A", labels: ["Q1", "Q2", "Q3"], values: [10, 20, 30] }, + { name: "Product B", labels: ["Q1", "Q2", "Q3"], values: [15, 25, 20] } +], { + ...placeholders[0], + chartColors: ["16A085", "FF6B9D"] // One color per series +}); +``` + +### Adding Tables + +Tables can be added with basic or advanced formatting: + +#### Basic Table + +```javascript +slide.addTable([ + ["Header 1", "Header 2", "Header 3"], + ["Row 1, Col 1", "Row 1, Col 2", "Row 1, Col 3"], + ["Row 2, Col 1", "Row 2, Col 2", "Row 2, Col 3"] +], { + x: 0.5, + y: 1, + w: 9, + h: 3, + border: { pt: 1, color: "999999" }, + fill: { color: "F1F1F1" } +}); +``` + +#### Table with Custom Formatting + +```javascript +const tableData = [ + // Header row with custom styling + [ + { text: "Product", options: { fill: { color: "4472C4" }, color: "FFFFFF", bold: true } }, + { text: "Revenue", options: { fill: { color: "4472C4" }, color: "FFFFFF", bold: true } }, + { text: "Growth", options: { fill: { color: "4472C4" }, color: "FFFFFF", bold: true } } + ], + // Data rows + ["Product A", "$50M", "+15%"], + ["Product B", "$35M", "+22%"], + ["Product C", "$28M", "+8%"] +]; + +slide.addTable(tableData, { + x: 1, + y: 1.5, + w: 8, + h: 3, + colW: [3, 2.5, 2.5], // Column widths + rowH: [0.5, 0.6, 0.6, 0.6], // Row heights + border: { pt: 1, color: "CCCCCC" }, + align: "center", + valign: "middle", + fontSize: 14 +}); +``` + +#### Table with Merged Cells + +```javascript +const mergedTableData = [ + [ + { text: "Q1 Results", options: { colspan: 3, fill: { color: "4472C4" }, color: "FFFFFF", bold: true } } + ], + ["Product", "Sales", "Market Share"], + ["Product A", "$25M", "35%"], + ["Product B", "$18M", "25%"] +]; + +slide.addTable(mergedTableData, { + x: 1, + y: 1, + w: 8, + h: 2.5, + colW: [3, 2.5, 2.5], + border: { pt: 1, color: "DDDDDD" } +}); +``` + +### Table Options + +Common table options: +- `x, y, w, h` - Position and size +- `colW` - Array of column widths (in inches) +- `rowH` - Array of row heights (in inches) +- `border` - Border style: `{ pt: 1, color: "999999" }` +- `fill` - Background color (no # prefix) +- `align` - Text alignment: "left", "center", "right" +- `valign` - Vertical alignment: "top", "middle", "bottom" +- `fontSize` - Text size +- `autoPage` - Auto-create new slides if content overflows \ No newline at end of file diff --git a/.claude/skills/pptx/ooxml.md b/.claude/skills/pptx/ooxml.md new file mode 100644 index 0000000..951b3cf --- /dev/null +++ b/.claude/skills/pptx/ooxml.md @@ -0,0 +1,427 @@ +# Office Open XML Technical Reference for PowerPoint + +**Important: Read this entire document before starting.** Critical XML schema rules and formatting requirements are covered throughout. Incorrect implementation can create invalid PPTX files that PowerPoint cannot open. 
+
+## Technical Guidelines
+
+### Schema Compliance
+- **Element ordering in `<a:p>`**: `<a:pPr>`, `<a:r>`, `<a:endParaRPr>`
+- **Whitespace**: Add `xml:space='preserve'` to `<a:t>` elements with leading/trailing spaces
+- **Unicode**: Escape characters in ASCII content: `“` becomes `&#8220;`
+- **Images**: Add to `ppt/media/`, reference in slide XML, set dimensions to fit slide bounds
+- **Relationships**: Update `ppt/slides/_rels/slideN.xml.rels` for each slide's resources
+- **Dirty attribute**: Add `dirty="0"` to `<a:rPr>` and `<a:endParaRPr>` elements to indicate clean state
+
+## Presentation Structure
+
+### Basic Slide Structure
+```xml
+<p:sld xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"
+       xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships"
+       xmlns:p="http://schemas.openxmlformats.org/presentationml/2006/main">
+  <p:cSld>
+    <p:spTree>
+      <p:nvGrpSpPr>...</p:nvGrpSpPr>
+      <p:grpSpPr>...</p:grpSpPr>
+      <!-- Shapes, pictures, and tables go here -->
+    </p:spTree>
+  </p:cSld>
+  <p:clrMapOvr>
+    <a:masterClrMapping/>
+  </p:clrMapOvr>
+</p:sld>
+```
+
+### Text Box / Shape with Text
+```xml
+<p:sp>
+  <p:nvSpPr>
+    <p:cNvPr id="2" name="TextBox 1"/>
+    <p:cNvSpPr txBox="1"/>
+    <p:nvPr/>
+  </p:nvSpPr>
+  <p:spPr>
+    <a:xfrm>
+      <!-- positions/sizes are in EMUs (914400 = 1 inch); values here are illustrative -->
+      <a:off x="914400" y="914400"/>
+      <a:ext cx="7315200" cy="1143000"/>
+    </a:xfrm>
+    <a:prstGeom prst="rect">
+      <a:avLst/>
+    </a:prstGeom>
+  </p:spPr>
+  <p:txBody>
+    <a:bodyPr/>
+    <a:lstStyle/>
+    <a:p>
+      <a:r>
+        <a:rPr lang="en-US" dirty="0"/>
+        <a:t>Slide Title</a:t>
+      </a:r>
+    </a:p>
+  </p:txBody>
+</p:sp>
+```
+
+### Text Formatting
+```xml
+<!-- Bold -->
+<a:r>
+  <a:rPr lang="en-US" b="1" dirty="0"/>
+  <a:t>Bold Text</a:t>
+</a:r>
+<!-- Italic -->
+<a:r>
+  <a:rPr lang="en-US" i="1" dirty="0"/>
+  <a:t>Italic Text</a:t>
+</a:r>
+<!-- Underline -->
+<a:r>
+  <a:rPr lang="en-US" u="sng" dirty="0"/>
+  <a:t>Underlined</a:t>
+</a:r>
+<!-- Highlight -->
+<a:r>
+  <a:rPr lang="en-US" dirty="0">
+    <a:highlight>
+      <a:srgbClr val="FFFF00"/>
+    </a:highlight>
+  </a:rPr>
+  <a:t>Highlighted Text</a:t>
+</a:r>
+<!-- Color, typeface, and size (sz is in hundredths of a point) -->
+<a:r>
+  <a:rPr lang="en-US" sz="2400" dirty="0">
+    <a:solidFill>
+      <a:srgbClr val="FF0000"/>
+    </a:solidFill>
+    <a:latin typeface="Arial"/>
+  </a:rPr>
+  <a:t>Colored Arial 24pt</a:t>
+</a:r>
+<!-- Paragraph with end-of-paragraph run properties -->
+<a:p>
+  <a:r>
+    <a:rPr lang="en-US" dirty="0"/>
+    <a:t>Formatted text</a:t>
+  </a:r>
+  <a:endParaRPr lang="en-US" dirty="0"/>
+</a:p>
+```
+
+### Lists
+```xml
+<!-- Bulleted list item -->
+<a:p>
+  <a:pPr lvl="0">
+    <a:buChar char="•"/>
+  </a:pPr>
+  <a:r>
+    <a:rPr lang="en-US" dirty="0"/>
+    <a:t>First bullet point</a:t>
+  </a:r>
+</a:p>
+<!-- Numbered list item -->
+<a:p>
+  <a:pPr lvl="0">
+    <a:buAutoNum type="arabicPeriod"/>
+  </a:pPr>
+  <a:r>
+    <a:rPr lang="en-US" dirty="0"/>
+    <a:t>First numbered item</a:t>
+  </a:r>
+</a:p>
+<!-- Indented bullet (second level) -->
+<a:p>
+  <a:pPr lvl="1">
+    <a:buChar char="-"/>
+  </a:pPr>
+  <a:r>
+    <a:rPr lang="en-US" dirty="0"/>
+    <a:t>Indented bullet</a:t>
+  </a:r>
+</a:p>
+```
+
+### Shapes
+```xml
+<!-- Rectangle with solid fill and outline; use prst="ellipse", prst="roundRect", etc. for other shapes -->
+<p:sp>
+  <p:nvSpPr>
+    <p:cNvPr id="3" name="Rectangle 2"/>
+    <p:cNvSpPr/>
+    <p:nvPr/>
+  </p:nvSpPr>
+  <p:spPr>
+    <a:xfrm>
+      <a:off x="914400" y="914400"/>
+      <a:ext cx="1828800" cy="914400"/>
+    </a:xfrm>
+    <a:prstGeom prst="rect">
+      <a:avLst/>
+    </a:prstGeom>
+    <a:solidFill>
+      <a:srgbClr val="4472C4"/>
+    </a:solidFill>
+    <a:ln w="25400">
+      <a:solidFill>
+        <a:srgbClr val="000000"/>
+      </a:solidFill>
+    </a:ln>
+  </p:spPr>
+  <p:txBody>
+    <a:bodyPr/>
+    <a:lstStyle/>
+    <a:p/>
+  </p:txBody>
+</p:sp>
+```
+
+### Images
+```xml
+<p:pic>
+  <p:nvPicPr>
+    <p:cNvPr id="4" name="Picture 3"/>
+    <p:cNvPicPr/>
+    <p:nvPr/>
+  </p:nvPicPr>
+  <p:blipFill>
+    <a:blip r:embed="rId2"/>
+    <a:stretch>
+      <a:fillRect/>
+    </a:stretch>
+  </p:blipFill>
+  <p:spPr>
+    <a:xfrm>
+      <a:off x="914400" y="914400"/>
+      <a:ext cx="3657600" cy="2743200"/>
+    </a:xfrm>
+    <a:prstGeom prst="rect">
+      <a:avLst/>
+    </a:prstGeom>
+  </p:spPr>
+</p:pic>
+```
+
+### Tables
+```xml
+<p:graphicFrame>
+  <p:nvGraphicFramePr>
+    <p:cNvPr id="5" name="Table 4"/>
+    <p:cNvGraphicFramePr/>
+    <p:nvPr/>
+  </p:nvGraphicFramePr>
+  <p:xfrm>
+    <a:off x="914400" y="914400"/>
+    <a:ext cx="5486400" cy="914400"/>
+  </p:xfrm>
+  <a:graphic>
+    <a:graphicData uri="http://schemas.openxmlformats.org/drawingml/2006/table">
+      <a:tbl>
+        <a:tblGrid>
+          <a:gridCol w="2743200"/>
+          <a:gridCol w="2743200"/>
+        </a:tblGrid>
+        <a:tr h="457200">
+          <a:tc>
+            <a:txBody>
+              <a:bodyPr/>
+              <a:p><a:r><a:rPr lang="en-US" dirty="0"/><a:t>Cell 1</a:t></a:r></a:p>
+            </a:txBody>
+            <a:tcPr/>
+          </a:tc>
+          <a:tc>
+            <a:txBody>
+              <a:bodyPr/>
+              <a:p><a:r><a:rPr lang="en-US" dirty="0"/><a:t>Cell 2</a:t></a:r></a:p>
+            </a:txBody>
+            <a:tcPr/>
+          </a:tc>
+        </a:tr>
+      </a:tbl>
+    </a:graphicData>
+  </a:graphic>
+</p:graphicFrame>
+```
+
+### Slide Layouts
+
+```xml
+<p:sldLayout xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"
+             xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships"
+             xmlns:p="http://schemas.openxmlformats.org/presentationml/2006/main"
+             type="title" preserve="1">
+  <p:cSld name="Title Slide">
+    <p:spTree>
+      <p:nvGrpSpPr>...</p:nvGrpSpPr>
+      <p:grpSpPr>...</p:grpSpPr>
+      <!-- Placeholder shapes go here (<p:ph type="title"/>, <p:ph type="body"/>, ...) -->
+    </p:spTree>
+  </p:cSld>
+  <p:clrMapOvr>
+    <a:masterClrMapping/>
+  </p:clrMapOvr>
+</p:sldLayout>
+```
+
+## File Updates
+
+When adding content, update these files:
+
+**`ppt/_rels/presentation.xml.rels`:**
+```xml
+<Relationship Id="rId10" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/slide" Target="slides/slide2.xml"/>
+```
+
+**`ppt/slides/_rels/slide1.xml.rels`:**
+```xml
+<Relationship Id="rId2" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/image" Target="../media/image1.png"/>
+```
+
+**`[Content_Types].xml`:**
+```xml
+<Default Extension="png" ContentType="image/png"/>
+<Override PartName="/ppt/slides/slide2.xml" ContentType="application/vnd.openxmlformats-officedocument.presentationml.slide+xml"/>
+```
+
+**`ppt/presentation.xml`:**
+```xml
+<p:sldIdLst>
+  <p:sldId id="256" r:id="rId2"/>
+  <p:sldId id="257" r:id="rId10"/>
+</p:sldIdLst>
+```
+
+**`docProps/app.xml`:** Update slide count and statistics
+```xml
+<Slides>2</Slides>
+<Paragraphs>10</Paragraphs>
+<Words>50</Words>
+```
+
+## Slide Operations
+
+### Adding a New Slide
+When adding a slide to the end of the presentation:
+
+1. **Create the slide file** (`ppt/slides/slideN.xml`)
+2. **Update `[Content_Types].xml`**: Add Override for the new slide
+3. **Update `ppt/_rels/presentation.xml.rels`**: Add relationship for the new slide
+4. **Update `ppt/presentation.xml`**: Add slide ID to `<p:sldIdLst>`
+5. **Create slide relationships** (`ppt/slides/_rels/slideN.xml.rels`) if needed
+6. **Update `docProps/app.xml`**: Increment slide count and update statistics (if present)
+
+### Duplicating a Slide
+1. Copy the source slide XML file with a new name
+2. Update all IDs in the new slide to be unique
+3. Follow the "Adding a New Slide" steps above
+4. **CRITICAL**: Remove or update any notes slide references in `_rels` files
+5. Remove references to unused media files
+
+### Reordering Slides
+1. **Update `ppt/presentation.xml`**: Reorder `<p:sldId>` elements in `<p:sldIdLst>`
+2. The order of `<p:sldId>` elements determines slide order
+3. Keep slide IDs and relationship IDs unchanged
+
+Example:
+```xml
+<p:sldIdLst>
+  <p:sldId id="258" r:id="rId4"/><!-- moved to first position -->
+  <p:sldId id="256" r:id="rId2"/>
+  <p:sldId id="257" r:id="rId3"/>
+</p:sldIdLst>
+```
+
+### Deleting a Slide
+1. **Remove from `ppt/presentation.xml`**: Delete the `<p:sldId>` entry
+2. **Remove from `ppt/_rels/presentation.xml.rels`**: Delete the relationship
+3. **Remove from `[Content_Types].xml`**: Delete the Override entry
+4. **Delete files**: Remove `ppt/slides/slideN.xml` and `ppt/slides/_rels/slideN.xml.rels`
+5. **Update `docProps/app.xml`**: Decrement slide count and update statistics
+6. **Clean up unused media**: Remove orphaned images from `ppt/media/`
+
+Note: Don't renumber remaining slides - keep their original IDs and filenames.
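+
+After any of these operations, it is worth cross-checking the package against `[Content_Types].xml` before repacking. A minimal sketch in Node, assuming the PPTX has been unpacked into `dir` (`checkSlideOverrides` is an illustrative helper, not part of any script in this skill):
+
+```javascript
+const fs = require('fs');
+const path = require('path');
+
+// List slide parts on disk that lack an Override declaration
+function checkSlideOverrides(dir) {
+  const contentTypes = fs.readFileSync(path.join(dir, '[Content_Types].xml'), 'utf8');
+  const slides = fs.readdirSync(path.join(dir, 'ppt', 'slides'))
+    .filter(f => f.endsWith('.xml'));
+  const missing = slides.filter(f => !contentTypes.includes(`/ppt/slides/${f}`));
+  if (missing.length) console.log('Missing Content_Types overrides:', missing);
+  return missing;
+}
+```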
+
+## Common Errors to Avoid
+
+- **Encodings**: Escape unicode characters in ASCII content: `“` becomes `&#8220;`
+- **Images**: Add to `ppt/media/` and update relationship files
+- **Lists**: Omit bullets from list headers
+- **IDs**: Use valid hexadecimal values for UUIDs
+- **Themes**: Check all themes in the `theme` directory for colors
+
+## Validation Checklist for Template-Based Presentations
+
+### Before Packing, Always:
+- **Clean unused resources**: Remove unreferenced media, fonts, and notes directories
+- **Fix Content_Types.xml**: Declare ALL slides, layouts, and themes present in the package
+- **Fix relationship IDs**:
+  - Remove font embed references if not using embedded fonts
+- **Remove broken references**: Check all `_rels` files for references to deleted resources
+
+### Common Template Duplication Pitfalls:
+- Multiple slides referencing the same notes slide after duplication
+- Image/media references from template slides that no longer exist
+- Font embedding references when fonts aren't included
+- Missing slideLayout declarations for layouts 12-25
+- docProps directory may not unpack - this is optional
\ No newline at end of file
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd new file mode 100644 index 0000000..afa4f46 --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd @@ -0,0 +1,146 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd new file mode 100644 index 0000000..64e66b8 --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd @@ -0,0 +1,1085 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd new file mode 100644 index 0000000..687eea8 --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd @@ -0,0 +1,11 @@ + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-main.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-main.xsd new file mode 100644 index 0000000..6ac81b0 --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-main.xsd @@ -0,0 +1,3081 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-picture.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-picture.xsd new file mode 100644 index 0000000..1dbf051 --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-picture.xsd @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd new file mode 100644 index 0000000..f1af17d --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd @@ -0,0 +1,185 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd new file mode 100644 index 0000000..0a185ab --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd @@ -0,0 +1,287 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/pml.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/pml.xsd new file mode 100644 index 0000000..14ef488 --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/pml.xsd @@ -0,0 +1,1676 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd new file mode 100644 index 0000000..c20f3bf --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd new file mode 100644 index 0000000..ac60252 --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd @@ -0,0 +1,144 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd new file mode 100644 index 0000000..424b8ba --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd @@ -0,0 +1,174 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd new file mode 100644 index 0000000..2bddce2 --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd new file mode 100644 index 0000000..8a8c18b --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd new file mode 100644 index 0000000..5c42706 --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd new file mode 100644 index 0000000..853c341 --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd new file mode 100644 index 0000000..da835ee --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd @@ -0,0 +1,195 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-math.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-math.xsd new file mode 100644 index 0000000..87ad265 --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-math.xsd @@ -0,0 +1,582 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd new file mode 100644 index 0000000..9e86f1b --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/sml.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/sml.xsd new file mode 100644 index 0000000..d0be42e --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/sml.xsd @@ -0,0 +1,4439 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-main.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-main.xsd new file mode 100644 index 0000000..8821dd1 --- /dev/null +++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-main.xsd @@ -0,0 +1,570 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd
new file mode 100644
index 0000000..ca2575c
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd
@@ -0,0 +1,509 @@
+ [509 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd
new file mode 100644
index 0000000..dd079e6
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd
@@ -0,0 +1,12 @@
+ [12 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd
new file mode 100644
index 0000000..3dd6cf6
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd
@@ -0,0 +1,108 @@
+ [108 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd
new file mode 100644
index 0000000..f1041e3
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd
@@ -0,0 +1,96 @@
+ [96 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/wml.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/wml.xsd
new file mode 100644
index 0000000..9c5b7a6
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/wml.xsd
@@ -0,0 +1,3646 @@
+ [3646 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/xml.xsd b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/xml.xsd
new file mode 100644
index 0000000..0f13678
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/xml.xsd
@@ -0,0 +1,116 @@
+ [schema markup lost in extraction; the embedded W3C annotation text survives below]
+
+  See http://www.w3.org/XML/1998/namespace.html and
+  http://www.w3.org/TR/REC-xml for information about this namespace.
+
+  This schema document describes the XML namespace, in a form
+  suitable for import by other schema documents.
+
+  Note that local names in this namespace are intended to be defined
+  only by the World Wide Web Consortium or its subgroups.  The
+  following names are currently defined in this namespace and should
+  not be used with conflicting semantics by any Working Group,
+  specification, or document instance:
+
+  base (as an attribute name): denotes an attribute whose value
+       provides a URI to be used as the base for interpreting any
+       relative URIs in the scope of the element on which it
+       appears; its value is inherited.  This name is reserved
+       by virtue of its definition in the XML Base specification.
+
+  lang (as an attribute name): denotes an attribute whose value
+       is a language code for the natural language of the content of
+       any element; its value is inherited.  This name is reserved
+       by virtue of its definition in the XML specification.
+
+  space (as an attribute name): denotes an attribute whose
+       value is a keyword indicating what whitespace processing
+       discipline is intended for the content of the element; its
+       value is inherited.  This name is reserved by virtue of its
+       definition in the XML specification.
+
+  Father (in any context at all): denotes Jon Bosak, the chair of
+       the original XML Working Group.  This name is reserved by
+       the following decision of the W3C XML Plenary and
+       XML Coordination groups:
+
+           In appreciation for his vision, leadership and dedication
+           the W3C XML Plenary on this 10th day of February, 2000
+           reserves for Jon Bosak in perpetuity the XML name
+           xml:Father
+
+  This schema defines attributes and an attribute group
+  suitable for use by
+  schemas wishing to allow xml:base, xml:lang or xml:space attributes
+  on elements they define.
+
+  To enable this, such a schema must import this schema
+  for the XML namespace, e.g. as follows:
+  <schema . . .>
+   . . .
+   <import namespace="http://www.w3.org/XML/1998/namespace"
+           schemaLocation="http://www.w3.org/2001/03/xml.xsd"/>
+
+  Subsequently, qualified reference to any of the attributes
+  or the group defined below will have the desired effect, e.g.
+
+  <type . . .>
+   . . .
+   <attributeGroup ref="xml:specialAttrs"/>
+
+  will define a type which will schema-validate an instance
+  element with any of those attributes
+
+  In keeping with the XML Schema WG's standard versioning
+  policy, this schema document will persist at
+  http://www.w3.org/2001/03/xml.xsd.
+  At the date of issue it can also be found at
+  http://www.w3.org/2001/xml.xsd.
+  The schema document at that URI may however change in the future,
+  in order to remain compatible with the latest version of XML Schema
+  itself.
+  In other words, if the XML Schema namespace changes, the version
+  of this document at http://www.w3.org/2001/xml.xsd will change
+  accordingly; the version at http://www.w3.org/2001/03/xml.xsd
+  will not change.
+
+ [attribute declarations lost in extraction; only their annotations survive]
+
+  In due course, we should install the relevant ISO 2- and 3-letter
+  codes as the enumerated possible values . . .
+
+  See http://www.w3.org/TR/xmlbase/ for
+  information about this attribute.
diff --git a/.claude/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-contentTypes.xsd b/.claude/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-contentTypes.xsd
new file mode 100644
index 0000000..a6de9d2
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-contentTypes.xsd
@@ -0,0 +1,42 @@
+ [42 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-coreProperties.xsd b/.claude/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-coreProperties.xsd
new file mode 100644
index 0000000..10e978b
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-coreProperties.xsd
@@ -0,0 +1,50 @@
+ [50 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-digSig.xsd b/.claude/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-digSig.xsd
new file mode 100644
index 0000000..4248bf7
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-digSig.xsd
@@ -0,0 +1,49 @@
+ [49 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-relationships.xsd b/.claude/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-relationships.xsd
new file mode 100644
index 0000000..5649746
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-relationships.xsd
@@ -0,0 +1,33 @@
+ [33 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/mce/mc.xsd b/.claude/skills/pptx/ooxml/schemas/mce/mc.xsd
new file mode 100644
index 0000000..ef72545
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/mce/mc.xsd
@@ -0,0 +1,75 @@
+ [75 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/microsoft/wml-2010.xsd b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-2010.xsd
new file mode 100644
index 0000000..f65f777
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-2010.xsd
@@ -0,0 +1,560 @@
+ [560 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/microsoft/wml-2012.xsd b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-2012.xsd
new file mode 100644
index 0000000..6b00755
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-2012.xsd
@@ -0,0 +1,67 @@
+ [67 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/microsoft/wml-2018.xsd b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-2018.xsd
new file mode 100644
index 0000000..f321d33
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-2018.xsd
@@ -0,0 +1,14 @@
+ [14 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/microsoft/wml-cex-2018.xsd b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-cex-2018.xsd
new file mode 100644
index 0000000..364c6a9
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-cex-2018.xsd
@@ -0,0 +1,20 @@
+ [20 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/microsoft/wml-cid-2016.xsd b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-cid-2016.xsd
new file mode 100644
index 0000000..fed9d15
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-cid-2016.xsd
@@ -0,0 +1,13 @@
+ [13 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/microsoft/wml-sdtdatahash-2020.xsd b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-sdtdatahash-2020.xsd
new file mode 100644
index 0000000..680cf15
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-sdtdatahash-2020.xsd
@@ -0,0 +1,4 @@
+ [4 lines of XSD markup lost in extraction]
diff --git a/.claude/skills/pptx/ooxml/schemas/microsoft/wml-symex-2015.xsd b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-symex-2015.xsd
new file mode 100644
index 0000000..89ada90
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/schemas/microsoft/wml-symex-2015.xsd
@@ -0,0 +1,8 @@
+ [8 lines of XSD markup lost in extraction]
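The schemas above are consumed by the validation scripts that follow. As a rough sketch of how such XSDs are loaded with lxml (illustrative only; the file paths here are examples, and the real logic lives in validation/base.py further down):

    import lxml.etree

    # Load one of the ISO/IEC 29500 schemas and validate an unpacked part against it.
    xsd_doc = lxml.etree.parse("schemas/ISO-IEC29500-4_2016/pml.xsd")
    schema = lxml.etree.XMLSchema(xsd_doc)

    doc = lxml.etree.parse("unpacked/ppt/presentation.xml")
    if not schema.validate(doc):
        for error in schema.error_log:
            print(error.message)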
diff --git a/.claude/skills/pptx/ooxml/scripts/pack.py b/.claude/skills/pptx/ooxml/scripts/pack.py
new file mode 100644
index 0000000..68bc088
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/scripts/pack.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python3
+"""
+Tool to pack a directory into a .docx, .pptx, or .xlsx file with XML formatting undone.
+
+Example usage:
+    python pack.py <input_directory> <output_file> [--force]
+"""
+
+import argparse
+import shutil
+import subprocess
+import sys
+import tempfile
+import defusedxml.minidom
+import zipfile
+from pathlib import Path
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Pack a directory into an Office file")
+    parser.add_argument("input_directory", help="Unpacked Office document directory")
+    parser.add_argument("output_file", help="Output Office file (.docx/.pptx/.xlsx)")
+    parser.add_argument("--force", action="store_true", help="Skip validation")
+    args = parser.parse_args()
+
+    try:
+        success = pack_document(
+            args.input_directory, args.output_file, validate=not args.force
+        )
+
+        # Show warning if validation was skipped
+        if args.force:
+            print("Warning: Skipped validation, file may be corrupt", file=sys.stderr)
+        # Exit with error if validation failed
+        elif not success:
+            print("Contents would produce a corrupt file.", file=sys.stderr)
+            print("Please validate XML before repacking.", file=sys.stderr)
+            print("Use --force to skip validation and pack anyway.", file=sys.stderr)
+            sys.exit(1)
+
+    except ValueError as e:
+        sys.exit(f"Error: {e}")
+
+
+def pack_document(input_dir, output_file, validate=False):
+    """Pack a directory into an Office file (.docx/.pptx/.xlsx).
+
+    Args:
+        input_dir: Path to unpacked Office document directory
+        output_file: Path to output Office file
+        validate: If True, validates with soffice (default: False)
+
+    Returns:
+        bool: True if successful, False if validation failed
+    """
+    input_dir = Path(input_dir)
+    output_file = Path(output_file)
+
+    if not input_dir.is_dir():
+        raise ValueError(f"{input_dir} is not a directory")
+    if output_file.suffix.lower() not in {".docx", ".pptx", ".xlsx"}:
+        raise ValueError(f"{output_file} must be a .docx, .pptx, or .xlsx file")
+
+    # Work in temporary directory to avoid modifying original
+    with tempfile.TemporaryDirectory() as temp_dir:
+        temp_content_dir = Path(temp_dir) / "content"
+        shutil.copytree(input_dir, temp_content_dir)
+
+        # Process XML files to remove pretty-printing whitespace
+        for pattern in ["*.xml", "*.rels"]:
+            for xml_file in temp_content_dir.rglob(pattern):
+                condense_xml(xml_file)
+
+        # Create final Office file as zip archive
+        output_file.parent.mkdir(parents=True, exist_ok=True)
+        with zipfile.ZipFile(output_file, "w", zipfile.ZIP_DEFLATED) as zf:
+            for f in temp_content_dir.rglob("*"):
+                if f.is_file():
+                    zf.write(f, f.relative_to(temp_content_dir))
+
+    # Validate if requested
+    if validate:
+        if not validate_document(output_file):
+            output_file.unlink()  # Delete the corrupt file
+            return False
+
+    return True
+
+
+def validate_document(doc_path):
+    """Validate document by converting to HTML with soffice."""
+    # Determine the correct filter based on file extension
+    match doc_path.suffix.lower():
+        case ".docx":
+            filter_name = "html:HTML"
+        case ".pptx":
+            filter_name = "html:impress_html_Export"
+        case ".xlsx":
+            filter_name = "html:HTML (StarCalc)"
+
+    with tempfile.TemporaryDirectory() as temp_dir:
+        try:
+            result = subprocess.run(
+                [
+                    "soffice",
+                    "--headless",
+                    "--convert-to",
+                    filter_name,
+                    "--outdir",
+                    temp_dir,
+                    str(doc_path),
+                ],
+                capture_output=True,
+                timeout=10,
+                text=True,
+            )
+            if not (Path(temp_dir) / f"{doc_path.stem}.html").exists():
+                error_msg = result.stderr.strip() or "Document validation failed"
+                print(f"Validation error: {error_msg}", file=sys.stderr)
+                return False
+            return True
+        except FileNotFoundError:
+            print("Warning: soffice not found. Skipping validation.", file=sys.stderr)
+            return True
+        except subprocess.TimeoutExpired:
+            print("Validation error: Timeout during conversion", file=sys.stderr)
+            return False
+        except Exception as e:
+            print(f"Validation error: {e}", file=sys.stderr)
+            return False
+
+
+def condense_xml(xml_file):
+    """Strip unnecessary whitespace and remove comments."""
+    with open(xml_file, "r", encoding="utf-8") as f:
+        dom = defusedxml.minidom.parse(f)
+
+    # Process each element to remove whitespace and comments
+    for element in dom.getElementsByTagName("*"):
+        # Skip w:t elements and their processing
+        if element.tagName.endswith(":t"):
+            continue
+
+        # Remove whitespace-only text nodes and comment nodes
+        for child in list(element.childNodes):
+            if (
+                child.nodeType == child.TEXT_NODE
+                and child.nodeValue
+                and child.nodeValue.strip() == ""
+            ) or child.nodeType == child.COMMENT_NODE:
+                element.removeChild(child)
+
+    # Write back the condensed XML
+    with open(xml_file, "wb") as f:
+        f.write(dom.toxml(encoding="UTF-8"))
+
+
+if __name__ == "__main__":
+    main()
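A minimal standalone sketch of the whitespace-stripping idea in condense_xml above: structural pretty-print whitespace is dropped while literal text runs (elements whose tag ends in ":t") are left untouched. The sample XML is invented for illustration; it is not part of the skill scripts.

    import defusedxml.minidom

    pretty = """<?xml version="1.0"?>
    <w:p xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main">
      <w:r>
        <w:t>Hello</w:t>
      </w:r>
    </w:p>"""

    dom = defusedxml.minidom.parseString(pretty)
    for element in dom.getElementsByTagName("*"):
        if element.tagName.endswith(":t"):
            continue  # never touch literal text runs
        for child in list(element.childNodes):
            if child.nodeType == child.TEXT_NODE and (child.nodeValue or "").strip() == "":
                element.removeChild(child)

    # Indentation between structural tags is gone; the <w:t> text is intact.
    print(dom.toxml())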
diff --git a/.claude/skills/pptx/ooxml/scripts/unpack.py b/.claude/skills/pptx/ooxml/scripts/unpack.py
new file mode 100644
index 0000000..4938798
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/scripts/unpack.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python3
+"""Unpack and format XML contents of Office files (.docx, .pptx, .xlsx)"""
+
+import random
+import sys
+import defusedxml.minidom
+import zipfile
+from pathlib import Path
+
+# Get command line arguments
+assert len(sys.argv) == 3, "Usage: python unpack.py <office_file> <output_dir>"
+input_file, output_dir = sys.argv[1], sys.argv[2]
+
+# Extract and format
+output_path = Path(output_dir)
+output_path.mkdir(parents=True, exist_ok=True)
+zipfile.ZipFile(input_file).extractall(output_path)
+
+# Pretty print all XML files
+xml_files = list(output_path.rglob("*.xml")) + list(output_path.rglob("*.rels"))
+for xml_file in xml_files:
+    content = xml_file.read_text(encoding="utf-8")
+    dom = defusedxml.minidom.parseString(content)
+    xml_file.write_bytes(dom.toprettyxml(indent="  ", encoding="ascii"))
+
+# For .docx files, suggest an RSID for tracked changes
+if input_file.endswith(".docx"):
+    suggested_rsid = "".join(random.choices("0123456789ABCDEF", k=8))
+    print(f"Suggested RSID for edit session: {suggested_rsid}")
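Taken together, the two scripts above support an unpack, edit, repack loop. A hypothetical session (file names invented) might look like:

    import subprocess

    # Explode the deck into pretty-printed XML parts.
    subprocess.run(["python", "unpack.py", "deck.pptx", "unpacked/"], check=True)

    # ... hand-edit unpacked/ppt/slides/slide1.xml here ...

    # Re-condense the XML and zip it back into a presentation.
    subprocess.run(["python", "pack.py", "unpacked/", "deck-edited.pptx"], check=True)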
diff --git a/.claude/skills/pptx/ooxml/scripts/validate.py b/.claude/skills/pptx/ooxml/scripts/validate.py
new file mode 100644
index 0000000..508c589
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/scripts/validate.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+"""
+Command line tool to validate Office document XML files against XSD schemas and tracked changes.
+
+Usage:
+    python validate.py <unpacked_dir> --original <original_file>
+"""
+
+import argparse
+import sys
+from pathlib import Path
+
+from validation import DOCXSchemaValidator, PPTXSchemaValidator, RedliningValidator
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Validate Office document XML files")
+    parser.add_argument(
+        "unpacked_dir",
+        help="Path to unpacked Office document directory",
+    )
+    parser.add_argument(
+        "--original",
+        required=True,
+        help="Path to original file (.docx/.pptx/.xlsx)",
+    )
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        action="store_true",
+        help="Enable verbose output",
+    )
+    args = parser.parse_args()
+
+    # Validate paths
+    unpacked_dir = Path(args.unpacked_dir)
+    original_file = Path(args.original)
+    file_extension = original_file.suffix.lower()
+    assert unpacked_dir.is_dir(), f"Error: {unpacked_dir} is not a directory"
+    assert original_file.is_file(), f"Error: {original_file} is not a file"
+    assert file_extension in [".docx", ".pptx", ".xlsx"], (
+        f"Error: {original_file} must be a .docx, .pptx, or .xlsx file"
+    )
+
+    # Run validations
+    match file_extension:
+        case ".docx":
+            validators = [DOCXSchemaValidator, RedliningValidator]
+        case ".pptx":
+            validators = [PPTXSchemaValidator]
+        case _:
+            print(f"Error: Validation not supported for file type {file_extension}")
+            sys.exit(1)
+
+    # Run validators
+    success = True
+    for V in validators:
+        validator = V(unpacked_dir, original_file, verbose=args.verbose)
+        if not validator.validate():
+            success = False
+
+    if success:
+        print("All validations PASSED!")
+
+    sys.exit(0 if success else 1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.claude/skills/pptx/ooxml/scripts/validation/__init__.py b/.claude/skills/pptx/ooxml/scripts/validation/__init__.py
new file mode 100644
index 0000000..db092ec
--- /dev/null
+++ b/.claude/skills/pptx/ooxml/scripts/validation/__init__.py
@@ -0,0 +1,15 @@
+"""
+Validation modules for Office document processing.
+"""
+
+from .base import BaseSchemaValidator
+from .docx import DOCXSchemaValidator
+from .pptx import PPTXSchemaValidator
+from .redlining import RedliningValidator
+
+__all__ = [
+    "BaseSchemaValidator",
+    "DOCXSchemaValidator",
+    "PPTXSchemaValidator",
+    "RedliningValidator",
+]
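The package exports also make the validators usable directly from Python. A hedged sketch mirroring what validate.py wires up for a .docx (run from the scripts directory; file paths invented):

    from pathlib import Path

    from validation import DOCXSchemaValidator, RedliningValidator

    unpacked = Path("unpacked")
    original = Path("report.docx")

    # Run every validator even if an earlier one fails, mirroring validate.py.
    results = [
        V(unpacked, original, verbose=True).validate()
        for V in (DOCXSchemaValidator, RedliningValidator)
    ]
    print("All validations PASSED!" if all(results) else "Validation FAILED")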
+""" + +import re +from pathlib import Path + +import lxml.etree + + +class BaseSchemaValidator: + """Base validator with common validation logic for document files.""" + + # Elements whose 'id' attributes must be unique within their file + # Format: element_name -> (attribute_name, scope) + # scope can be 'file' (unique within file) or 'global' (unique across all files) + UNIQUE_ID_REQUIREMENTS = { + # Word elements + "comment": ("id", "file"), # Comment IDs in comments.xml + "commentrangestart": ("id", "file"), # Must match comment IDs + "commentrangeend": ("id", "file"), # Must match comment IDs + "bookmarkstart": ("id", "file"), # Bookmark start IDs + "bookmarkend": ("id", "file"), # Bookmark end IDs + # Note: ins and del (track changes) can share IDs when part of same revision + # PowerPoint elements + "sldid": ("id", "file"), # Slide IDs in presentation.xml + "sldmasterid": ("id", "global"), # Slide master IDs must be globally unique + "sldlayoutid": ("id", "global"), # Slide layout IDs must be globally unique + "cm": ("authorid", "file"), # Comment author IDs + # Excel elements + "sheet": ("sheetid", "file"), # Sheet IDs in workbook.xml + "definedname": ("id", "file"), # Named range IDs + # Drawing/Shape elements (all formats) + "cxnsp": ("id", "file"), # Connection shape IDs + "sp": ("id", "file"), # Shape IDs + "pic": ("id", "file"), # Picture IDs + "grpsp": ("id", "file"), # Group shape IDs + } + + # Mapping of element names to expected relationship types + # Subclasses should override this with format-specific mappings + ELEMENT_RELATIONSHIP_TYPES = {} + + # Unified schema mappings for all Office document types + SCHEMA_MAPPINGS = { + # Document type specific schemas + "word": "ISO-IEC29500-4_2016/wml.xsd", # Word documents + "ppt": "ISO-IEC29500-4_2016/pml.xsd", # PowerPoint presentations + "xl": "ISO-IEC29500-4_2016/sml.xsd", # Excel spreadsheets + # Common file types + "[Content_Types].xml": "ecma/fouth-edition/opc-contentTypes.xsd", + "app.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd", + "core.xml": "ecma/fouth-edition/opc-coreProperties.xsd", + "custom.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd", + ".rels": "ecma/fouth-edition/opc-relationships.xsd", + # Word-specific files + "people.xml": "microsoft/wml-2012.xsd", + "commentsIds.xml": "microsoft/wml-cid-2016.xsd", + "commentsExtensible.xml": "microsoft/wml-cex-2018.xsd", + "commentsExtended.xml": "microsoft/wml-2012.xsd", + # Chart files (common across document types) + "chart": "ISO-IEC29500-4_2016/dml-chart.xsd", + # Theme files (common across document types) + "theme": "ISO-IEC29500-4_2016/dml-main.xsd", + # Drawing and media files + "drawing": "ISO-IEC29500-4_2016/dml-main.xsd", + } + + # Unified namespace constants + MC_NAMESPACE = "http://schemas.openxmlformats.org/markup-compatibility/2006" + XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace" + + # Common OOXML namespaces used across validators + PACKAGE_RELATIONSHIPS_NAMESPACE = ( + "http://schemas.openxmlformats.org/package/2006/relationships" + ) + OFFICE_RELATIONSHIPS_NAMESPACE = ( + "http://schemas.openxmlformats.org/officeDocument/2006/relationships" + ) + CONTENT_TYPES_NAMESPACE = ( + "http://schemas.openxmlformats.org/package/2006/content-types" + ) + + # Folders where we should clean ignorable namespaces + MAIN_CONTENT_FOLDERS = {"word", "ppt", "xl"} + + # All allowed OOXML namespaces (superset of all document types) + OOXML_NAMESPACES = { + "http://schemas.openxmlformats.org/officeDocument/2006/math", + 
"http://schemas.openxmlformats.org/officeDocument/2006/relationships", + "http://schemas.openxmlformats.org/schemaLibrary/2006/main", + "http://schemas.openxmlformats.org/drawingml/2006/main", + "http://schemas.openxmlformats.org/drawingml/2006/chart", + "http://schemas.openxmlformats.org/drawingml/2006/chartDrawing", + "http://schemas.openxmlformats.org/drawingml/2006/diagram", + "http://schemas.openxmlformats.org/drawingml/2006/picture", + "http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing", + "http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing", + "http://schemas.openxmlformats.org/wordprocessingml/2006/main", + "http://schemas.openxmlformats.org/presentationml/2006/main", + "http://schemas.openxmlformats.org/spreadsheetml/2006/main", + "http://schemas.openxmlformats.org/officeDocument/2006/sharedTypes", + "http://www.w3.org/XML/1998/namespace", + } + + def __init__(self, unpacked_dir, original_file, verbose=False): + self.unpacked_dir = Path(unpacked_dir).resolve() + self.original_file = Path(original_file) + self.verbose = verbose + + # Set schemas directory + self.schemas_dir = Path(__file__).parent.parent.parent / "schemas" + + # Get all XML and .rels files + patterns = ["*.xml", "*.rels"] + self.xml_files = [ + f for pattern in patterns for f in self.unpacked_dir.rglob(pattern) + ] + + if not self.xml_files: + print(f"Warning: No XML files found in {self.unpacked_dir}") + + def validate(self): + """Run all validation checks and return True if all pass.""" + raise NotImplementedError("Subclasses must implement the validate method") + + def validate_xml(self): + """Validate that all XML files are well-formed.""" + errors = [] + + for xml_file in self.xml_files: + try: + # Try to parse the XML file + lxml.etree.parse(str(xml_file)) + except lxml.etree.XMLSyntaxError as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {e.lineno}: {e.msg}" + ) + except Exception as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Unexpected error: {str(e)}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} XML violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All XML files are well-formed") + return True + + def validate_namespaces(self): + """Validate that namespace prefixes in Ignorable attributes are declared.""" + errors = [] + + for xml_file in self.xml_files: + try: + root = lxml.etree.parse(str(xml_file)).getroot() + declared = set(root.nsmap.keys()) - {None} # Exclude default namespace + + for attr_val in [ + v for k, v in root.attrib.items() if k.endswith("Ignorable") + ]: + undeclared = set(attr_val.split()) - declared + errors.extend( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Namespace '{ns}' in Ignorable but not declared" + for ns in undeclared + ) + except lxml.etree.XMLSyntaxError: + continue + + if errors: + print(f"FAILED - {len(errors)} namespace issues:") + for error in errors: + print(error) + return False + if self.verbose: + print("PASSED - All namespace prefixes properly declared") + return True + + def validate_unique_ids(self): + """Validate that specific IDs are unique according to OOXML requirements.""" + errors = [] + global_ids = {} # Track globally unique IDs across all files + + for xml_file in self.xml_files: + try: + root = lxml.etree.parse(str(xml_file)).getroot() + file_ids = {} # Track IDs that must be unique within this file + + # Remove all mc:AlternateContent elements from the tree + 
mc_elements = root.xpath( + ".//mc:AlternateContent", namespaces={"mc": self.MC_NAMESPACE} + ) + for elem in mc_elements: + elem.getparent().remove(elem) + + # Now check IDs in the cleaned tree + for elem in root.iter(): + # Get the element name without namespace + tag = ( + elem.tag.split("}")[-1].lower() + if "}" in elem.tag + else elem.tag.lower() + ) + + # Check if this element type has ID uniqueness requirements + if tag in self.UNIQUE_ID_REQUIREMENTS: + attr_name, scope = self.UNIQUE_ID_REQUIREMENTS[tag] + + # Look for the specified attribute + id_value = None + for attr, value in elem.attrib.items(): + attr_local = ( + attr.split("}")[-1].lower() + if "}" in attr + else attr.lower() + ) + if attr_local == attr_name: + id_value = value + break + + if id_value is not None: + if scope == "global": + # Check global uniqueness + if id_value in global_ids: + prev_file, prev_line, prev_tag = global_ids[ + id_value + ] + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: Global ID '{id_value}' in <{tag}> " + f"already used in {prev_file} at line {prev_line} in <{prev_tag}>" + ) + else: + global_ids[id_value] = ( + xml_file.relative_to(self.unpacked_dir), + elem.sourceline, + tag, + ) + elif scope == "file": + # Check file-level uniqueness + key = (tag, attr_name) + if key not in file_ids: + file_ids[key] = {} + + if id_value in file_ids[key]: + prev_line = file_ids[key][id_value] + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: Duplicate {attr_name}='{id_value}' in <{tag}> " + f"(first occurrence at line {prev_line})" + ) + else: + file_ids[key][id_value] = elem.sourceline + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} ID uniqueness violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All required IDs are unique") + return True + + def validate_file_references(self): + """ + Validate that all .rels files properly reference files and that all files are referenced. 
+ """ + errors = [] + + # Find all .rels files + rels_files = list(self.unpacked_dir.rglob("*.rels")) + + if not rels_files: + if self.verbose: + print("PASSED - No .rels files found") + return True + + # Get all files in the unpacked directory (excluding reference files) + all_files = [] + for file_path in self.unpacked_dir.rglob("*"): + if ( + file_path.is_file() + and file_path.name != "[Content_Types].xml" + and not file_path.name.endswith(".rels") + ): # This file is not referenced by .rels + all_files.append(file_path.resolve()) + + # Track all files that are referenced by any .rels file + all_referenced_files = set() + + if self.verbose: + print( + f"Found {len(rels_files)} .rels files and {len(all_files)} target files" + ) + + # Check each .rels file + for rels_file in rels_files: + try: + # Parse relationships file + rels_root = lxml.etree.parse(str(rels_file)).getroot() + + # Get the directory where this .rels file is located + rels_dir = rels_file.parent + + # Find all relationships and their targets + referenced_files = set() + broken_refs = [] + + for rel in rels_root.findall( + ".//ns:Relationship", + namespaces={"ns": self.PACKAGE_RELATIONSHIPS_NAMESPACE}, + ): + target = rel.get("Target") + if target and not target.startswith( + ("http", "mailto:") + ): # Skip external URLs + # Resolve the target path relative to the .rels file location + if rels_file.name == ".rels": + # Root .rels file - targets are relative to unpacked_dir + target_path = self.unpacked_dir / target + else: + # Other .rels files - targets are relative to their parent's parent + # e.g., word/_rels/document.xml.rels -> targets relative to word/ + base_dir = rels_dir.parent + target_path = base_dir / target + + # Normalize the path and check if it exists + try: + target_path = target_path.resolve() + if target_path.exists() and target_path.is_file(): + referenced_files.add(target_path) + all_referenced_files.add(target_path) + else: + broken_refs.append((target, rel.sourceline)) + except (OSError, ValueError): + broken_refs.append((target, rel.sourceline)) + + # Report broken references + if broken_refs: + rel_path = rels_file.relative_to(self.unpacked_dir) + for broken_ref, line_num in broken_refs: + errors.append( + f" {rel_path}: Line {line_num}: Broken reference to {broken_ref}" + ) + + except Exception as e: + rel_path = rels_file.relative_to(self.unpacked_dir) + errors.append(f" Error parsing {rel_path}: {e}") + + # Check for unreferenced files (files that exist but are not referenced anywhere) + unreferenced_files = set(all_files) - all_referenced_files + + if unreferenced_files: + for unref_file in sorted(unreferenced_files): + unref_rel_path = unref_file.relative_to(self.unpacked_dir) + errors.append(f" Unreferenced file: {unref_rel_path}") + + if errors: + print(f"FAILED - Found {len(errors)} relationship validation errors:") + for error in errors: + print(error) + print( + "CRITICAL: These errors will cause the document to appear corrupt. " + + "Broken references MUST be fixed, " + + "and unreferenced files MUST be referenced or removed." + ) + return False + else: + if self.verbose: + print( + "PASSED - All references are valid and all files are properly referenced" + ) + return True + + def validate_all_relationship_ids(self): + """ + Validate that all r:id attributes in XML files reference existing IDs + in their corresponding .rels files, and optionally validate relationship types. 
+ """ + import lxml.etree + + errors = [] + + # Process each XML file that might contain r:id references + for xml_file in self.xml_files: + # Skip .rels files themselves + if xml_file.suffix == ".rels": + continue + + # Determine the corresponding .rels file + # For dir/file.xml, it's dir/_rels/file.xml.rels + rels_dir = xml_file.parent / "_rels" + rels_file = rels_dir / f"{xml_file.name}.rels" + + # Skip if there's no corresponding .rels file (that's okay) + if not rels_file.exists(): + continue + + try: + # Parse the .rels file to get valid relationship IDs and their types + rels_root = lxml.etree.parse(str(rels_file)).getroot() + rid_to_type = {} + + for rel in rels_root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ): + rid = rel.get("Id") + rel_type = rel.get("Type", "") + if rid: + # Check for duplicate rIds + if rid in rid_to_type: + rels_rel_path = rels_file.relative_to(self.unpacked_dir) + errors.append( + f" {rels_rel_path}: Line {rel.sourceline}: " + f"Duplicate relationship ID '{rid}' (IDs must be unique)" + ) + # Extract just the type name from the full URL + type_name = ( + rel_type.split("/")[-1] if "/" in rel_type else rel_type + ) + rid_to_type[rid] = type_name + + # Parse the XML file to find all r:id references + xml_root = lxml.etree.parse(str(xml_file)).getroot() + + # Find all elements with r:id attributes + for elem in xml_root.iter(): + # Check for r:id attribute (relationship ID) + rid_attr = elem.get(f"{{{self.OFFICE_RELATIONSHIPS_NAMESPACE}}}id") + if rid_attr: + xml_rel_path = xml_file.relative_to(self.unpacked_dir) + elem_name = ( + elem.tag.split("}")[-1] if "}" in elem.tag else elem.tag + ) + + # Check if the ID exists + if rid_attr not in rid_to_type: + errors.append( + f" {xml_rel_path}: Line {elem.sourceline}: " + f"<{elem_name}> references non-existent relationship '{rid_attr}' " + f"(valid IDs: {', '.join(sorted(rid_to_type.keys())[:5])}{'...' if len(rid_to_type) > 5 else ''})" + ) + # Check if we have type expectations for this element + elif self.ELEMENT_RELATIONSHIP_TYPES: + expected_type = self._get_expected_relationship_type( + elem_name + ) + if expected_type: + actual_type = rid_to_type[rid_attr] + # Check if the actual type matches or contains the expected type + if expected_type not in actual_type.lower(): + errors.append( + f" {xml_rel_path}: Line {elem.sourceline}: " + f"<{elem_name}> references '{rid_attr}' which points to '{actual_type}' " + f"but should point to a '{expected_type}' relationship" + ) + + except Exception as e: + xml_rel_path = xml_file.relative_to(self.unpacked_dir) + errors.append(f" Error processing {xml_rel_path}: {e}") + + if errors: + print(f"FAILED - Found {len(errors)} relationship ID reference errors:") + for error in errors: + print(error) + print("\nThese ID mismatches will cause the document to appear corrupt!") + return False + else: + if self.verbose: + print("PASSED - All relationship ID references are valid") + return True + + def _get_expected_relationship_type(self, element_name): + """ + Get the expected relationship type for an element. + First checks the explicit mapping, then tries pattern detection. 
+ """ + # Normalize element name to lowercase + elem_lower = element_name.lower() + + # Check explicit mapping first + if elem_lower in self.ELEMENT_RELATIONSHIP_TYPES: + return self.ELEMENT_RELATIONSHIP_TYPES[elem_lower] + + # Try pattern detection for common patterns + # Pattern 1: Elements ending in "Id" often expect a relationship of the prefix type + if elem_lower.endswith("id") and len(elem_lower) > 2: + # e.g., "sldId" -> "sld", "sldMasterId" -> "sldMaster" + prefix = elem_lower[:-2] # Remove "id" + # Check if this might be a compound like "sldMasterId" + if prefix.endswith("master"): + return prefix.lower() + elif prefix.endswith("layout"): + return prefix.lower() + else: + # Simple case like "sldId" -> "slide" + # Common transformations + if prefix == "sld": + return "slide" + return prefix.lower() + + # Pattern 2: Elements ending in "Reference" expect a relationship of the prefix type + if elem_lower.endswith("reference") and len(elem_lower) > 9: + prefix = elem_lower[:-9] # Remove "reference" + return prefix.lower() + + return None + + def validate_content_types(self): + """Validate that all content files are properly declared in [Content_Types].xml.""" + errors = [] + + # Find [Content_Types].xml file + content_types_file = self.unpacked_dir / "[Content_Types].xml" + if not content_types_file.exists(): + print("FAILED - [Content_Types].xml file not found") + return False + + try: + # Parse and get all declared parts and extensions + root = lxml.etree.parse(str(content_types_file)).getroot() + declared_parts = set() + declared_extensions = set() + + # Get Override declarations (specific files) + for override in root.findall( + f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Override" + ): + part_name = override.get("PartName") + if part_name is not None: + declared_parts.add(part_name.lstrip("/")) + + # Get Default declarations (by extension) + for default in root.findall( + f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Default" + ): + extension = default.get("Extension") + if extension is not None: + declared_extensions.add(extension.lower()) + + # Root elements that require content type declaration + declarable_roots = { + "sld", + "sldLayout", + "sldMaster", + "presentation", # PowerPoint + "document", # Word + "workbook", + "worksheet", # Excel + "theme", # Common + } + + # Common media file extensions that should be declared + media_extensions = { + "png": "image/png", + "jpg": "image/jpeg", + "jpeg": "image/jpeg", + "gif": "image/gif", + "bmp": "image/bmp", + "tiff": "image/tiff", + "wmf": "image/x-wmf", + "emf": "image/x-emf", + } + + # Get all files in the unpacked directory + all_files = list(self.unpacked_dir.rglob("*")) + all_files = [f for f in all_files if f.is_file()] + + # Check all XML files for Override declarations + for xml_file in self.xml_files: + path_str = str(xml_file.relative_to(self.unpacked_dir)).replace( + "\\", "/" + ) + + # Skip non-content files + if any( + skip in path_str + for skip in [".rels", "[Content_Types]", "docProps/", "_rels/"] + ): + continue + + try: + root_tag = lxml.etree.parse(str(xml_file)).getroot().tag + root_name = root_tag.split("}")[-1] if "}" in root_tag else root_tag + + if root_name in declarable_roots and path_str not in declared_parts: + errors.append( + f" {path_str}: File with <{root_name}> root not declared in [Content_Types].xml" + ) + + except Exception: + continue # Skip unparseable files + + # Check all non-XML files for Default extension declarations + for file_path in all_files: + # Skip XML files and metadata files (already 
checked above)
+                if file_path.suffix.lower() in {".xml", ".rels"}:
+                    continue
+                if file_path.name == "[Content_Types].xml":
+                    continue
+                if "_rels" in file_path.parts or "docProps" in file_path.parts:
+                    continue
+
+                extension = file_path.suffix.lstrip(".").lower()
+                if extension and extension not in declared_extensions:
+                    # Check if it's a known media extension that should be declared
+                    if extension in media_extensions:
+                        relative_path = file_path.relative_to(self.unpacked_dir)
+                        errors.append(
+                            f'  {relative_path}: File with extension \'{extension}\' not declared in [Content_Types].xml - should add: <Default Extension="{extension}" ContentType="{media_extensions[extension]}"/>'
+                        )
+
+        except Exception as e:
+            errors.append(f"  Error parsing [Content_Types].xml: {e}")
+
+        if errors:
+            print(f"FAILED - Found {len(errors)} content type declaration errors:")
+            for error in errors:
+                print(error)
+            return False
+        else:
+            if self.verbose:
+                print(
+                    "PASSED - All content files are properly declared in [Content_Types].xml"
+                )
+            return True
+
+    def validate_file_against_xsd(self, xml_file, verbose=False):
+        """Validate a single XML file against XSD schema, comparing with original.
+
+        Args:
+            xml_file: Path to XML file to validate
+            verbose: Enable verbose output
+
+        Returns:
+            tuple: (is_valid, new_errors_set) where is_valid is True/False/None (skipped)
+        """
+        # Resolve both paths to handle symlinks
+        xml_file = Path(xml_file).resolve()
+        unpacked_dir = self.unpacked_dir.resolve()
+
+        # Validate current file
+        is_valid, current_errors = self._validate_single_file_xsd(
+            xml_file, unpacked_dir
+        )
+
+        if is_valid is None:
+            return None, set()  # Skipped
+        elif is_valid:
+            return True, set()  # Valid, no errors
+
+        # Get errors from original file for this specific file
+        original_errors = self._get_original_file_errors(xml_file)
+
+        # Compare with original (both are guaranteed to be sets here)
+        assert current_errors is not None
+        new_errors = current_errors - original_errors
+
+        if new_errors:
+            if verbose:
+                relative_path = xml_file.relative_to(unpacked_dir)
+                print(f"FAILED - {relative_path}: {len(new_errors)} new error(s)")
+                for error in list(new_errors)[:3]:
+                    truncated = error[:250] + "..." if len(error) > 250 else error
+                    print(f"  - {truncated}")
+            return False, new_errors
+        else:
+            # All errors existed in original
+            if verbose:
+                print(
+                    f"PASSED - No new errors (original had {len(current_errors)} errors)"
+                )
+            return True, set()
+
+    def validate_against_xsd(self):
+        """Validate XML files against XSD schemas, showing only new errors compared to original."""
+        new_errors = []
+        original_error_count = 0
+        valid_count = 0
+        skipped_count = 0
+
+        for xml_file in self.xml_files:
+            relative_path = str(xml_file.relative_to(self.unpacked_dir))
+            is_valid, new_file_errors = self.validate_file_against_xsd(
+                xml_file, verbose=False
+            )
+
+            if is_valid is None:
+                skipped_count += 1
+                continue
+            elif is_valid and not new_file_errors:
+                valid_count += 1
+                continue
+            elif is_valid:
+                # Had errors but all existed in original
+                original_error_count += 1
+                valid_count += 1
+                continue
+
+            # Has new errors
+            new_errors.append(f"  {relative_path}: {len(new_file_errors)} new error(s)")
+            for error in list(new_file_errors)[:3]:  # Show first 3 errors
+                new_errors.append(
+                    f"    - {error[:250]}..."
if len(error) > 250 else f" - {error}" + ) + + # Print summary + if self.verbose: + print(f"Validated {len(self.xml_files)} files:") + print(f" - Valid: {valid_count}") + print(f" - Skipped (no schema): {skipped_count}") + if original_error_count: + print(f" - With original errors (ignored): {original_error_count}") + print( + f" - With NEW errors: {len(new_errors) > 0 and len([e for e in new_errors if not e.startswith(' ')]) or 0}" + ) + + if new_errors: + print("\nFAILED - Found NEW validation errors:") + for error in new_errors: + print(error) + return False + else: + if self.verbose: + print("\nPASSED - No new XSD validation errors introduced") + return True + + def _get_schema_path(self, xml_file): + """Determine the appropriate schema path for an XML file.""" + # Check exact filename match + if xml_file.name in self.SCHEMA_MAPPINGS: + return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.name] + + # Check .rels files + if xml_file.suffix == ".rels": + return self.schemas_dir / self.SCHEMA_MAPPINGS[".rels"] + + # Check chart files + if "charts/" in str(xml_file) and xml_file.name.startswith("chart"): + return self.schemas_dir / self.SCHEMA_MAPPINGS["chart"] + + # Check theme files + if "theme/" in str(xml_file) and xml_file.name.startswith("theme"): + return self.schemas_dir / self.SCHEMA_MAPPINGS["theme"] + + # Check if file is in a main content folder and use appropriate schema + if xml_file.parent.name in self.MAIN_CONTENT_FOLDERS: + return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.parent.name] + + return None + + def _clean_ignorable_namespaces(self, xml_doc): + """Remove attributes and elements not in allowed namespaces.""" + # Create a clean copy + xml_string = lxml.etree.tostring(xml_doc, encoding="unicode") + xml_copy = lxml.etree.fromstring(xml_string) + + # Remove attributes not in allowed namespaces + for elem in xml_copy.iter(): + attrs_to_remove = [] + + for attr in elem.attrib: + # Check if attribute is from a namespace other than allowed ones + if "{" in attr: + ns = attr.split("}")[0][1:] + if ns not in self.OOXML_NAMESPACES: + attrs_to_remove.append(attr) + + # Remove collected attributes + for attr in attrs_to_remove: + del elem.attrib[attr] + + # Remove elements not in allowed namespaces + self._remove_ignorable_elements(xml_copy) + + return lxml.etree.ElementTree(xml_copy) + + def _remove_ignorable_elements(self, root): + """Recursively remove all elements not in allowed namespaces.""" + elements_to_remove = [] + + # Find elements to remove + for elem in list(root): + # Skip non-element nodes (comments, processing instructions, etc.) + if not hasattr(elem, "tag") or callable(elem.tag): + continue + + tag_str = str(elem.tag) + if tag_str.startswith("{"): + ns = tag_str.split("}")[0][1:] + if ns not in self.OOXML_NAMESPACES: + elements_to_remove.append(elem) + continue + + # Recursively clean child elements + self._remove_ignorable_elements(elem) + + # Remove collected elements + for elem in elements_to_remove: + root.remove(elem) + + def _preprocess_for_mc_ignorable(self, xml_doc): + """Preprocess XML to handle mc:Ignorable attribute properly.""" + # Remove mc:Ignorable attributes before validation + root = xml_doc.getroot() + + # Remove mc:Ignorable attribute from root + if f"{{{self.MC_NAMESPACE}}}Ignorable" in root.attrib: + del root.attrib[f"{{{self.MC_NAMESPACE}}}Ignorable"] + + return xml_doc + + def _validate_single_file_xsd(self, xml_file, base_path): + """Validate a single XML file against XSD schema. 
Returns (is_valid, errors_set).""" + schema_path = self._get_schema_path(xml_file) + if not schema_path: + return None, None # Skip file + + try: + # Load schema + with open(schema_path, "rb") as xsd_file: + parser = lxml.etree.XMLParser() + xsd_doc = lxml.etree.parse( + xsd_file, parser=parser, base_url=str(schema_path) + ) + schema = lxml.etree.XMLSchema(xsd_doc) + + # Load and preprocess XML + with open(xml_file, "r") as f: + xml_doc = lxml.etree.parse(f) + + xml_doc, _ = self._remove_template_tags_from_text_nodes(xml_doc) + xml_doc = self._preprocess_for_mc_ignorable(xml_doc) + + # Clean ignorable namespaces if needed + relative_path = xml_file.relative_to(base_path) + if ( + relative_path.parts + and relative_path.parts[0] in self.MAIN_CONTENT_FOLDERS + ): + xml_doc = self._clean_ignorable_namespaces(xml_doc) + + # Validate + if schema.validate(xml_doc): + return True, set() + else: + errors = set() + for error in schema.error_log: + # Store normalized error message (without line numbers for comparison) + errors.add(error.message) + return False, errors + + except Exception as e: + return False, {str(e)} + + def _get_original_file_errors(self, xml_file): + """Get XSD validation errors from a single file in the original document. + + Args: + xml_file: Path to the XML file in unpacked_dir to check + + Returns: + set: Set of error messages from the original file + """ + import tempfile + import zipfile + + # Resolve both paths to handle symlinks (e.g., /var vs /private/var on macOS) + xml_file = Path(xml_file).resolve() + unpacked_dir = self.unpacked_dir.resolve() + relative_path = xml_file.relative_to(unpacked_dir) + + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + # Extract original file + with zipfile.ZipFile(self.original_file, "r") as zip_ref: + zip_ref.extractall(temp_path) + + # Find corresponding file in original + original_xml_file = temp_path / relative_path + + if not original_xml_file.exists(): + # File didn't exist in original, so no original errors + return set() + + # Validate the specific file in original + is_valid, errors = self._validate_single_file_xsd( + original_xml_file, temp_path + ) + return errors if errors else set() + + def _remove_template_tags_from_text_nodes(self, xml_doc): + """Remove template tags from XML text nodes and collect warnings. + + Template tags follow the pattern {{ ... }} and are used as placeholders + for content replacement. They should be removed from text content before + XSD validation while preserving XML structure. 
+ + Returns: + tuple: (cleaned_xml_doc, warnings_list) + """ + warnings = [] + template_pattern = re.compile(r"\{\{[^}]*\}\}") + + # Create a copy of the document to avoid modifying the original + xml_string = lxml.etree.tostring(xml_doc, encoding="unicode") + xml_copy = lxml.etree.fromstring(xml_string) + + def process_text_content(text, content_type): + if not text: + return text + matches = list(template_pattern.finditer(text)) + if matches: + for match in matches: + warnings.append( + f"Found template tag in {content_type}: {match.group()}" + ) + return template_pattern.sub("", text) + return text + + # Process all text nodes in the document + for elem in xml_copy.iter(): + # Skip processing if this is a w:t element + if not hasattr(elem, "tag") or callable(elem.tag): + continue + tag_str = str(elem.tag) + if tag_str.endswith("}t") or tag_str == "t": + continue + + elem.text = process_text_content(elem.text, "text content") + elem.tail = process_text_content(elem.tail, "tail content") + + return lxml.etree.ElementTree(xml_copy), warnings + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run directly.") diff --git a/.claude/skills/pptx/ooxml/scripts/validation/docx.py b/.claude/skills/pptx/ooxml/scripts/validation/docx.py new file mode 100644 index 0000000..602c470 --- /dev/null +++ b/.claude/skills/pptx/ooxml/scripts/validation/docx.py @@ -0,0 +1,274 @@ +""" +Validator for Word document XML files against XSD schemas. +""" + +import re +import tempfile +import zipfile + +import lxml.etree + +from .base import BaseSchemaValidator + + +class DOCXSchemaValidator(BaseSchemaValidator): + """Validator for Word document XML files against XSD schemas.""" + + # Word-specific namespace + WORD_2006_NAMESPACE = "http://schemas.openxmlformats.org/wordprocessingml/2006/main" + + # Word-specific element to relationship type mappings + # Start with empty mapping - add specific cases as we discover them + ELEMENT_RELATIONSHIP_TYPES = {} + + def validate(self): + """Run all validation checks and return True if all pass.""" + # Test 0: XML well-formedness + if not self.validate_xml(): + return False + + # Test 1: Namespace declarations + all_valid = True + if not self.validate_namespaces(): + all_valid = False + + # Test 2: Unique IDs + if not self.validate_unique_ids(): + all_valid = False + + # Test 3: Relationship and file reference validation + if not self.validate_file_references(): + all_valid = False + + # Test 4: Content type declarations + if not self.validate_content_types(): + all_valid = False + + # Test 5: XSD schema validation + if not self.validate_against_xsd(): + all_valid = False + + # Test 6: Whitespace preservation + if not self.validate_whitespace_preservation(): + all_valid = False + + # Test 7: Deletion validation + if not self.validate_deletions(): + all_valid = False + + # Test 8: Insertion validation + if not self.validate_insertions(): + all_valid = False + + # Test 9: Relationship ID reference validation + if not self.validate_all_relationship_ids(): + all_valid = False + + # Count and compare paragraphs + self.compare_paragraph_counts() + + return all_valid + + def validate_whitespace_preservation(self): + """ + Validate that w:t elements with whitespace have xml:space='preserve'. 
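+ + For example (hypothetical content): <w:t xml:space="preserve">Hello </w:t> keeps its trailing space when the file is re-opened in Word, whereas the same run without the attribute may have the space collapsed.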
+ """ + errors = [] + + for xml_file in self.xml_files: + # Only check document.xml files + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + + # Find all w:t elements + for elem in root.iter(f"{{{self.WORD_2006_NAMESPACE}}}t"): + if elem.text: + text = elem.text + # Check if text starts or ends with whitespace + if re.match(r"^\s.*", text) or re.match(r".*\s$", text): + # Check if xml:space="preserve" attribute exists + xml_space_attr = f"{{{self.XML_NAMESPACE}}}space" + if ( + xml_space_attr not in elem.attrib + or elem.attrib[xml_space_attr] != "preserve" + ): + # Show a preview of the text + text_preview = ( + repr(text)[:50] + "..." + if len(repr(text)) > 50 + else repr(text) + ) + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: w:t element with whitespace missing xml:space='preserve': {text_preview}" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} whitespace preservation violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All whitespace is properly preserved") + return True + + def validate_deletions(self): + """ + Validate that w:t elements are not within w:del elements. + For some reason, XSD validation does not catch this, so we do it manually. + """ + errors = [] + + for xml_file in self.xml_files: + # Only check document.xml files + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + + # Find all w:t elements that are descendants of w:del elements + namespaces = {"w": self.WORD_2006_NAMESPACE} + xpath_expression = ".//w:del//w:t" + problematic_t_elements = root.xpath( + xpath_expression, namespaces=namespaces + ) + for t_elem in problematic_t_elements: + if t_elem.text: + # Show a preview of the text + text_preview = ( + repr(t_elem.text)[:50] + "..." 
+ if len(repr(t_elem.text)) > 50 + else repr(t_elem.text) + ) + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {t_elem.sourceline}: <w:t> found within <w:del>: {text_preview}" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} deletion validation violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - No w:t elements found within w:del elements") + return True + + def count_paragraphs_in_unpacked(self): + """Count the number of paragraphs in the unpacked document.""" + count = 0 + + for xml_file in self.xml_files: + # Only check document.xml files + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + # Count all w:p elements + paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p") + count = len(paragraphs) + except Exception as e: + print(f"Error counting paragraphs in unpacked document: {e}") + + return count + + def count_paragraphs_in_original(self): + """Count the number of paragraphs in the original docx file.""" + count = 0 + + try: + # Create temporary directory to unpack original + with tempfile.TemporaryDirectory() as temp_dir: + # Unpack original docx + with zipfile.ZipFile(self.original_file, "r") as zip_ref: + zip_ref.extractall(temp_dir) + + # Parse document.xml + doc_xml_path = temp_dir + "/word/document.xml" + root = lxml.etree.parse(doc_xml_path).getroot() + + # Count all w:p elements + paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p") + count = len(paragraphs) + + except Exception as e: + print(f"Error counting paragraphs in original document: {e}") + + return count + + def validate_insertions(self): + """ + Validate that w:delText elements are not within w:ins elements. + w:delText is only allowed in w:ins if nested within a w:del. + """ + errors = [] + + for xml_file in self.xml_files: + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + namespaces = {"w": self.WORD_2006_NAMESPACE} + + # Find w:delText in w:ins that are NOT within w:del + invalid_elements = root.xpath( + ".//w:ins//w:delText[not(ancestor::w:del)]", + namespaces=namespaces + ) + + for elem in invalid_elements: + text_preview = ( + repr(elem.text or "")[:50] + "..."
+ if len(repr(elem.text or "")) > 50 + else repr(elem.text or "") + ) + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: <w:delText> within <w:ins>: {text_preview}" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} insertion validation violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - No w:delText elements within w:ins elements") + return True + + def compare_paragraph_counts(self): + """Compare paragraph counts between original and new document.""" + original_count = self.count_paragraphs_in_original() + new_count = self.count_paragraphs_in_unpacked() + + diff = new_count - original_count + diff_str = f"+{diff}" if diff > 0 else str(diff) + print(f"\nParagraphs: {original_count} → {new_count} ({diff_str})") + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run directly.") diff --git a/.claude/skills/pptx/ooxml/scripts/validation/pptx.py b/.claude/skills/pptx/ooxml/scripts/validation/pptx.py new file mode 100644 index 0000000..66d5b1e --- /dev/null +++ b/.claude/skills/pptx/ooxml/scripts/validation/pptx.py @@ -0,0 +1,315 @@ +""" +Validator for PowerPoint presentation XML files against XSD schemas. +""" + +import re + +from .base import BaseSchemaValidator + + +class PPTXSchemaValidator(BaseSchemaValidator): + """Validator for PowerPoint presentation XML files against XSD schemas.""" + + # PowerPoint presentation namespace + PRESENTATIONML_NAMESPACE = ( + "http://schemas.openxmlformats.org/presentationml/2006/main" + ) + + # PowerPoint-specific element to relationship type mappings + ELEMENT_RELATIONSHIP_TYPES = { + "sldid": "slide", + "sldmasterid": "slidemaster", + "notesmasterid": "notesmaster", + "sldlayoutid": "slidelayout", + "themeid": "theme", + "tablestyleid": "tablestyles", + } + + def validate(self): + """Run all validation checks and return True if all pass.""" + # Test 0: XML well-formedness + if not self.validate_xml(): + return False + + # Test 1: Namespace declarations + all_valid = True + if not self.validate_namespaces(): + all_valid = False + + # Test 2: Unique IDs + if not self.validate_unique_ids(): + all_valid = False + + # Test 3: UUID ID validation + if not self.validate_uuid_ids(): + all_valid = False + + # Test 4: Relationship and file reference validation + if not self.validate_file_references(): + all_valid = False + + # Test 5: Slide layout ID validation + if not self.validate_slide_layout_ids(): + all_valid = False + + # Test 6: Content type declarations + if not self.validate_content_types(): + all_valid = False + + # Test 7: XSD schema validation + if not self.validate_against_xsd(): + all_valid = False + + # Test 8: Notes slide reference validation + if not self.validate_notes_slide_references(): + all_valid = False + + # Test 9: Relationship ID reference validation + if not self.validate_all_relationship_ids(): + all_valid = False + + # Test 10: Duplicate slide layout references validation + if not self.validate_no_duplicate_slide_layouts(): + all_valid = False + + return all_valid + + def validate_uuid_ids(self): + """Validate that ID attributes that look like UUIDs contain only hex values.""" + import lxml.etree + + errors = [] + # UUID pattern: 8-4-4-4-12 hex digits with optional braces/hyphens + uuid_pattern = re.compile( +
r"^[\{\(]?[0-9A-Fa-f]{8}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{12}[\}\)]?$" + ) + + for xml_file in self.xml_files: + try: + root = lxml.etree.parse(str(xml_file)).getroot() + + # Check all elements for ID attributes + for elem in root.iter(): + for attr, value in elem.attrib.items(): + # Check if this is an ID attribute + attr_name = attr.split("}")[-1].lower() + if attr_name == "id" or attr_name.endswith("id"): + # Check if value looks like a UUID (has the right length and pattern structure) + if self._looks_like_uuid(value): + # Validate that it contains only hex characters in the right positions + if not uuid_pattern.match(value): + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: ID '{value}' appears to be a UUID but contains invalid hex characters" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} UUID ID validation errors:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All UUID-like IDs contain valid hex values") + return True + + def _looks_like_uuid(self, value): + """Check if a value has the general structure of a UUID.""" + # Remove common UUID delimiters + clean_value = value.strip("{}()").replace("-", "") + # Check if it's 32 hex-like characters (could include invalid hex chars) + return len(clean_value) == 32 and all(c.isalnum() for c in clean_value) + + def validate_slide_layout_ids(self): + """Validate that sldLayoutId elements in slide masters reference valid slide layouts.""" + import lxml.etree + + errors = [] + + # Find all slide master files + slide_masters = list(self.unpacked_dir.glob("ppt/slideMasters/*.xml")) + + if not slide_masters: + if self.verbose: + print("PASSED - No slide masters found") + return True + + for slide_master in slide_masters: + try: + # Parse the slide master file + root = lxml.etree.parse(str(slide_master)).getroot() + + # Find the corresponding _rels file for this slide master + rels_file = slide_master.parent / "_rels" / f"{slide_master.name}.rels" + + if not rels_file.exists(): + errors.append( + f" {slide_master.relative_to(self.unpacked_dir)}: " + f"Missing relationships file: {rels_file.relative_to(self.unpacked_dir)}" + ) + continue + + # Parse the relationships file + rels_root = lxml.etree.parse(str(rels_file)).getroot() + + # Build a set of valid relationship IDs that point to slide layouts + valid_layout_rids = set() + for rel in rels_root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ): + rel_type = rel.get("Type", "") + if "slideLayout" in rel_type: + valid_layout_rids.add(rel.get("Id")) + + # Find all sldLayoutId elements in the slide master + for sld_layout_id in root.findall( + f".//{{{self.PRESENTATIONML_NAMESPACE}}}sldLayoutId" + ): + r_id = sld_layout_id.get( + f"{{{self.OFFICE_RELATIONSHIPS_NAMESPACE}}}id" + ) + layout_id = sld_layout_id.get("id") + + if r_id and r_id not in valid_layout_rids: + errors.append( + f" {slide_master.relative_to(self.unpacked_dir)}: " + f"Line {sld_layout_id.sourceline}: sldLayoutId with id='{layout_id}' " + f"references r:id='{r_id}' which is not found in slide layout relationships" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {slide_master.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} slide layout ID validation 
errors:") + for error in errors: + print(error) + print( + "Remove invalid references or add missing slide layouts to the relationships file." + ) + return False + else: + if self.verbose: + print("PASSED - All slide layout IDs reference valid slide layouts") + return True + + def validate_no_duplicate_slide_layouts(self): + """Validate that each slide has exactly one slideLayout reference.""" + import lxml.etree + + errors = [] + slide_rels_files = list(self.unpacked_dir.glob("ppt/slides/_rels/*.xml.rels")) + + for rels_file in slide_rels_files: + try: + root = lxml.etree.parse(str(rels_file)).getroot() + + # Find all slideLayout relationships + layout_rels = [ + rel + for rel in root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ) + if "slideLayout" in rel.get("Type", "") + ] + + if len(layout_rels) > 1: + errors.append( + f" {rels_file.relative_to(self.unpacked_dir)}: has {len(layout_rels)} slideLayout references" + ) + + except Exception as e: + errors.append( + f" {rels_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print("FAILED - Found slides with duplicate slideLayout references:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All slides have exactly one slideLayout reference") + return True + + def validate_notes_slide_references(self): + """Validate that each notesSlide file is referenced by only one slide.""" + import lxml.etree + + errors = [] + notes_slide_references = {} # Track which slides reference each notesSlide + + # Find all slide relationship files + slide_rels_files = list(self.unpacked_dir.glob("ppt/slides/_rels/*.xml.rels")) + + if not slide_rels_files: + if self.verbose: + print("PASSED - No slide relationship files found") + return True + + for rels_file in slide_rels_files: + try: + # Parse the relationships file + root = lxml.etree.parse(str(rels_file)).getroot() + + # Find all notesSlide relationships + for rel in root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ): + rel_type = rel.get("Type", "") + if "notesSlide" in rel_type: + target = rel.get("Target", "") + if target: + # Normalize the target path to handle relative paths + normalized_target = target.replace("../", "") + + # Track which slide references this notesSlide + slide_name = rels_file.stem.replace( + ".xml", "" + ) # e.g., "slide1" + + if normalized_target not in notes_slide_references: + notes_slide_references[normalized_target] = [] + notes_slide_references[normalized_target].append( + (slide_name, rels_file) + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {rels_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + # Check for duplicate references + for target, references in notes_slide_references.items(): + if len(references) > 1: + slide_names = [ref[0] for ref in references] + errors.append( + f" Notes slide '{target}' is referenced by multiple slides: {', '.join(slide_names)}" + ) + for slide_name, rels_file in references: + errors.append(f" - {rels_file.relative_to(self.unpacked_dir)}") + + if errors: + print( + f"FAILED - Found {len([e for e in errors if not e.startswith(' ')])} notes slide reference validation errors:" + ) + for error in errors: + print(error) + print("Each slide may optionally have its own slide file.") + return False + else: + if self.verbose: + print("PASSED - All notes slide references are unique") + return True + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run 
directly.") diff --git a/.claude/skills/pptx/ooxml/scripts/validation/redlining.py b/.claude/skills/pptx/ooxml/scripts/validation/redlining.py new file mode 100644 index 0000000..7ed425e --- /dev/null +++ b/.claude/skills/pptx/ooxml/scripts/validation/redlining.py @@ -0,0 +1,279 @@ +""" +Validator for tracked changes in Word documents. +""" + +import subprocess +import tempfile +import zipfile +from pathlib import Path + + +class RedliningValidator: + """Validator for tracked changes in Word documents.""" + + def __init__(self, unpacked_dir, original_docx, verbose=False): + self.unpacked_dir = Path(unpacked_dir) + self.original_docx = Path(original_docx) + self.verbose = verbose + self.namespaces = { + "w": "http://schemas.openxmlformats.org/wordprocessingml/2006/main" + } + + def validate(self): + """Main validation method that returns True if valid, False otherwise.""" + # Verify unpacked directory exists and has correct structure + modified_file = self.unpacked_dir / "word" / "document.xml" + if not modified_file.exists(): + print(f"FAILED - Modified document.xml not found at {modified_file}") + return False + + # First, check if there are any tracked changes by Claude to validate + try: + import xml.etree.ElementTree as ET + + tree = ET.parse(modified_file) + root = tree.getroot() + + # Check for w:del or w:ins tags authored by Claude + del_elements = root.findall(".//w:del", self.namespaces) + ins_elements = root.findall(".//w:ins", self.namespaces) + + # Filter to only include changes by Claude + claude_del_elements = [ + elem + for elem in del_elements + if elem.get(f"{{{self.namespaces['w']}}}author") == "Claude" + ] + claude_ins_elements = [ + elem + for elem in ins_elements + if elem.get(f"{{{self.namespaces['w']}}}author") == "Claude" + ] + + # Redlining validation is only needed if tracked changes by Claude have been used. 
+ if not claude_del_elements and not claude_ins_elements: + if self.verbose: + print("PASSED - No tracked changes by Claude found.") + return True + + except Exception: + # If we can't parse the XML, continue with full validation + pass + + # Create temporary directory for unpacking original docx + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + # Unpack original docx + try: + with zipfile.ZipFile(self.original_docx, "r") as zip_ref: + zip_ref.extractall(temp_path) + except Exception as e: + print(f"FAILED - Error unpacking original docx: {e}") + return False + + original_file = temp_path / "word" / "document.xml" + if not original_file.exists(): + print( + f"FAILED - Original document.xml not found in {self.original_docx}" + ) + return False + + # Parse both XML files using xml.etree.ElementTree for redlining validation + try: + import xml.etree.ElementTree as ET + + modified_tree = ET.parse(modified_file) + modified_root = modified_tree.getroot() + original_tree = ET.parse(original_file) + original_root = original_tree.getroot() + except ET.ParseError as e: + print(f"FAILED - Error parsing XML files: {e}") + return False + + # Remove Claude's tracked changes from both documents + self._remove_claude_tracked_changes(original_root) + self._remove_claude_tracked_changes(modified_root) + + # Extract and compare text content + modified_text = self._extract_text_content(modified_root) + original_text = self._extract_text_content(original_root) + + if modified_text != original_text: + # Show detailed character-level differences for each paragraph + error_message = self._generate_detailed_diff( + original_text, modified_text + ) + print(error_message) + return False + + if self.verbose: + print("PASSED - All changes by Claude are properly tracked") + return True + + def _generate_detailed_diff(self, original_text, modified_text): + """Generate detailed word-level differences using git word diff.""" + error_parts = [ + "FAILED - Document text doesn't match after removing Claude's tracked changes", + "", + "Likely causes:", + " 1. Modified text inside another author's <w:ins> or <w:del> tags", + " 2. Made edits without proper tracked changes", + " 3.
Didn't nest <w:del> inside <w:ins> when deleting another's insertion", + "", + "For pre-redlined documents, use correct patterns:", + " - To reject another's INSERTION: Nest <w:del> inside their <w:ins>", + " - To restore another's DELETION: Add new <w:ins> AFTER their <w:del>", + "", + ] + + # Show git word diff + git_diff = self._get_git_word_diff(original_text, modified_text) + if git_diff: + error_parts.extend(["Differences:", "============", git_diff]) + else: + error_parts.append("Unable to generate word diff (git not available)") + + return "\n".join(error_parts) + + def _get_git_word_diff(self, original_text, modified_text): + """Generate word diff using git with character-level precision.""" + try: + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + # Create two files + original_file = temp_path / "original.txt" + modified_file = temp_path / "modified.txt" + + original_file.write_text(original_text, encoding="utf-8") + modified_file.write_text(modified_text, encoding="utf-8") + + # Try character-level diff first for precise differences + result = subprocess.run( + [ + "git", + "diff", + "--word-diff=plain", + "--word-diff-regex=.", # Character-by-character diff + "-U0", # Zero lines of context - show only changed lines + "--no-index", + str(original_file), + str(modified_file), + ], + capture_output=True, + text=True, + ) + + if result.stdout.strip(): + # Clean up the output - remove git diff header lines + lines = result.stdout.split("\n") + # Skip the header lines (diff --git, index, +++, ---, @@) + content_lines = [] + in_content = False + for line in lines: + if line.startswith("@@"): + in_content = True + continue + if in_content and line.strip(): + content_lines.append(line) + + if content_lines: + return "\n".join(content_lines) + + # Fallback to word-level diff if character-level is too verbose + result = subprocess.run( + [ + "git", + "diff", + "--word-diff=plain", + "-U0", # Zero lines of context + "--no-index", + str(original_file), + str(modified_file), + ], + capture_output=True, + text=True, + ) + + if result.stdout.strip(): + lines = result.stdout.split("\n") + content_lines = [] + in_content = False + for line in lines: + if line.startswith("@@"): + in_content = True + continue + if in_content and line.strip(): + content_lines.append(line) + return "\n".join(content_lines) + + except (subprocess.CalledProcessError, FileNotFoundError, Exception): + # Git not available or other error, return None to use fallback + pass + + return None + + def _remove_claude_tracked_changes(self, root): + """Remove tracked changes authored by Claude from the XML root.""" + ins_tag = f"{{{self.namespaces['w']}}}ins" + del_tag = f"{{{self.namespaces['w']}}}del" + author_attr = f"{{{self.namespaces['w']}}}author" + + # Remove w:ins elements + for parent in root.iter(): + to_remove = [] + for child in parent: + if child.tag == ins_tag and child.get(author_attr) == "Claude": + to_remove.append(child) + for elem in to_remove: + parent.remove(elem) + + # Unwrap content in w:del elements where author is "Claude" + deltext_tag = f"{{{self.namespaces['w']}}}delText" + t_tag = f"{{{self.namespaces['w']}}}t" + + for parent in root.iter(): + to_process = [] + for child in parent: + if child.tag == del_tag and child.get(author_attr) == "Claude": + to_process.append((child, list(parent).index(child))) + + # Process in reverse order to maintain indices + for del_elem, del_index in reversed(to_process): + # Convert w:delText to w:t before moving + for elem in del_elem.iter(): + if elem.tag == deltext_tag: + elem.tag
= t_tag + + # Move all children of w:del to its parent before removing w:del + for child in reversed(list(del_elem)): + parent.insert(del_index, child) + parent.remove(del_elem) + + def _extract_text_content(self, root): + """Extract text content from Word XML, preserving paragraph structure. + + Empty paragraphs are skipped to avoid false positives when tracked + insertions add only structural elements without text content. + """ + p_tag = f"{{{self.namespaces['w']}}}p" + t_tag = f"{{{self.namespaces['w']}}}t" + + paragraphs = [] + for p_elem in root.findall(f".//{p_tag}"): + # Get all text elements within this paragraph + text_parts = [] + for t_elem in p_elem.findall(f".//{t_tag}"): + if t_elem.text: + text_parts.append(t_elem.text) + paragraph_text = "".join(text_parts) + # Skip empty paragraphs - they don't affect content validation + if paragraph_text: + paragraphs.append(paragraph_text) + + return "\n".join(paragraphs) + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run directly.") diff --git a/.claude/skills/pptx/scripts/html2pptx.js b/.claude/skills/pptx/scripts/html2pptx.js new file mode 100644 index 0000000..437bf7c --- /dev/null +++ b/.claude/skills/pptx/scripts/html2pptx.js @@ -0,0 +1,979 @@ +/** + * html2pptx - Convert HTML slide to pptxgenjs slide with positioned elements + * + * USAGE: + * const pptx = new pptxgen(); + * pptx.layout = 'LAYOUT_16x9'; // Must match HTML body dimensions + * + * const { slide, placeholders } = await html2pptx('slide.html', pptx); + * slide.addChart(pptx.charts.LINE, data, placeholders[0]); + * + * await pptx.writeFile('output.pptx'); + * + * FEATURES: + * - Converts HTML to PowerPoint with accurate positioning + * - Supports text, images, shapes, and bullet lists + * - Extracts placeholder elements (class="placeholder") with positions + * - Handles CSS gradients, borders, and margins + * + * VALIDATION: + * - Uses body width/height from HTML for viewport sizing + * - Throws error if HTML dimensions don't match presentation layout + * - Throws error if content overflows body (with overflow details) + * + * RETURNS: + * { slide, placeholders } where placeholders is an array of { id, x, y, w, h } + */ + +const { chromium } = require('playwright'); +const path = require('path'); +const sharp = require('sharp'); + +const PT_PER_PX = 0.75; +const PX_PER_IN = 96; +const EMU_PER_IN = 914400; + +// Helper: Get body dimensions and check for overflow +async function getBodyDimensions(page) { + const bodyDimensions = await page.evaluate(() => { + const body = document.body; + const style = window.getComputedStyle(body); + + return { + width: parseFloat(style.width), + height: parseFloat(style.height), + scrollWidth: body.scrollWidth, + scrollHeight: body.scrollHeight + }; + }); + + const errors = []; + const widthOverflowPx = Math.max(0, bodyDimensions.scrollWidth - bodyDimensions.width - 1); + const heightOverflowPx = Math.max(0, bodyDimensions.scrollHeight - bodyDimensions.height - 1); + + const widthOverflowPt = widthOverflowPx * PT_PER_PX; + const heightOverflowPt = heightOverflowPx * PT_PER_PX; + + if (widthOverflowPt > 0 || heightOverflowPt > 0) { + const directions = []; + if (widthOverflowPt > 0) directions.push(`${widthOverflowPt.toFixed(1)}pt horizontally`); + if (heightOverflowPt > 0) directions.push(`${heightOverflowPt.toFixed(1)}pt vertically`); + const reminder = heightOverflowPt > 0 ? 
' (Remember: leave 0.5" margin at bottom of slide)' : ''; + errors.push(`HTML content overflows body by ${directions.join(' and ')}${reminder}`); + } + + return { ...bodyDimensions, errors }; +} + +// Helper: Validate dimensions match presentation layout +function validateDimensions(bodyDimensions, pres) { + const errors = []; + const widthInches = bodyDimensions.width / PX_PER_IN; + const heightInches = bodyDimensions.height / PX_PER_IN; + + if (pres.presLayout) { + const layoutWidth = pres.presLayout.width / EMU_PER_IN; + const layoutHeight = pres.presLayout.height / EMU_PER_IN; + + if (Math.abs(layoutWidth - widthInches) > 0.1 || Math.abs(layoutHeight - heightInches) > 0.1) { + errors.push( + `HTML dimensions (${widthInches.toFixed(1)}" × ${heightInches.toFixed(1)}") ` + + `don't match presentation layout (${layoutWidth.toFixed(1)}" × ${layoutHeight.toFixed(1)}")` + ); + } + } + return errors; +} + +function validateTextBoxPosition(slideData, bodyDimensions) { + const errors = []; + const slideHeightInches = bodyDimensions.height / PX_PER_IN; + const minBottomMargin = 0.5; // 0.5 inches from bottom + + for (const el of slideData.elements) { + // Check text elements (p, h1-h6, list) + if (['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'list'].includes(el.type)) { + const fontSize = el.style?.fontSize || 0; + const bottomEdge = el.position.y + el.position.h; + const distanceFromBottom = slideHeightInches - bottomEdge; + + if (fontSize > 12 && distanceFromBottom < minBottomMargin) { + const getText = () => { + if (typeof el.text === 'string') return el.text; + if (Array.isArray(el.text)) return el.text.find(t => t.text)?.text || ''; + if (Array.isArray(el.items)) return el.items.find(item => item.text)?.text || ''; + return ''; + }; + const textPrefix = getText().substring(0, 50) + (getText().length > 50 ? '...' : ''); + + errors.push( + `Text box "${textPrefix}" ends too close to bottom edge ` + + `(${distanceFromBottom.toFixed(2)}" from bottom, minimum ${minBottomMargin}" required)` + ); + } + } + } + + return errors; +} + +// Helper: Add background to slide +async function addBackground(slideData, targetSlide, tmpDir) { + if (slideData.background.type === 'image' && slideData.background.path) { + let imagePath = slideData.background.path.startsWith('file://') + ? slideData.background.path.replace('file://', '') + : slideData.background.path; + targetSlide.background = { path: imagePath }; + } else if (slideData.background.type === 'color' && slideData.background.value) { + targetSlide.background = { color: slideData.background.value }; + } +} + +// Helper: Add elements to slide +function addElements(slideData, targetSlide, pres) { + for (const el of slideData.elements) { + if (el.type === 'image') { + let imagePath = el.src.startsWith('file://') ? el.src.replace('file://', '') : el.src; + targetSlide.addImage({ + path: imagePath, + x: el.position.x, + y: el.position.y, + w: el.position.w, + h: el.position.h + }); + } else if (el.type === 'line') { + targetSlide.addShape(pres.ShapeType.line, { + x: el.x1, + y: el.y1, + w: el.x2 - el.x1, + h: el.y2 - el.y1, + line: { color: el.color, width: el.width } + }); + } else if (el.type === 'shape') { + const shapeOptions = { + x: el.position.x, + y: el.position.y, + w: el.position.w, + h: el.position.h, + shape: el.shape.rectRadius > 0 ? 
pres.ShapeType.roundRect : pres.ShapeType.rect + }; + + if (el.shape.fill) { + shapeOptions.fill = { color: el.shape.fill }; + if (el.shape.transparency != null) shapeOptions.fill.transparency = el.shape.transparency; + } + if (el.shape.line) shapeOptions.line = el.shape.line; + if (el.shape.rectRadius > 0) shapeOptions.rectRadius = el.shape.rectRadius; + if (el.shape.shadow) shapeOptions.shadow = el.shape.shadow; + + targetSlide.addText(el.text || '', shapeOptions); + } else if (el.type === 'list') { + const listOptions = { + x: el.position.x, + y: el.position.y, + w: el.position.w, + h: el.position.h, + fontSize: el.style.fontSize, + fontFace: el.style.fontFace, + color: el.style.color, + align: el.style.align, + valign: 'top', + lineSpacing: el.style.lineSpacing, + paraSpaceBefore: el.style.paraSpaceBefore, + paraSpaceAfter: el.style.paraSpaceAfter, + margin: el.style.margin + }; + if (el.style.margin) listOptions.margin = el.style.margin; + targetSlide.addText(el.items, listOptions); + } else { + // Check if text is single-line (height suggests one line) + const lineHeight = el.style.lineSpacing || el.style.fontSize * 1.2; + const isSingleLine = el.position.h <= lineHeight * 1.5; + + let adjustedX = el.position.x; + let adjustedW = el.position.w; + + // Make single-line text 2% wider to account for underestimate + if (isSingleLine) { + const widthIncrease = el.position.w * 0.02; + const align = el.style.align; + + if (align === 'center') { + // Center: expand both sides + adjustedX = el.position.x - (widthIncrease / 2); + adjustedW = el.position.w + widthIncrease; + } else if (align === 'right') { + // Right: expand to the left + adjustedX = el.position.x - widthIncrease; + adjustedW = el.position.w + widthIncrease; + } else { + // Left (default): expand to the right + adjustedW = el.position.w + widthIncrease; + } + } + + const textOptions = { + x: adjustedX, + y: el.position.y, + w: adjustedW, + h: el.position.h, + fontSize: el.style.fontSize, + fontFace: el.style.fontFace, + color: el.style.color, + bold: el.style.bold, + italic: el.style.italic, + underline: el.style.underline, + valign: 'top', + lineSpacing: el.style.lineSpacing, + paraSpaceBefore: el.style.paraSpaceBefore, + paraSpaceAfter: el.style.paraSpaceAfter, + inset: 0 // Remove default PowerPoint internal padding + }; + + if (el.style.align) textOptions.align = el.style.align; + if (el.style.margin) textOptions.margin = el.style.margin; + if (el.style.rotate !== undefined) textOptions.rotate = el.style.rotate; + if (el.style.transparency !== null && el.style.transparency !== undefined) textOptions.transparency = el.style.transparency; + + targetSlide.addText(el.text, textOptions); + } + } +} + +// Helper: Extract slide data from HTML page +async function extractSlideData(page) { + return await page.evaluate(() => { + const PT_PER_PX = 0.75; + const PX_PER_IN = 96; + + // Fonts that are single-weight and should not have bold applied + // (applying bold causes PowerPoint to use faux bold which makes text wider) + const SINGLE_WEIGHT_FONTS = ['impact']; + + // Helper: Check if a font should skip bold formatting + const shouldSkipBold = (fontFamily) => { + if (!fontFamily) return false; + const normalizedFont = fontFamily.toLowerCase().replace(/['"]/g, '').split(',')[0].trim(); + return SINGLE_WEIGHT_FONTS.includes(normalizedFont); + }; + + // Unit conversion helpers + const pxToInch = (px) => px / PX_PER_IN; + const pxToPoints = (pxStr) => parseFloat(pxStr) * PT_PER_PX; + const rgbToHex = (rgbStr) => { + // Handle 
transparent backgrounds by defaulting to white + if (rgbStr === 'rgba(0, 0, 0, 0)' || rgbStr === 'transparent') return 'FFFFFF'; + + const match = rgbStr.match(/rgba?\((\d+),\s*(\d+),\s*(\d+)/); + if (!match) return 'FFFFFF'; + return match.slice(1).map(n => parseInt(n).toString(16).padStart(2, '0')).join(''); + }; + + const extractAlpha = (rgbStr) => { + const match = rgbStr.match(/rgba\((\d+),\s*(\d+),\s*(\d+),\s*([\d.]+)\)/); + if (!match || !match[4]) return null; + const alpha = parseFloat(match[4]); + return Math.round((1 - alpha) * 100); + }; + + const applyTextTransform = (text, textTransform) => { + if (textTransform === 'uppercase') return text.toUpperCase(); + if (textTransform === 'lowercase') return text.toLowerCase(); + if (textTransform === 'capitalize') { + return text.replace(/\b\w/g, c => c.toUpperCase()); + } + return text; + }; + + // Extract rotation angle from CSS transform and writing-mode + const getRotation = (transform, writingMode) => { + let angle = 0; + + // Handle writing-mode first + // PowerPoint: 90° = text rotated 90° clockwise (reads top to bottom, letters upright) + // PowerPoint: 270° = text rotated 270° clockwise (reads bottom to top, letters upright) + if (writingMode === 'vertical-rl') { + // vertical-rl alone = text reads top to bottom = 90° in PowerPoint + angle = 90; + } else if (writingMode === 'vertical-lr') { + // vertical-lr alone = text reads bottom to top = 270° in PowerPoint + angle = 270; + } + + // Then add any transform rotation + if (transform && transform !== 'none') { + // Try to match rotate() function + const rotateMatch = transform.match(/rotate\((-?\d+(?:\.\d+)?)deg\)/); + if (rotateMatch) { + angle += parseFloat(rotateMatch[1]); + } else { + // Browser may compute as matrix - extract rotation from matrix + const matrixMatch = transform.match(/matrix\(([^)]+)\)/); + if (matrixMatch) { + const values = matrixMatch[1].split(',').map(parseFloat); + // matrix(a, b, c, d, e, f) where rotation = atan2(b, a) + const matrixAngle = Math.atan2(values[1], values[0]) * (180 / Math.PI); + angle += Math.round(matrixAngle); + } + } + } + + // Normalize to 0-359 range + angle = angle % 360; + if (angle < 0) angle += 360; + + return angle === 0 ? 
null : angle; + }; + + // Get position/dimensions accounting for rotation + const getPositionAndSize = (el, rect, rotation) => { + if (rotation === null) { + return { x: rect.left, y: rect.top, w: rect.width, h: rect.height }; + } + + // For 90° or 270° rotations, swap width and height + // because PowerPoint applies rotation to the original (unrotated) box + const isVertical = rotation === 90 || rotation === 270; + + if (isVertical) { + // The browser shows us the rotated dimensions (tall box for vertical text) + // But PowerPoint needs the pre-rotation dimensions (wide box that will be rotated) + // So we swap: browser's height becomes PPT's width, browser's width becomes PPT's height + const centerX = rect.left + rect.width / 2; + const centerY = rect.top + rect.height / 2; + + return { + x: centerX - rect.height / 2, + y: centerY - rect.width / 2, + w: rect.height, + h: rect.width + }; + } + + // For other rotations, use element's offset dimensions + const centerX = rect.left + rect.width / 2; + const centerY = rect.top + rect.height / 2; + return { + x: centerX - el.offsetWidth / 2, + y: centerY - el.offsetHeight / 2, + w: el.offsetWidth, + h: el.offsetHeight + }; + }; + + // Parse CSS box-shadow into PptxGenJS shadow properties + const parseBoxShadow = (boxShadow) => { + if (!boxShadow || boxShadow === 'none') return null; + + // Browser computed style format: "rgba(0, 0, 0, 0.3) 2px 2px 8px 0px [inset]" + // CSS format: "[inset] 2px 2px 8px 0px rgba(0, 0, 0, 0.3)" + + const insetMatch = boxShadow.match(/inset/); + + // IMPORTANT: PptxGenJS/PowerPoint doesn't properly support inset shadows + // Only process outer shadows to avoid file corruption + if (insetMatch) return null; + + // Extract color first (rgba or rgb at start) + const colorMatch = boxShadow.match(/rgba?\([^)]+\)/); + + // Extract numeric values (handles both px and pt units) + const parts = boxShadow.match(/([-\d.]+)(px|pt)/g); + + if (!parts || parts.length < 2) return null; + + const offsetX = parseFloat(parts[0]); + const offsetY = parseFloat(parts[1]); + const blur = parts.length > 2 ? parseFloat(parts[2]) : 0; + + // Calculate angle from offsets (in degrees, 0 = right, 90 = down) + let angle = 0; + if (offsetX !== 0 || offsetY !== 0) { + angle = Math.atan2(offsetY, offsetX) * (180 / Math.PI); + if (angle < 0) angle += 360; + } + + // Calculate offset distance (hypotenuse) + const offset = Math.sqrt(offsetX * offsetX + offsetY * offsetY) * PT_PER_PX; + + // Extract opacity from rgba + let opacity = 0.5; + if (colorMatch) { + const opacityMatch = colorMatch[0].match(/[\d.]+\)$/); + if (opacityMatch) { + opacity = parseFloat(opacityMatch[0].replace(')', '')); + } + } + + return { + type: 'outer', + angle: Math.round(angle), + blur: blur * 0.75, // Convert to points + color: colorMatch ? rgbToHex(colorMatch[0]) : '000000', + offset: offset, + opacity + }; + }; + + // Parse inline formatting tags (<b>, <i>, <u>, <strong>, <em>, <span>) into text runs + const parseInlineFormatting = (element, baseOptions = {}, runs = [], baseTextTransform = (x) => x) => { + let prevNodeIsText = false; + + element.childNodes.forEach((node) => { + let textTransform = baseTextTransform; + + const isText = node.nodeType === Node.TEXT_NODE || node.tagName === 'BR'; + if (isText) { + const text = node.tagName === 'BR' ?
'\n' : textTransform(node.textContent.replace(/\s+/g, ' ')); + const prevRun = runs[runs.length - 1]; + if (prevNodeIsText && prevRun) { + prevRun.text += text; + } else { + runs.push({ text, options: { ...baseOptions } }); + } + + } else if (node.nodeType === Node.ELEMENT_NODE && node.textContent.trim()) { + const options = { ...baseOptions }; + const computed = window.getComputedStyle(node); + + // Handle inline elements with computed styles + if (node.tagName === 'SPAN' || node.tagName === 'B' || node.tagName === 'STRONG' || node.tagName === 'I' || node.tagName === 'EM' || node.tagName === 'U') { + const isBold = computed.fontWeight === 'bold' || parseInt(computed.fontWeight) >= 600; + if (isBold && !shouldSkipBold(computed.fontFamily)) options.bold = true; + if (computed.fontStyle === 'italic') options.italic = true; + if (computed.textDecoration && computed.textDecoration.includes('underline')) options.underline = true; + if (computed.color && computed.color !== 'rgb(0, 0, 0)') { + options.color = rgbToHex(computed.color); + const transparency = extractAlpha(computed.color); + if (transparency !== null) options.transparency = transparency; + } + if (computed.fontSize) options.fontSize = pxToPoints(computed.fontSize); + + // Apply text-transform on the span element itself + if (computed.textTransform && computed.textTransform !== 'none') { + const transformStr = computed.textTransform; + textTransform = (text) => applyTextTransform(text, transformStr); + } + + // Validate: Check for margins on inline elements + if (computed.marginLeft && parseFloat(computed.marginLeft) > 0) { + errors.push(`Inline element <${node.tagName.toLowerCase()}> has margin-left which is not supported in PowerPoint. Remove margin from inline elements.`); + } + if (computed.marginRight && parseFloat(computed.marginRight) > 0) { + errors.push(`Inline element <${node.tagName.toLowerCase()}> has margin-right which is not supported in PowerPoint. Remove margin from inline elements.`); + } + if (computed.marginTop && parseFloat(computed.marginTop) > 0) { + errors.push(`Inline element <${node.tagName.toLowerCase()}> has margin-top which is not supported in PowerPoint. Remove margin from inline elements.`); + } + if (computed.marginBottom && parseFloat(computed.marginBottom) > 0) { + errors.push(`Inline element <${node.tagName.toLowerCase()}> has margin-bottom which is not supported in PowerPoint. Remove margin from inline elements.`); + } + + // Recursively process the child node. This will flatten nested spans into multiple runs. + parseInlineFormatting(node, options, runs, textTransform); + } + } + + prevNodeIsText = isText; + }); + + // Trim leading space from first run and trailing space from last run + if (runs.length > 0) { + runs[0].text = runs[0].text.replace(/^\s+/, ''); + runs[runs.length - 1].text = runs[runs.length - 1].text.replace(/\s+$/, ''); + } + + return runs.filter(r => r.text.length > 0); + }; + + // Extract background from body (image or color) + const body = document.body; + const bodyStyle = window.getComputedStyle(body); + const bgImage = bodyStyle.backgroundImage; + const bgColor = bodyStyle.backgroundColor; + + // Collect validation errors + const errors = []; + + // Validate: Check for CSS gradients + if (bgImage && (bgImage.includes('linear-gradient') || bgImage.includes('radial-gradient'))) { + errors.push( + 'CSS gradients are not supported. 
Use Sharp to rasterize gradients as PNG images first, ' + + 'then reference with background-image: url(\'gradient.png\')' + ); + } + + let background; + if (bgImage && bgImage !== 'none') { + // Extract URL from url("...") or url(...) + const urlMatch = bgImage.match(/url\(["']?([^"')]+)["']?\)/); + if (urlMatch) { + background = { + type: 'image', + path: urlMatch[1] + }; + } else { + background = { + type: 'color', + value: rgbToHex(bgColor) + }; + } + } else { + background = { + type: 'color', + value: rgbToHex(bgColor) + }; + } + + // Process all elements + const elements = []; + const placeholders = []; + const textTags = ['P', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'UL', 'OL', 'LI']; + const processed = new Set(); + + document.querySelectorAll('*').forEach((el) => { + if (processed.has(el)) return; + + // Validate text elements don't have backgrounds, borders, or shadows + if (textTags.includes(el.tagName)) { + const computed = window.getComputedStyle(el); + const hasBg = computed.backgroundColor && computed.backgroundColor !== 'rgba(0, 0, 0, 0)'; + const hasBorder = (computed.borderWidth && parseFloat(computed.borderWidth) > 0) || + (computed.borderTopWidth && parseFloat(computed.borderTopWidth) > 0) || + (computed.borderRightWidth && parseFloat(computed.borderRightWidth) > 0) || + (computed.borderBottomWidth && parseFloat(computed.borderBottomWidth) > 0) || + (computed.borderLeftWidth && parseFloat(computed.borderLeftWidth) > 0); + const hasShadow = computed.boxShadow && computed.boxShadow !== 'none'; + + if (hasBg || hasBorder || hasShadow) { + errors.push( + `Text element <${el.tagName.toLowerCase()}> has ${hasBg ? 'background' : hasBorder ? 'border' : 'shadow'}. ` + + 'Backgrounds, borders, and shadows are only supported on
<div> elements, not text elements.' + ); + return; + } + } + + // Extract placeholder elements (for charts, etc.) + if (el.className && el.className.includes('placeholder')) { + const rect = el.getBoundingClientRect(); + if (rect.width === 0 || rect.height === 0) { + errors.push( + `Placeholder "${el.id || 'unnamed'}" has ${rect.width === 0 ? 'width: 0' : 'height: 0'}. Check the layout CSS.` + ); + } else { + placeholders.push({ + id: el.id || `placeholder-${placeholders.length}`, + x: pxToInch(rect.left), + y: pxToInch(rect.top), + w: pxToInch(rect.width), + h: pxToInch(rect.height) + }); + } + processed.add(el); + return; + } + + // Extract images + if (el.tagName === 'IMG') { + const rect = el.getBoundingClientRect(); + if (rect.width > 0 && rect.height > 0) { + elements.push({ + type: 'image', + src: el.src, + position: { + x: pxToInch(rect.left), + y: pxToInch(rect.top), + w: pxToInch(rect.width), + h: pxToInch(rect.height) + } + }); + processed.add(el); + return; + } + } + + // Extract DIVs with backgrounds/borders as shapes + const isContainer = el.tagName === 'DIV' && !textTags.includes(el.tagName); + if (isContainer) { + const computed = window.getComputedStyle(el); + const hasBg = computed.backgroundColor && computed.backgroundColor !== 'rgba(0, 0, 0, 0)'; + + // Validate: Check for unwrapped text content in DIV + for (const node of el.childNodes) { + if (node.nodeType === Node.TEXT_NODE) { + const text = node.textContent.trim(); + if (text) { + errors.push( + `DIV element contains unwrapped text "${text.substring(0, 50)}${text.length > 50 ? '...' : ''}". ` + + 'All text must be wrapped in <p>, <h1>-<h6>, <ul>, or <li>

                          tags to appear in PowerPoint.' + ); + } + } + } + + // Check for background images on shapes + const bgImage = computed.backgroundImage; + if (bgImage && bgImage !== 'none') { + errors.push( + 'Background images on DIV elements are not supported. ' + + 'Use solid colors or borders for shapes, or use slide.addImage() in PptxGenJS to layer images.' + ); + return; + } + + // Check for borders - both uniform and partial + const borderTop = computed.borderTopWidth; + const borderRight = computed.borderRightWidth; + const borderBottom = computed.borderBottomWidth; + const borderLeft = computed.borderLeftWidth; + const borders = [borderTop, borderRight, borderBottom, borderLeft].map(b => parseFloat(b) || 0); + const hasBorder = borders.some(b => b > 0); + const hasUniformBorder = hasBorder && borders.every(b => b === borders[0]); + const borderLines = []; + + if (hasBorder && !hasUniformBorder) { + const rect = el.getBoundingClientRect(); + const x = pxToInch(rect.left); + const y = pxToInch(rect.top); + const w = pxToInch(rect.width); + const h = pxToInch(rect.height); + + // Collect lines to add after shape (inset by half the line width to center on edge) + if (parseFloat(borderTop) > 0) { + const widthPt = pxToPoints(borderTop); + const inset = (widthPt / 72) / 2; // Convert points to inches, then half + borderLines.push({ + type: 'line', + x1: x, y1: y + inset, x2: x + w, y2: y + inset, + width: widthPt, + color: rgbToHex(computed.borderTopColor) + }); + } + if (parseFloat(borderRight) > 0) { + const widthPt = pxToPoints(borderRight); + const inset = (widthPt / 72) / 2; + borderLines.push({ + type: 'line', + x1: x + w - inset, y1: y, x2: x + w - inset, y2: y + h, + width: widthPt, + color: rgbToHex(computed.borderRightColor) + }); + } + if (parseFloat(borderBottom) > 0) { + const widthPt = pxToPoints(borderBottom); + const inset = (widthPt / 72) / 2; + borderLines.push({ + type: 'line', + x1: x, y1: y + h - inset, x2: x + w, y2: y + h - inset, + width: widthPt, + color: rgbToHex(computed.borderBottomColor) + }); + } + if (parseFloat(borderLeft) > 0) { + const widthPt = pxToPoints(borderLeft); + const inset = (widthPt / 72) / 2; + borderLines.push({ + type: 'line', + x1: x + inset, y1: y, x2: x + inset, y2: y + h, + width: widthPt, + color: rgbToHex(computed.borderLeftColor) + }); + } + } + + if (hasBg || hasBorder) { + const rect = el.getBoundingClientRect(); + if (rect.width > 0 && rect.height > 0) { + const shadow = parseBoxShadow(computed.boxShadow); + + // Only add shape if there's background or uniform border + if (hasBg || hasUniformBorder) { + elements.push({ + type: 'shape', + text: '', // Shape only - child text elements render on top + position: { + x: pxToInch(rect.left), + y: pxToInch(rect.top), + w: pxToInch(rect.width), + h: pxToInch(rect.height) + }, + shape: { + fill: hasBg ? rgbToHex(computed.backgroundColor) : null, + transparency: hasBg ? extractAlpha(computed.backgroundColor) : null, + line: hasUniformBorder ? 
{ + color: rgbToHex(computed.borderColor), + width: pxToPoints(computed.borderWidth) + } : null, + // Convert border-radius to rectRadius (in inches) + // % values: 50%+ = circle (1), <50% = percentage of min dimension + // pt values: divide by 72 (72pt = 1 inch) + // px values: divide by 96 (96px = 1 inch) + rectRadius: (() => { + const radius = computed.borderRadius; + const radiusValue = parseFloat(radius); + if (radiusValue === 0) return 0; + + if (radius.includes('%')) { + if (radiusValue >= 50) return 1; + // Calculate percentage of smaller dimension + const minDim = Math.min(rect.width, rect.height); + return (radiusValue / 100) * pxToInch(minDim); + } + + if (radius.includes('pt')) return radiusValue / 72; + return radiusValue / PX_PER_IN; + })(), + shadow: shadow + } + }); + } + + // Add partial border lines + elements.push(...borderLines); + + processed.add(el); + return; + } + } + } + + // Extract bullet lists as single text block + if (el.tagName === 'UL' || el.tagName === 'OL') { + const rect = el.getBoundingClientRect(); + if (rect.width === 0 || rect.height === 0) return; + + const liElements = Array.from(el.querySelectorAll('li')); + const items = []; + const ulComputed = window.getComputedStyle(el); + const ulPaddingLeftPt = pxToPoints(ulComputed.paddingLeft); + + // Split: margin-left for bullet position, indent for text position + // margin-left + indent = ul padding-left + const marginLeft = ulPaddingLeftPt * 0.5; + const textIndent = ulPaddingLeftPt * 0.5; + + liElements.forEach((li, idx) => { + const isLast = idx === liElements.length - 1; + const runs = parseInlineFormatting(li, { breakLine: false }); + // Clean manual bullets from first run + if (runs.length > 0) { + runs[0].text = runs[0].text.replace(/^[•\-\*▪▸]\s*/, ''); + runs[0].options.bullet = { indent: textIndent }; + } + // Set breakLine on last run + if (runs.length > 0 && !isLast) { + runs[runs.length - 1].options.breakLine = true; + } + items.push(...runs); + }); + + const computed = window.getComputedStyle(liElements[0] || el); + + elements.push({ + type: 'list', + items: items, + position: { + x: pxToInch(rect.left), + y: pxToInch(rect.top), + w: pxToInch(rect.width), + h: pxToInch(rect.height) + }, + style: { + fontSize: pxToPoints(computed.fontSize), + fontFace: computed.fontFamily.split(',')[0].replace(/['"]/g, '').trim(), + color: rgbToHex(computed.color), + transparency: extractAlpha(computed.color), + align: computed.textAlign === 'start' ? 'left' : computed.textAlign, + lineSpacing: computed.lineHeight && computed.lineHeight !== 'normal' ? pxToPoints(computed.lineHeight) : null, + paraSpaceBefore: 0, + paraSpaceAfter: pxToPoints(computed.marginBottom), + // PptxGenJS margin array is [left, right, bottom, top] + margin: [marginLeft, 0, 0, 0] + } + }); + + liElements.forEach(li => processed.add(li)); + processed.add(el); + return; + } + + // Extract text elements (P, H1, H2, etc.) + if (!textTags.includes(el.tagName)) return; + + const rect = el.getBoundingClientRect(); + const text = el.textContent.trim(); + if (rect.width === 0 || rect.height === 0 || !text) return; + + // Validate: Check for manual bullet symbols in text elements (not in lists) + if (el.tagName !== 'LI' && /^[•\-\*▪▸○●◆◇■□]\s/.test(text.trimStart())) { + errors.push( + `Text element <${el.tagName.toLowerCase()}> starts with bullet symbol "${text.substring(0, 20)}...". ` + + 'Use
<ul> or <ol>
                              lists instead of manual bullet symbols.' + ); + return; + } + + const computed = window.getComputedStyle(el); + const rotation = getRotation(computed.transform, computed.writingMode); + const { x, y, w, h } = getPositionAndSize(el, rect, rotation); + + const baseStyle = { + fontSize: pxToPoints(computed.fontSize), + fontFace: computed.fontFamily.split(',')[0].replace(/['"]/g, '').trim(), + color: rgbToHex(computed.color), + align: computed.textAlign === 'start' ? 'left' : computed.textAlign, + lineSpacing: pxToPoints(computed.lineHeight), + paraSpaceBefore: pxToPoints(computed.marginTop), + paraSpaceAfter: pxToPoints(computed.marginBottom), + // PptxGenJS margin array is [left, right, bottom, top] (not [top, right, bottom, left] as documented) + margin: [ + pxToPoints(computed.paddingLeft), + pxToPoints(computed.paddingRight), + pxToPoints(computed.paddingBottom), + pxToPoints(computed.paddingTop) + ] + }; + + const transparency = extractAlpha(computed.color); + if (transparency !== null) baseStyle.transparency = transparency; + + if (rotation !== null) baseStyle.rotate = rotation; + + const hasFormatting = el.querySelector('b, i, u, strong, em, span, br'); + + if (hasFormatting) { + // Text with inline formatting + const transformStr = computed.textTransform; + const runs = parseInlineFormatting(el, {}, [], (str) => applyTextTransform(str, transformStr)); + + // Adjust lineSpacing based on largest fontSize in runs + const adjustedStyle = { ...baseStyle }; + if (adjustedStyle.lineSpacing) { + const maxFontSize = Math.max( + adjustedStyle.fontSize, + ...runs.map(r => r.options?.fontSize || 0) + ); + if (maxFontSize > adjustedStyle.fontSize) { + const lineHeightMultiplier = adjustedStyle.lineSpacing / adjustedStyle.fontSize; + adjustedStyle.lineSpacing = maxFontSize * lineHeightMultiplier; + } + } + + elements.push({ + type: el.tagName.toLowerCase(), + text: runs, + position: { x: pxToInch(x), y: pxToInch(y), w: pxToInch(w), h: pxToInch(h) }, + style: adjustedStyle + }); + } else { + // Plain text - inherit CSS formatting + const textTransform = computed.textTransform; + const transformedText = applyTextTransform(text, textTransform); + + const isBold = computed.fontWeight === 'bold' || parseInt(computed.fontWeight) >= 600; + + elements.push({ + type: el.tagName.toLowerCase(), + text: transformedText, + position: { x: pxToInch(x), y: pxToInch(y), w: pxToInch(w), h: pxToInch(h) }, + style: { + ...baseStyle, + bold: isBold && !shouldSkipBold(computed.fontFamily), + italic: computed.fontStyle === 'italic', + underline: computed.textDecoration.includes('underline') + } + }); + } + + processed.add(el); + }); + + return { background, elements, placeholders, errors }; + }); +} + +async function html2pptx(htmlFile, pres, options = {}) { + const { + tmpDir = process.env.TMPDIR || '/tmp', + slide = null + } = options; + + try { + // Use Chrome on macOS, default Chromium on Unix + const launchOptions = { env: { TMPDIR: tmpDir } }; + if (process.platform === 'darwin') { + launchOptions.channel = 'chrome'; + } + + const browser = await chromium.launch(launchOptions); + + let bodyDimensions; + let slideData; + + const filePath = path.isAbsolute(htmlFile) ? 
htmlFile : path.join(process.cwd(), htmlFile);
+        const validationErrors = [];
+
+        try {
+            const page = await browser.newPage();
+            page.on('console', (msg) => {
+                // Forward browser console output to the Node.js console
+                console.log(`Browser console: ${msg.text()}`);
+            });
+
+            await page.goto(`file://${filePath}`);
+
+            bodyDimensions = await getBodyDimensions(page);
+
+            await page.setViewportSize({
+                width: Math.round(bodyDimensions.width),
+                height: Math.round(bodyDimensions.height)
+            });
+
+            slideData = await extractSlideData(page);
+        } finally {
+            await browser.close();
+        }
+
+        // Collect all validation errors
+        if (bodyDimensions.errors && bodyDimensions.errors.length > 0) {
+            validationErrors.push(...bodyDimensions.errors);
+        }
+
+        const dimensionErrors = validateDimensions(bodyDimensions, pres);
+        if (dimensionErrors.length > 0) {
+            validationErrors.push(...dimensionErrors);
+        }
+
+        const textBoxPositionErrors = validateTextBoxPosition(slideData, bodyDimensions);
+        if (textBoxPositionErrors.length > 0) {
+            validationErrors.push(...textBoxPositionErrors);
+        }
+
+        if (slideData.errors && slideData.errors.length > 0) {
+            validationErrors.push(...slideData.errors);
+        }
+
+        // Throw all errors at once if any exist
+        if (validationErrors.length > 0) {
+            const errorMessage = validationErrors.length === 1
+                ? validationErrors[0]
+                : `Multiple validation errors found:\n${validationErrors.map((e, i) => `  ${i + 1}. ${e}`).join('\n')}`;
+            throw new Error(errorMessage);
+        }
+
+        const targetSlide = slide || pres.addSlide();
+
+        await addBackground(slideData, targetSlide, tmpDir);
+        addElements(slideData, targetSlide, pres);
+
+        return { slide: targetSlide, placeholders: slideData.placeholders };
+    } catch (error) {
+        if (!error.message.startsWith(htmlFile)) {
+            throw new Error(`${htmlFile}: ${error.message}`);
+        }
+        throw error;
+    }
+}
+
+module.exports = html2pptx;
\ No newline at end of file
diff --git a/.claude/skills/pptx/scripts/inventory.py b/.claude/skills/pptx/scripts/inventory.py
new file mode 100644
index 0000000..edda390
--- /dev/null
+++ b/.claude/skills/pptx/scripts/inventory.py
@@ -0,0 +1,1020 @@
+#!/usr/bin/env python3
+"""
+Extract structured text content from PowerPoint presentations. 
+ +This module provides functionality to: +- Extract all text content from PowerPoint shapes +- Preserve paragraph formatting (alignment, bullets, fonts, spacing) +- Handle nested GroupShapes recursively with correct absolute positions +- Sort shapes by visual position on slides +- Filter out slide numbers and non-content placeholders +- Export to JSON with clean, structured data + +Classes: + ParagraphData: Represents a text paragraph with formatting + ShapeData: Represents a shape with position and text content + +Main Functions: + extract_text_inventory: Extract all text from a presentation + save_inventory: Save extracted data to JSON + +Usage: + python inventory.py input.pptx output.json +""" + +import argparse +import json +import platform +import sys +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple, Union + +from PIL import Image, ImageDraw, ImageFont +from pptx import Presentation +from pptx.enum.text import PP_ALIGN +from pptx.shapes.base import BaseShape + +# Type aliases for cleaner signatures +JsonValue = Union[str, int, float, bool, None] +ParagraphDict = Dict[str, JsonValue] +ShapeDict = Dict[ + str, Union[str, float, bool, List[ParagraphDict], List[str], Dict[str, Any], None] +] +InventoryData = Dict[ + str, Dict[str, "ShapeData"] +] # Dict of slide_id -> {shape_id -> ShapeData} +InventoryDict = Dict[str, Dict[str, ShapeDict]] # JSON-serializable inventory + + +def main(): + """Main entry point for command-line usage.""" + parser = argparse.ArgumentParser( + description="Extract text inventory from PowerPoint with proper GroupShape support.", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + python inventory.py presentation.pptx inventory.json + Extracts text inventory with correct absolute positions for grouped shapes + + python inventory.py presentation.pptx inventory.json --issues-only + Extracts only text shapes that have overflow or overlap issues + +The output JSON includes: + - All text content organized by slide and shape + - Correct absolute positions for shapes in groups + - Visual position and size in inches + - Paragraph properties and formatting + - Issue detection: text overflow and shape overlaps + """, + ) + + parser.add_argument("input", help="Input PowerPoint file (.pptx)") + parser.add_argument("output", help="Output JSON file for inventory") + parser.add_argument( + "--issues-only", + action="store_true", + help="Include only text shapes that have overflow or overlap issues", + ) + + args = parser.parse_args() + + input_path = Path(args.input) + if not input_path.exists(): + print(f"Error: Input file not found: {args.input}") + sys.exit(1) + + if not input_path.suffix.lower() == ".pptx": + print("Error: Input must be a PowerPoint file (.pptx)") + sys.exit(1) + + try: + print(f"Extracting text inventory from: {args.input}") + if args.issues_only: + print( + "Filtering to include only text shapes with issues (overflow/overlap)" + ) + inventory = extract_text_inventory(input_path, issues_only=args.issues_only) + + output_path = Path(args.output) + output_path.parent.mkdir(parents=True, exist_ok=True) + save_inventory(inventory, output_path) + + print(f"Output saved to: {args.output}") + + # Report statistics + total_slides = len(inventory) + total_shapes = sum(len(shapes) for shapes in inventory.values()) + if args.issues_only: + if total_shapes > 0: + print( + f"Found {total_shapes} text elements with issues in {total_slides} slides" + ) + else: + print("No 
issues discovered") + else: + print( + f"Found text in {total_slides} slides with {total_shapes} text elements" + ) + + except Exception as e: + print(f"Error processing presentation: {e}") + import traceback + + traceback.print_exc() + sys.exit(1) + + +@dataclass +class ShapeWithPosition: + """A shape with its absolute position on the slide.""" + + shape: BaseShape + absolute_left: int # in EMUs + absolute_top: int # in EMUs + + +class ParagraphData: + """Data structure for paragraph properties extracted from a PowerPoint paragraph.""" + + def __init__(self, paragraph: Any): + """Initialize from a PowerPoint paragraph object. + + Args: + paragraph: The PowerPoint paragraph object + """ + self.text: str = paragraph.text.strip() + self.bullet: bool = False + self.level: Optional[int] = None + self.alignment: Optional[str] = None + self.space_before: Optional[float] = None + self.space_after: Optional[float] = None + self.font_name: Optional[str] = None + self.font_size: Optional[float] = None + self.bold: Optional[bool] = None + self.italic: Optional[bool] = None + self.underline: Optional[bool] = None + self.color: Optional[str] = None + self.theme_color: Optional[str] = None + self.line_spacing: Optional[float] = None + + # Check for bullet formatting + if ( + hasattr(paragraph, "_p") + and paragraph._p is not None + and paragraph._p.pPr is not None + ): + pPr = paragraph._p.pPr + ns = "{http://schemas.openxmlformats.org/drawingml/2006/main}" + if ( + pPr.find(f"{ns}buChar") is not None + or pPr.find(f"{ns}buAutoNum") is not None + ): + self.bullet = True + if hasattr(paragraph, "level"): + self.level = paragraph.level + + # Add alignment if not LEFT (default) + if hasattr(paragraph, "alignment") and paragraph.alignment is not None: + alignment_map = { + PP_ALIGN.CENTER: "CENTER", + PP_ALIGN.RIGHT: "RIGHT", + PP_ALIGN.JUSTIFY: "JUSTIFY", + } + if paragraph.alignment in alignment_map: + self.alignment = alignment_map[paragraph.alignment] + + # Add spacing properties if set + if hasattr(paragraph, "space_before") and paragraph.space_before: + self.space_before = paragraph.space_before.pt + if hasattr(paragraph, "space_after") and paragraph.space_after: + self.space_after = paragraph.space_after.pt + + # Extract font properties from first run + if paragraph.runs: + first_run = paragraph.runs[0] + if hasattr(first_run, "font"): + font = first_run.font + if font.name: + self.font_name = font.name + if font.size: + self.font_size = font.size.pt + if font.bold is not None: + self.bold = font.bold + if font.italic is not None: + self.italic = font.italic + if font.underline is not None: + self.underline = font.underline + + # Handle color - both RGB and theme colors + try: + # Try RGB color first + if font.color.rgb: + self.color = str(font.color.rgb) + except (AttributeError, TypeError): + # Fall back to theme color + try: + if font.color.theme_color: + self.theme_color = font.color.theme_color.name + except (AttributeError, TypeError): + pass + + # Add line spacing if set + if hasattr(paragraph, "line_spacing") and paragraph.line_spacing is not None: + if hasattr(paragraph.line_spacing, "pt"): + self.line_spacing = round(paragraph.line_spacing.pt, 2) + else: + # Multiplier - convert to points + font_size = self.font_size if self.font_size else 12.0 + self.line_spacing = round(paragraph.line_spacing * font_size, 2) + + def to_dict(self) -> ParagraphDict: + """Convert to dictionary for JSON serialization, excluding None values.""" + result: ParagraphDict = {"text": self.text} + + # Add optional 
fields only if they have values + if self.bullet: + result["bullet"] = self.bullet + if self.level is not None: + result["level"] = self.level + if self.alignment: + result["alignment"] = self.alignment + if self.space_before is not None: + result["space_before"] = self.space_before + if self.space_after is not None: + result["space_after"] = self.space_after + if self.font_name: + result["font_name"] = self.font_name + if self.font_size is not None: + result["font_size"] = self.font_size + if self.bold is not None: + result["bold"] = self.bold + if self.italic is not None: + result["italic"] = self.italic + if self.underline is not None: + result["underline"] = self.underline + if self.color: + result["color"] = self.color + if self.theme_color: + result["theme_color"] = self.theme_color + if self.line_spacing is not None: + result["line_spacing"] = self.line_spacing + + return result + + +class ShapeData: + """Data structure for shape properties extracted from a PowerPoint shape.""" + + @staticmethod + def emu_to_inches(emu: int) -> float: + """Convert EMUs (English Metric Units) to inches.""" + return emu / 914400.0 + + @staticmethod + def inches_to_pixels(inches: float, dpi: int = 96) -> int: + """Convert inches to pixels at given DPI.""" + return int(inches * dpi) + + @staticmethod + def get_font_path(font_name: str) -> Optional[str]: + """Get the font file path for a given font name. + + Args: + font_name: Name of the font (e.g., 'Arial', 'Calibri') + + Returns: + Path to the font file, or None if not found + """ + system = platform.system() + + # Common font file variations to try + font_variations = [ + font_name, + font_name.lower(), + font_name.replace(" ", ""), + font_name.replace(" ", "-"), + ] + + # Define font directories and extensions by platform + if system == "Darwin": # macOS + font_dirs = [ + "/System/Library/Fonts/", + "/Library/Fonts/", + "~/Library/Fonts/", + ] + extensions = [".ttf", ".otf", ".ttc", ".dfont"] + else: # Linux + font_dirs = [ + "/usr/share/fonts/truetype/", + "/usr/local/share/fonts/", + "~/.fonts/", + ] + extensions = [".ttf", ".otf"] + + # Try to find the font file + from pathlib import Path + + for font_dir in font_dirs: + font_dir_path = Path(font_dir).expanduser() + if not font_dir_path.exists(): + continue + + # First try exact matches + for variant in font_variations: + for ext in extensions: + font_path = font_dir_path / f"{variant}{ext}" + if font_path.exists(): + return str(font_path) + + # Then try fuzzy matching - find files containing the font name + try: + for file_path in font_dir_path.iterdir(): + if file_path.is_file(): + file_name_lower = file_path.name.lower() + font_name_lower = font_name.lower().replace(" ", "") + if font_name_lower in file_name_lower and any( + file_name_lower.endswith(ext) for ext in extensions + ): + return str(file_path) + except (OSError, PermissionError): + continue + + return None + + @staticmethod + def get_slide_dimensions(slide: Any) -> tuple[Optional[int], Optional[int]]: + """Get slide dimensions from slide object. + + Args: + slide: Slide object + + Returns: + Tuple of (width_emu, height_emu) or (None, None) if not found + """ + try: + prs = slide.part.package.presentation_part.presentation + return prs.slide_width, prs.slide_height + except (AttributeError, TypeError): + return None, None + + @staticmethod + def get_default_font_size(shape: BaseShape, slide_layout: Any) -> Optional[float]: + """Extract default font size from slide layout for a placeholder shape. 
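+
+        The layout XML stores the sz attribute in hundredths of a point, so
+        a defRPr with sz="1800", for example, yields a default of 18.0 pt.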
+
+        Args:
+            shape: Placeholder shape
+            slide_layout: Slide layout containing the placeholder definition
+
+        Returns:
+            Default font size in points, or None if not found
+        """
+        try:
+            if not hasattr(shape, "placeholder_format"):
+                return None
+
+            shape_type = shape.placeholder_format.type  # type: ignore
+            for layout_placeholder in slide_layout.placeholders:
+                if layout_placeholder.placeholder_format.type == shape_type:
+                    # Find first defRPr element with sz (size) attribute
+                    for elem in layout_placeholder.element.iter():
+                        if "defRPr" in elem.tag and (sz := elem.get("sz")):
+                            return float(sz) / 100.0  # Convert hundredths of a point to points
+                    break
+        except Exception:
+            pass
+        return None
+
+    def __init__(
+        self,
+        shape: BaseShape,
+        absolute_left: Optional[int] = None,
+        absolute_top: Optional[int] = None,
+        slide: Optional[Any] = None,
+    ):
+        """Initialize from a PowerPoint shape object.
+
+        Args:
+            shape: The PowerPoint shape object (should be pre-validated)
+            absolute_left: Absolute left position in EMUs (for shapes in groups)
+            absolute_top: Absolute top position in EMUs (for shapes in groups)
+            slide: Optional slide object to get dimensions and layout information
+        """
+        self.shape = shape  # Store reference to original shape
+        self.shape_id: str = ""  # Will be set after sorting
+
+        # Get slide dimensions from slide object
+        self.slide_width_emu, self.slide_height_emu = (
+            self.get_slide_dimensions(slide) if slide else (None, None)
+        )
+
+        # Get placeholder type if applicable
+        self.placeholder_type: Optional[str] = None
+        self.default_font_size: Optional[float] = None
+        if hasattr(shape, "is_placeholder") and shape.is_placeholder:  # type: ignore
+            if shape.placeholder_format and shape.placeholder_format.type:  # type: ignore
+                self.placeholder_type = (
+                    str(shape.placeholder_format.type).split(".")[-1].split(" ")[0]  # type: ignore
+                )
+
+                # Get default font size from layout
+                if slide and hasattr(slide, "slide_layout"):
+                    self.default_font_size = self.get_default_font_size(
+                        shape, slide.slide_layout
+                    )
+
+        # Get position information
+        # Use absolute positions if provided (for shapes in groups), otherwise use shape's position
+        left_emu = (
+            absolute_left
+            if absolute_left is not None
+            else (shape.left if hasattr(shape, "left") else 0)
+        )
+        top_emu = (
+            absolute_top
+            if absolute_top is not None
+            else (shape.top if hasattr(shape, "top") else 0)
+        )
+
+        self.left: float = round(self.emu_to_inches(left_emu), 2)  # type: ignore
+        self.top: float = round(self.emu_to_inches(top_emu), 2)  # type: ignore
+        self.width: float = round(
+            self.emu_to_inches(shape.width if hasattr(shape, "width") else 0),
+            2,  # type: ignore
+        )
+        self.height: float = round(
+            self.emu_to_inches(shape.height if hasattr(shape, "height") else 0),
+            2,  # type: ignore
+        )
+
+        # Store EMU positions for overflow calculations
+        self.left_emu = left_emu
+        self.top_emu = top_emu
+        self.width_emu = shape.width if hasattr(shape, "width") else 0
+        self.height_emu = shape.height if hasattr(shape, "height") else 0
+
+        # Calculate overflow status
+        self.frame_overflow_bottom: Optional[float] = None
+        self.slide_overflow_right: Optional[float] = None
+        self.slide_overflow_bottom: Optional[float] = None
+        self.overlapping_shapes: Dict[
+            str, float
+        ] = {}  # Dict of shape_id -> overlap area in sq inches
+        self.warnings: List[str] = []
+        self._estimate_frame_overflow()
+        self._calculate_slide_overflow()
+        self._detect_bullet_issues()
+
+    @property
+    def paragraphs(self) -> List[ParagraphData]:
+        """Calculate paragraphs from the shape's 
text frame.""" + if not self.shape or not hasattr(self.shape, "text_frame"): + return [] + + paragraphs = [] + for paragraph in self.shape.text_frame.paragraphs: # type: ignore + if paragraph.text.strip(): + paragraphs.append(ParagraphData(paragraph)) + return paragraphs + + def _get_default_font_size(self) -> int: + """Get default font size from theme text styles or use conservative default.""" + try: + if not ( + hasattr(self.shape, "part") and hasattr(self.shape.part, "slide_layout") + ): + return 14 + + slide_master = self.shape.part.slide_layout.slide_master # type: ignore + if not hasattr(slide_master, "element"): + return 14 + + # Determine theme style based on placeholder type + style_name = "bodyStyle" # Default + if self.placeholder_type and "TITLE" in self.placeholder_type: + style_name = "titleStyle" + + # Find font size in theme styles + for child in slide_master.element.iter(): + tag = child.tag.split("}")[-1] if "}" in child.tag else child.tag + if tag == style_name: + for elem in child.iter(): + if "sz" in elem.attrib: + return int(elem.attrib["sz"]) // 100 + except Exception: + pass + + return 14 # Conservative default for body text + + def _get_usable_dimensions(self, text_frame) -> Tuple[int, int]: + """Get usable width and height in pixels after accounting for margins.""" + # Default PowerPoint margins in inches + margins = {"top": 0.05, "bottom": 0.05, "left": 0.1, "right": 0.1} + + # Override with actual margins if set + if hasattr(text_frame, "margin_top") and text_frame.margin_top: + margins["top"] = self.emu_to_inches(text_frame.margin_top) + if hasattr(text_frame, "margin_bottom") and text_frame.margin_bottom: + margins["bottom"] = self.emu_to_inches(text_frame.margin_bottom) + if hasattr(text_frame, "margin_left") and text_frame.margin_left: + margins["left"] = self.emu_to_inches(text_frame.margin_left) + if hasattr(text_frame, "margin_right") and text_frame.margin_right: + margins["right"] = self.emu_to_inches(text_frame.margin_right) + + # Calculate usable area + usable_width = self.width - margins["left"] - margins["right"] + usable_height = self.height - margins["top"] - margins["bottom"] + + # Convert to pixels + return ( + self.inches_to_pixels(usable_width), + self.inches_to_pixels(usable_height), + ) + + def _wrap_text_line(self, line: str, max_width_px: int, draw, font) -> List[str]: + """Wrap a single line of text to fit within max_width_px.""" + if not line: + return [""] + + # Use textlength for efficient width calculation + if draw.textlength(line, font=font) <= max_width_px: + return [line] + + # Need to wrap - split into words + wrapped = [] + words = line.split(" ") + current_line = "" + + for word in words: + test_line = current_line + (" " if current_line else "") + word + if draw.textlength(test_line, font=font) <= max_width_px: + current_line = test_line + else: + if current_line: + wrapped.append(current_line) + current_line = word + + if current_line: + wrapped.append(current_line) + + return wrapped + + def _estimate_frame_overflow(self) -> None: + """Estimate if text overflows the shape bounds using PIL text measurement.""" + if not self.shape or not hasattr(self.shape, "text_frame"): + return + + text_frame = self.shape.text_frame # type: ignore + if not text_frame or not text_frame.paragraphs: + return + + # Get usable dimensions after accounting for margins + usable_width_px, usable_height_px = self._get_usable_dimensions(text_frame) + if usable_width_px <= 0 or usable_height_px <= 0: + return + + # Set up PIL for text measurement + 
dummy_img = Image.new("RGB", (1, 1)) + draw = ImageDraw.Draw(dummy_img) + + # Get default font size from placeholder or use conservative estimate + default_font_size = self._get_default_font_size() + + # Calculate total height of all paragraphs + total_height_px = 0 + + for para_idx, paragraph in enumerate(text_frame.paragraphs): + if not paragraph.text.strip(): + continue + + para_data = ParagraphData(paragraph) + + # Load font for this paragraph + font_name = para_data.font_name or "Arial" + font_size = int(para_data.font_size or default_font_size) + + font = None + font_path = self.get_font_path(font_name) + if font_path: + try: + font = ImageFont.truetype(font_path, size=font_size) + except Exception: + font = ImageFont.load_default() + else: + font = ImageFont.load_default() + + # Wrap all lines in this paragraph + all_wrapped_lines = [] + for line in paragraph.text.split("\n"): + wrapped = self._wrap_text_line(line, usable_width_px, draw, font) + all_wrapped_lines.extend(wrapped) + + if all_wrapped_lines: + # Calculate line height + if para_data.line_spacing: + # Custom line spacing explicitly set + line_height_px = para_data.line_spacing * 96 / 72 + else: + # PowerPoint default single spacing (1.0x font size) + line_height_px = font_size * 96 / 72 + + # Add space_before (except first paragraph) + if para_idx > 0 and para_data.space_before: + total_height_px += para_data.space_before * 96 / 72 + + # Add paragraph text height + total_height_px += len(all_wrapped_lines) * line_height_px + + # Add space_after + if para_data.space_after: + total_height_px += para_data.space_after * 96 / 72 + + # Check for overflow (ignore negligible overflows <= 0.05") + if total_height_px > usable_height_px: + overflow_px = total_height_px - usable_height_px + overflow_inches = round(overflow_px / 96.0, 2) + if overflow_inches > 0.05: # Only report significant overflows + self.frame_overflow_bottom = overflow_inches + + def _calculate_slide_overflow(self) -> None: + """Calculate if shape overflows the slide boundaries.""" + if self.slide_width_emu is None or self.slide_height_emu is None: + return + + # Check right overflow (ignore negligible overflows <= 0.01") + right_edge_emu = self.left_emu + self.width_emu + if right_edge_emu > self.slide_width_emu: + overflow_emu = right_edge_emu - self.slide_width_emu + overflow_inches = round(self.emu_to_inches(overflow_emu), 2) + if overflow_inches > 0.01: # Only report significant overflows + self.slide_overflow_right = overflow_inches + + # Check bottom overflow (ignore negligible overflows <= 0.01") + bottom_edge_emu = self.top_emu + self.height_emu + if bottom_edge_emu > self.slide_height_emu: + overflow_emu = bottom_edge_emu - self.slide_height_emu + overflow_inches = round(self.emu_to_inches(overflow_emu), 2) + if overflow_inches > 0.01: # Only report significant overflows + self.slide_overflow_bottom = overflow_inches + + def _detect_bullet_issues(self) -> None: + """Detect bullet point formatting issues in paragraphs.""" + if not self.shape or not hasattr(self.shape, "text_frame"): + return + + text_frame = self.shape.text_frame # type: ignore + if not text_frame or not text_frame.paragraphs: + return + + # Common bullet symbols that indicate manual bullets + bullet_symbols = ["•", "●", "○"] + + for paragraph in text_frame.paragraphs: + text = paragraph.text.strip() + # Check for manual bullet symbols + if text and any(text.startswith(symbol + " ") for symbol in bullet_symbols): + self.warnings.append( + "manual_bullet_symbol: use proper bullet 
formatting" + ) + break + + @property + def has_any_issues(self) -> bool: + """Check if shape has any issues (overflow, overlap, or warnings).""" + return ( + self.frame_overflow_bottom is not None + or self.slide_overflow_right is not None + or self.slide_overflow_bottom is not None + or len(self.overlapping_shapes) > 0 + or len(self.warnings) > 0 + ) + + def to_dict(self) -> ShapeDict: + """Convert to dictionary for JSON serialization.""" + result: ShapeDict = { + "left": self.left, + "top": self.top, + "width": self.width, + "height": self.height, + } + + # Add optional fields if present + if self.placeholder_type: + result["placeholder_type"] = self.placeholder_type + + if self.default_font_size: + result["default_font_size"] = self.default_font_size + + # Add overflow information only if there is overflow + overflow_data = {} + + # Add frame overflow if present + if self.frame_overflow_bottom is not None: + overflow_data["frame"] = {"overflow_bottom": self.frame_overflow_bottom} + + # Add slide overflow if present + slide_overflow = {} + if self.slide_overflow_right is not None: + slide_overflow["overflow_right"] = self.slide_overflow_right + if self.slide_overflow_bottom is not None: + slide_overflow["overflow_bottom"] = self.slide_overflow_bottom + if slide_overflow: + overflow_data["slide"] = slide_overflow + + # Only add overflow field if there is overflow + if overflow_data: + result["overflow"] = overflow_data + + # Add overlap field if there are overlapping shapes + if self.overlapping_shapes: + result["overlap"] = {"overlapping_shapes": self.overlapping_shapes} + + # Add warnings field if there are warnings + if self.warnings: + result["warnings"] = self.warnings + + # Add paragraphs after placeholder_type + result["paragraphs"] = [para.to_dict() for para in self.paragraphs] + + return result + + +def is_valid_shape(shape: BaseShape) -> bool: + """Check if a shape contains meaningful text content.""" + # Must have a text frame with content + if not hasattr(shape, "text_frame") or not shape.text_frame: # type: ignore + return False + + text = shape.text_frame.text.strip() # type: ignore + if not text: + return False + + # Skip slide numbers and numeric footers + if hasattr(shape, "is_placeholder") and shape.is_placeholder: # type: ignore + if shape.placeholder_format and shape.placeholder_format.type: # type: ignore + placeholder_type = ( + str(shape.placeholder_format.type).split(".")[-1].split(" ")[0] # type: ignore + ) + if placeholder_type == "SLIDE_NUMBER": + return False + if placeholder_type == "FOOTER" and text.isdigit(): + return False + + return True + + +def collect_shapes_with_absolute_positions( + shape: BaseShape, parent_left: int = 0, parent_top: int = 0 +) -> List[ShapeWithPosition]: + """Recursively collect all shapes with valid text, calculating absolute positions. + + For shapes within groups, their positions are relative to the group. + This function calculates the absolute position on the slide by accumulating + parent group offsets. 
+ + Args: + shape: The shape to process + parent_left: Accumulated left offset from parent groups (in EMUs) + parent_top: Accumulated top offset from parent groups (in EMUs) + + Returns: + List of ShapeWithPosition objects with absolute positions + """ + if hasattr(shape, "shapes"): # GroupShape + result = [] + # Get this group's position + group_left = shape.left if hasattr(shape, "left") else 0 + group_top = shape.top if hasattr(shape, "top") else 0 + + # Calculate absolute position for this group + abs_group_left = parent_left + group_left + abs_group_top = parent_top + group_top + + # Process children with accumulated offsets + for child in shape.shapes: # type: ignore + result.extend( + collect_shapes_with_absolute_positions( + child, abs_group_left, abs_group_top + ) + ) + return result + + # Regular shape - check if it has valid text + if is_valid_shape(shape): + # Calculate absolute position + shape_left = shape.left if hasattr(shape, "left") else 0 + shape_top = shape.top if hasattr(shape, "top") else 0 + + return [ + ShapeWithPosition( + shape=shape, + absolute_left=parent_left + shape_left, + absolute_top=parent_top + shape_top, + ) + ] + + return [] + + +def sort_shapes_by_position(shapes: List[ShapeData]) -> List[ShapeData]: + """Sort shapes by visual position (top-to-bottom, left-to-right). + + Shapes within 0.5 inches vertically are considered on the same row. + """ + if not shapes: + return shapes + + # Sort by top position first + shapes = sorted(shapes, key=lambda s: (s.top, s.left)) + + # Group shapes by row (within 0.5 inches vertically) + result = [] + row = [shapes[0]] + row_top = shapes[0].top + + for shape in shapes[1:]: + if abs(shape.top - row_top) <= 0.5: + row.append(shape) + else: + # Sort current row by left position and add to result + result.extend(sorted(row, key=lambda s: s.left)) + row = [shape] + row_top = shape.top + + # Don't forget the last row + result.extend(sorted(row, key=lambda s: s.left)) + return result + + +def calculate_overlap( + rect1: Tuple[float, float, float, float], + rect2: Tuple[float, float, float, float], + tolerance: float = 0.05, +) -> Tuple[bool, float]: + """Calculate if and how much two rectangles overlap. + + Args: + rect1: (left, top, width, height) of first rectangle in inches + rect2: (left, top, width, height) of second rectangle in inches + tolerance: Minimum overlap in inches to consider as overlapping (default: 0.05") + + Returns: + Tuple of (overlaps, overlap_area) where: + - overlaps: True if rectangles overlap by more than tolerance + - overlap_area: Area of overlap in square inches + """ + left1, top1, w1, h1 = rect1 + left2, top2, w2, h2 = rect2 + + # Calculate overlap dimensions + overlap_width = min(left1 + w1, left2 + w2) - max(left1, left2) + overlap_height = min(top1 + h1, top2 + h2) - max(top1, top2) + + # Check if there's meaningful overlap (more than tolerance) + if overlap_width > tolerance and overlap_height > tolerance: + # Calculate overlap area in square inches + overlap_area = overlap_width * overlap_height + return True, round(overlap_area, 2) + + return False, 0 + + +def detect_overlaps(shapes: List[ShapeData]) -> None: + """Detect overlapping shapes and update their overlapping_shapes dictionaries. + + This function requires each ShapeData to have its shape_id already set. + It modifies the shapes in-place, adding shape IDs with overlap areas in square inches. 
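+
+    Worked example (hypothetical shapes): a 2" x 2" shape at (1", 1") and a
+    2" x 2" shape at (2", 2") intersect over a 1" x 1" region, so each shape
+    records 1.0 sq in under the other's shape_id.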
+ + Args: + shapes: List of ShapeData objects with shape_id attributes set + """ + n = len(shapes) + + # Compare each pair of shapes + for i in range(n): + for j in range(i + 1, n): + shape1 = shapes[i] + shape2 = shapes[j] + + # Ensure shape IDs are set + assert shape1.shape_id, f"Shape at index {i} has no shape_id" + assert shape2.shape_id, f"Shape at index {j} has no shape_id" + + rect1 = (shape1.left, shape1.top, shape1.width, shape1.height) + rect2 = (shape2.left, shape2.top, shape2.width, shape2.height) + + overlaps, overlap_area = calculate_overlap(rect1, rect2) + + if overlaps: + # Add shape IDs with overlap area in square inches + shape1.overlapping_shapes[shape2.shape_id] = overlap_area + shape2.overlapping_shapes[shape1.shape_id] = overlap_area + + +def extract_text_inventory( + pptx_path: Path, prs: Optional[Any] = None, issues_only: bool = False +) -> InventoryData: + """Extract text content from all slides in a PowerPoint presentation. + + Args: + pptx_path: Path to the PowerPoint file + prs: Optional Presentation object to use. If not provided, will load from pptx_path. + issues_only: If True, only include shapes that have overflow or overlap issues + + Returns a nested dictionary: {slide-N: {shape-N: ShapeData}} + Shapes are sorted by visual position (top-to-bottom, left-to-right). + The ShapeData objects contain the full shape information and can be + converted to dictionaries for JSON serialization using to_dict(). + """ + if prs is None: + prs = Presentation(str(pptx_path)) + inventory: InventoryData = {} + + for slide_idx, slide in enumerate(prs.slides): + # Collect all valid shapes from this slide with absolute positions + shapes_with_positions = [] + for shape in slide.shapes: # type: ignore + shapes_with_positions.extend(collect_shapes_with_absolute_positions(shape)) + + if not shapes_with_positions: + continue + + # Convert to ShapeData with absolute positions and slide reference + shape_data_list = [ + ShapeData( + swp.shape, + swp.absolute_left, + swp.absolute_top, + slide, + ) + for swp in shapes_with_positions + ] + + # Sort by visual position and assign stable IDs in one step + sorted_shapes = sort_shapes_by_position(shape_data_list) + for idx, shape_data in enumerate(sorted_shapes): + shape_data.shape_id = f"shape-{idx}" + + # Detect overlaps using the stable shape IDs + if len(sorted_shapes) > 1: + detect_overlaps(sorted_shapes) + + # Filter for issues only if requested (after overlap detection) + if issues_only: + sorted_shapes = [sd for sd in sorted_shapes if sd.has_any_issues] + + if not sorted_shapes: + continue + + # Create slide inventory using the stable shape IDs + inventory[f"slide-{slide_idx}"] = { + shape_data.shape_id: shape_data for shape_data in sorted_shapes + } + + return inventory + + +def get_inventory_as_dict(pptx_path: Path, issues_only: bool = False) -> InventoryDict: + """Extract text inventory and return as JSON-serializable dictionaries. + + This is a convenience wrapper around extract_text_inventory that returns + dictionaries instead of ShapeData objects, useful for testing and direct + JSON serialization. 
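+
+    Illustrative use (file name and text are made up):
+
+        >>> inv = get_inventory_as_dict(Path("deck.pptx"))
+        >>> inv["slide-0"]["shape-0"]["paragraphs"][0]["text"]
+        'Quarterly Review'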
+ + Args: + pptx_path: Path to the PowerPoint file + issues_only: If True, only include shapes that have overflow or overlap issues + + Returns: + Nested dictionary with all data serialized for JSON + """ + inventory = extract_text_inventory(pptx_path, issues_only=issues_only) + + # Convert ShapeData objects to dictionaries + dict_inventory: InventoryDict = {} + for slide_key, shapes in inventory.items(): + dict_inventory[slide_key] = { + shape_key: shape_data.to_dict() for shape_key, shape_data in shapes.items() + } + + return dict_inventory + + +def save_inventory(inventory: InventoryData, output_path: Path) -> None: + """Save inventory to JSON file with proper formatting. + + Converts ShapeData objects to dictionaries for JSON serialization. + """ + # Convert ShapeData objects to dictionaries + json_inventory: InventoryDict = {} + for slide_key, shapes in inventory.items(): + json_inventory[slide_key] = { + shape_key: shape_data.to_dict() for shape_key, shape_data in shapes.items() + } + + with open(output_path, "w", encoding="utf-8") as f: + json.dump(json_inventory, f, indent=2, ensure_ascii=False) + + +if __name__ == "__main__": + main() diff --git a/.claude/skills/pptx/scripts/rearrange.py b/.claude/skills/pptx/scripts/rearrange.py new file mode 100644 index 0000000..2519911 --- /dev/null +++ b/.claude/skills/pptx/scripts/rearrange.py @@ -0,0 +1,231 @@ +#!/usr/bin/env python3 +""" +Rearrange PowerPoint slides based on a sequence of indices. + +Usage: + python rearrange.py template.pptx output.pptx 0,34,34,50,52 + +This will create output.pptx using slides from template.pptx in the specified order. +Slides can be repeated (e.g., 34 appears twice). +""" + +import argparse +import shutil +import sys +from copy import deepcopy +from pathlib import Path + +import six +from pptx import Presentation + + +def main(): + parser = argparse.ArgumentParser( + description="Rearrange PowerPoint slides based on a sequence of indices.", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + python rearrange.py template.pptx output.pptx 0,34,34,50,52 + Creates output.pptx using slides 0, 34 (twice), 50, and 52 from template.pptx + + python rearrange.py template.pptx output.pptx 5,3,1,2,4 + Creates output.pptx with slides reordered as specified + +Note: Slide indices are 0-based (first slide is 0, second is 1, etc.) + """, + ) + + parser.add_argument("template", help="Path to template PPTX file") + parser.add_argument("output", help="Path for output PPTX file") + parser.add_argument( + "sequence", help="Comma-separated sequence of slide indices (0-based)" + ) + + args = parser.parse_args() + + # Parse the slide sequence + try: + slide_sequence = [int(x.strip()) for x in args.sequence.split(",")] + except ValueError: + print( + "Error: Invalid sequence format. 
Use comma-separated integers (e.g., 0,34,34,50,52)" + ) + sys.exit(1) + + # Check template exists + template_path = Path(args.template) + if not template_path.exists(): + print(f"Error: Template file not found: {args.template}") + sys.exit(1) + + # Create output directory if needed + output_path = Path(args.output) + output_path.parent.mkdir(parents=True, exist_ok=True) + + try: + rearrange_presentation(template_path, output_path, slide_sequence) + except ValueError as e: + print(f"Error: {e}") + sys.exit(1) + except Exception as e: + print(f"Error processing presentation: {e}") + sys.exit(1) + + +def duplicate_slide(pres, index): + """Duplicate a slide in the presentation.""" + source = pres.slides[index] + + # Use source's layout to preserve formatting + new_slide = pres.slides.add_slide(source.slide_layout) + + # Collect all image and media relationships from the source slide + image_rels = {} + for rel_id, rel in six.iteritems(source.part.rels): + if "image" in rel.reltype or "media" in rel.reltype: + image_rels[rel_id] = rel + + # CRITICAL: Clear placeholder shapes to avoid duplicates + for shape in new_slide.shapes: + sp = shape.element + sp.getparent().remove(sp) + + # Copy all shapes from source + for shape in source.shapes: + el = shape.element + new_el = deepcopy(el) + new_slide.shapes._spTree.insert_element_before(new_el, "p:extLst") + + # Handle picture shapes - need to update the blip reference + # Look for all blip elements (they can be in pic or other contexts) + # Using the element's own xpath method without namespaces argument + blips = new_el.xpath(".//a:blip[@r:embed]") + for blip in blips: + old_rId = blip.get( + "{http://schemas.openxmlformats.org/officeDocument/2006/relationships}embed" + ) + if old_rId in image_rels: + # Create a new relationship in the destination slide for this image + old_rel = image_rels[old_rId] + # get_or_add returns the rId directly, or adds and returns new rId + new_rId = new_slide.part.rels.get_or_add( + old_rel.reltype, old_rel._target + ) + # Update the blip's embed reference to use the new relationship ID + blip.set( + "{http://schemas.openxmlformats.org/officeDocument/2006/relationships}embed", + new_rId, + ) + + # Copy any additional image/media relationships that might be referenced elsewhere + for rel_id, rel in image_rels.items(): + try: + new_slide.part.rels.get_or_add(rel.reltype, rel._target) + except Exception: + pass # Relationship might already exist + + return new_slide + + +def delete_slide(pres, index): + """Delete a slide from the presentation.""" + rId = pres.slides._sldIdLst[index].rId + pres.part.drop_rel(rId) + del pres.slides._sldIdLst[index] + + +def reorder_slides(pres, slide_index, target_index): + """Move a slide from one position to another.""" + slides = pres.slides._sldIdLst + + # Remove slide element from current position + slide_element = slides[slide_index] + slides.remove(slide_element) + + # Insert at target position + slides.insert(target_index, slide_element) + + +def rearrange_presentation(template_path, output_path, slide_sequence): + """ + Create a new presentation with slides from template in specified order. 
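+
+    For example (illustrative), slide_sequence [0, 2, 2] keeps slides 0 and 2,
+    duplicates slide 2 once, deletes every other slide, and reorders the
+    survivors so the output is template slides 0, 2, 2.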
+
+    Args:
+        template_path: Path to template PPTX file
+        output_path: Path for output PPTX file
+        slide_sequence: List of slide indices (0-based) to include
+    """
+    # Copy template to preserve dimensions and theme
+    if template_path != output_path:
+        shutil.copy2(template_path, output_path)
+        prs = Presentation(output_path)
+    else:
+        prs = Presentation(template_path)
+
+    total_slides = len(prs.slides)
+
+    # Validate indices
+    for idx in slide_sequence:
+        if idx < 0 or idx >= total_slides:
+            raise ValueError(f"Slide index {idx} out of range (0-{total_slides - 1})")
+
+    # Track original slides and their duplicates
+    slide_map = []  # List of actual slide indices for final presentation
+    duplicated = {}  # Track duplicates: original_idx -> [duplicate_indices]
+
+    # Step 1: DUPLICATE repeated slides
+    print(f"Processing {len(slide_sequence)} slides from template...")
+    for i, template_idx in enumerate(slide_sequence):
+        if template_idx in duplicated and duplicated[template_idx]:
+            # Already duplicated this slide, use the duplicate
+            slide_map.append(duplicated[template_idx].pop(0))
+            print(f"  [{i}] Using duplicate of slide {template_idx}")
+        elif slide_sequence.count(template_idx) > 1 and template_idx not in duplicated:
+            # First occurrence of a repeated slide - create duplicates
+            slide_map.append(template_idx)
+            duplicates = []
+            count = slide_sequence.count(template_idx) - 1
+            print(
+                f"  [{i}] Using original slide {template_idx}, creating {count} duplicate(s)"
+            )
+            for _ in range(count):
+                duplicate_slide(prs, template_idx)
+                duplicates.append(len(prs.slides) - 1)
+            duplicated[template_idx] = duplicates
+        else:
+            # Unique slide or first occurrence already handled, use original
+            slide_map.append(template_idx)
+            print(f"  [{i}] Using original slide {template_idx}")
+
+    # Step 2: DELETE unwanted slides (work backwards)
+    slides_to_keep = set(slide_map)
+    print(f"\nDeleting {len(prs.slides) - len(slides_to_keep)} unused slides...")
+    for i in range(len(prs.slides) - 1, -1, -1):
+        if i not in slides_to_keep:
+            delete_slide(prs, i)
+            # Update slide_map indices after deletion
+            slide_map = [idx - 1 if idx > i else idx for idx in slide_map]
+
+    # Step 3: REORDER to final sequence
+    print(f"Reordering {len(slide_map)} slides to final sequence...")
+    for target_pos in range(len(slide_map)):
+        # Find which slide should be at target_pos
+        current_pos = slide_map[target_pos]
+        if current_pos != target_pos:
+            reorder_slides(prs, current_pos, target_pos)
+            # Update slide_map: the move shifts other slides
+            for i in range(len(slide_map)):
+                if slide_map[i] > current_pos and slide_map[i] <= target_pos:
+                    slide_map[i] -= 1
+                elif slide_map[i] < current_pos and slide_map[i] >= target_pos:
+                    slide_map[i] += 1
+            slide_map[target_pos] = target_pos
+
+    # Save the presentation
+    prs.save(output_path)
+    print(f"\nSaved rearranged presentation to: {output_path}")
+    print(f"Final presentation has {len(prs.slides)} slides")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.claude/skills/pptx/scripts/replace.py b/.claude/skills/pptx/scripts/replace.py
new file mode 100644
index 0000000..8f7a8b1
--- /dev/null
+++ b/.claude/skills/pptx/scripts/replace.py
@@ -0,0 +1,385 @@
+#!/usr/bin/env python3
+"""Apply text replacements to PowerPoint presentation.
+
+Usage:
+    python replace.py <input.pptx> <replacements.json> <output.pptx>
+
+The replacements JSON should have the structure output by inventory.py.
+ALL text shapes identified by inventory.py will have their text cleared
+unless "paragraphs" is specified in the replacements for that shape. 
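+
+Illustrative replacements file (keys follow inventory.py's output; the values
+here are made up):
+
+    {
+      "slide-0": {
+        "shape-0": {
+          "paragraphs": [
+            {"text": "New title", "bold": true, "font_size": 28.0}
+          ]
+        }
+      }
+    }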
+""" + +import json +import sys +from pathlib import Path +from typing import Any, Dict, List + +from inventory import InventoryData, extract_text_inventory +from pptx import Presentation +from pptx.dml.color import RGBColor +from pptx.enum.dml import MSO_THEME_COLOR +from pptx.enum.text import PP_ALIGN +from pptx.oxml.xmlchemy import OxmlElement +from pptx.util import Pt + + +def clear_paragraph_bullets(paragraph): + """Clear bullet formatting from a paragraph.""" + pPr = paragraph._element.get_or_add_pPr() + + # Remove existing bullet elements + for child in list(pPr): + if ( + child.tag.endswith("buChar") + or child.tag.endswith("buNone") + or child.tag.endswith("buAutoNum") + or child.tag.endswith("buFont") + ): + pPr.remove(child) + + return pPr + + +def apply_paragraph_properties(paragraph, para_data: Dict[str, Any]): + """Apply formatting properties to a paragraph.""" + # Get the text but don't set it on paragraph directly yet + text = para_data.get("text", "") + + # Get or create paragraph properties + pPr = clear_paragraph_bullets(paragraph) + + # Handle bullet formatting + if para_data.get("bullet", False): + level = para_data.get("level", 0) + paragraph.level = level + + # Calculate font-proportional indentation + font_size = para_data.get("font_size", 18.0) + level_indent_emu = int((font_size * (1.6 + level * 1.6)) * 12700) + hanging_indent_emu = int(-font_size * 0.8 * 12700) + + # Set indentation + pPr.attrib["marL"] = str(level_indent_emu) + pPr.attrib["indent"] = str(hanging_indent_emu) + + # Add bullet character + buChar = OxmlElement("a:buChar") + buChar.set("char", "•") + pPr.append(buChar) + + # Default to left alignment for bullets if not specified + if "alignment" not in para_data: + paragraph.alignment = PP_ALIGN.LEFT + else: + # Remove indentation for non-bullet text + pPr.attrib["marL"] = "0" + pPr.attrib["indent"] = "0" + + # Add buNone element + buNone = OxmlElement("a:buNone") + pPr.insert(0, buNone) + + # Apply alignment + if "alignment" in para_data: + alignment_map = { + "LEFT": PP_ALIGN.LEFT, + "CENTER": PP_ALIGN.CENTER, + "RIGHT": PP_ALIGN.RIGHT, + "JUSTIFY": PP_ALIGN.JUSTIFY, + } + if para_data["alignment"] in alignment_map: + paragraph.alignment = alignment_map[para_data["alignment"]] + + # Apply spacing + if "space_before" in para_data: + paragraph.space_before = Pt(para_data["space_before"]) + if "space_after" in para_data: + paragraph.space_after = Pt(para_data["space_after"]) + if "line_spacing" in para_data: + paragraph.line_spacing = Pt(para_data["line_spacing"]) + + # Apply run-level formatting + if not paragraph.runs: + run = paragraph.add_run() + run.text = text + else: + run = paragraph.runs[0] + run.text = text + + # Apply font properties + apply_font_properties(run, para_data) + + +def apply_font_properties(run, para_data: Dict[str, Any]): + """Apply font properties to a text run.""" + if "bold" in para_data: + run.font.bold = para_data["bold"] + if "italic" in para_data: + run.font.italic = para_data["italic"] + if "underline" in para_data: + run.font.underline = para_data["underline"] + if "font_size" in para_data: + run.font.size = Pt(para_data["font_size"]) + if "font_name" in para_data: + run.font.name = para_data["font_name"] + + # Apply color - prefer RGB, fall back to theme_color + if "color" in para_data: + color_hex = para_data["color"].lstrip("#") + if len(color_hex) == 6: + r = int(color_hex[0:2], 16) + g = int(color_hex[2:4], 16) + b = int(color_hex[4:6], 16) + run.font.color.rgb = RGBColor(r, g, b) + elif "theme_color" in 
para_data: + # Get theme color by name (e.g., "DARK_1", "ACCENT_1") + theme_name = para_data["theme_color"] + try: + run.font.color.theme_color = getattr(MSO_THEME_COLOR, theme_name) + except AttributeError: + print(f" WARNING: Unknown theme color name '{theme_name}'") + + +def detect_frame_overflow(inventory: InventoryData) -> Dict[str, Dict[str, float]]: + """Detect text overflow in shapes (text exceeding shape bounds). + + Returns dict of slide_key -> shape_key -> overflow_inches. + Only includes shapes that have text overflow. + """ + overflow_map = {} + + for slide_key, shapes_dict in inventory.items(): + for shape_key, shape_data in shapes_dict.items(): + # Check for frame overflow (text exceeding shape bounds) + if shape_data.frame_overflow_bottom is not None: + if slide_key not in overflow_map: + overflow_map[slide_key] = {} + overflow_map[slide_key][shape_key] = shape_data.frame_overflow_bottom + + return overflow_map + + +def validate_replacements(inventory: InventoryData, replacements: Dict) -> List[str]: + """Validate that all shapes in replacements exist in inventory. + + Returns list of error messages. + """ + errors = [] + + for slide_key, shapes_data in replacements.items(): + if not slide_key.startswith("slide-"): + continue + + # Check if slide exists + if slide_key not in inventory: + errors.append(f"Slide '{slide_key}' not found in inventory") + continue + + # Check each shape + for shape_key in shapes_data.keys(): + if shape_key not in inventory[slide_key]: + # Find shapes without replacements defined and show their content + unused_with_content = [] + for k in inventory[slide_key].keys(): + if k not in shapes_data: + shape_data = inventory[slide_key][k] + # Get text from paragraphs as preview + paragraphs = shape_data.paragraphs + if paragraphs and paragraphs[0].text: + first_text = paragraphs[0].text[:50] + if len(paragraphs[0].text) > 50: + first_text += "..." + unused_with_content.append(f"{k} ('{first_text}')") + else: + unused_with_content.append(k) + + errors.append( + f"Shape '{shape_key}' not found on '{slide_key}'. 
" + f"Shapes without replacements: {', '.join(sorted(unused_with_content)) if unused_with_content else 'none'}" + ) + + return errors + + +def check_duplicate_keys(pairs): + """Check for duplicate keys when loading JSON.""" + result = {} + for key, value in pairs: + if key in result: + raise ValueError(f"Duplicate key found in JSON: '{key}'") + result[key] = value + return result + + +def apply_replacements(pptx_file: str, json_file: str, output_file: str): + """Apply text replacements from JSON to PowerPoint presentation.""" + + # Load presentation + prs = Presentation(pptx_file) + + # Get inventory of all text shapes (returns ShapeData objects) + # Pass prs to use same Presentation instance + inventory = extract_text_inventory(Path(pptx_file), prs) + + # Detect text overflow in original presentation + original_overflow = detect_frame_overflow(inventory) + + # Load replacement data with duplicate key detection + with open(json_file, "r") as f: + replacements = json.load(f, object_pairs_hook=check_duplicate_keys) + + # Validate replacements + errors = validate_replacements(inventory, replacements) + if errors: + print("ERROR: Invalid shapes in replacement JSON:") + for error in errors: + print(f" - {error}") + print("\nPlease check the inventory and update your replacement JSON.") + print( + "You can regenerate the inventory with: python inventory.py " + ) + raise ValueError(f"Found {len(errors)} validation error(s)") + + # Track statistics + shapes_processed = 0 + shapes_cleared = 0 + shapes_replaced = 0 + + # Process each slide from inventory + for slide_key, shapes_dict in inventory.items(): + if not slide_key.startswith("slide-"): + continue + + slide_index = int(slide_key.split("-")[1]) + + if slide_index >= len(prs.slides): + print(f"Warning: Slide {slide_index} not found") + continue + + # Process each shape from inventory + for shape_key, shape_data in shapes_dict.items(): + shapes_processed += 1 + + # Get the shape directly from ShapeData + shape = shape_data.shape + if not shape: + print(f"Warning: {shape_key} has no shape reference") + continue + + # ShapeData already validates text_frame in __init__ + text_frame = shape.text_frame # type: ignore + + text_frame.clear() # type: ignore + shapes_cleared += 1 + + # Check for replacement paragraphs + replacement_shape_data = replacements.get(slide_key, {}).get(shape_key, {}) + if "paragraphs" not in replacement_shape_data: + continue + + shapes_replaced += 1 + + # Add replacement paragraphs + for i, para_data in enumerate(replacement_shape_data["paragraphs"]): + if i == 0: + p = text_frame.paragraphs[0] # type: ignore + else: + p = text_frame.add_paragraph() # type: ignore + + apply_paragraph_properties(p, para_data) + + # Check for issues after replacements + # Save to a temporary file and reload to avoid modifying the presentation during inventory + # (extract_text_inventory accesses font.color which adds empty elements) + import tempfile + + with tempfile.NamedTemporaryFile(suffix=".pptx", delete=False) as tmp: + tmp_path = Path(tmp.name) + prs.save(str(tmp_path)) + + try: + updated_inventory = extract_text_inventory(tmp_path) + updated_overflow = detect_frame_overflow(updated_inventory) + finally: + tmp_path.unlink() # Clean up temp file + + # Check if any text overflow got worse + overflow_errors = [] + for slide_key, shape_overflows in updated_overflow.items(): + for shape_key, new_overflow in shape_overflows.items(): + # Get original overflow (0 if there was no overflow before) + original = original_overflow.get(slide_key, 
{}).get(shape_key, 0.0) + + # Error if overflow increased + if new_overflow > original + 0.01: # Small tolerance for rounding + increase = new_overflow - original + overflow_errors.append( + f'{slide_key}/{shape_key}: overflow worsened by {increase:.2f}" ' + f'(was {original:.2f}", now {new_overflow:.2f}")' + ) + + # Collect warnings from updated shapes + warnings = [] + for slide_key, shapes_dict in updated_inventory.items(): + for shape_key, shape_data in shapes_dict.items(): + if shape_data.warnings: + for warning in shape_data.warnings: + warnings.append(f"{slide_key}/{shape_key}: {warning}") + + # Fail if there are any issues + if overflow_errors or warnings: + print("\nERROR: Issues detected in replacement output:") + if overflow_errors: + print("\nText overflow worsened:") + for error in overflow_errors: + print(f" - {error}") + if warnings: + print("\nFormatting warnings:") + for warning in warnings: + print(f" - {warning}") + print("\nPlease fix these issues before saving.") + raise ValueError( + f"Found {len(overflow_errors)} overflow error(s) and {len(warnings)} warning(s)" + ) + + # Save the presentation + prs.save(output_file) + + # Report results + print(f"Saved updated presentation to: {output_file}") + print(f"Processed {len(prs.slides)} slides") + print(f" - Shapes processed: {shapes_processed}") + print(f" - Shapes cleared: {shapes_cleared}") + print(f" - Shapes replaced: {shapes_replaced}") + + +def main(): + """Main entry point for command-line usage.""" + if len(sys.argv) != 4: + print(__doc__) + sys.exit(1) + + input_pptx = Path(sys.argv[1]) + replacements_json = Path(sys.argv[2]) + output_pptx = Path(sys.argv[3]) + + if not input_pptx.exists(): + print(f"Error: Input file '{input_pptx}' not found") + sys.exit(1) + + if not replacements_json.exists(): + print(f"Error: Replacements JSON file '{replacements_json}' not found") + sys.exit(1) + + try: + apply_replacements(str(input_pptx), str(replacements_json), str(output_pptx)) + except Exception as e: + print(f"Error applying replacements: {e}") + import traceback + + traceback.print_exc() + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/.claude/skills/pptx/scripts/thumbnail.py b/.claude/skills/pptx/scripts/thumbnail.py new file mode 100644 index 0000000..5c7fdf1 --- /dev/null +++ b/.claude/skills/pptx/scripts/thumbnail.py @@ -0,0 +1,450 @@ +#!/usr/bin/env python3 +""" +Create thumbnail grids from PowerPoint presentation slides. + +Creates a grid layout of slide thumbnails with configurable columns (max 6). +Each grid contains up to cols×(cols+1) images. For presentations with more +slides, multiple numbered grid files are created automatically. + +The program outputs the names of all files created. + +Output: +- Single grid: {prefix}.jpg (if slides fit in one grid) +- Multiple grids: {prefix}-1.jpg, {prefix}-2.jpg, etc. 
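+
+With the default 5 columns, each grid therefore holds up to 5 x 6 = 30
+thumbnails; a 53-slide deck, for example, yields two grids of 30 and 23.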
+ +Grid limits by column count: +- 3 cols: max 12 slides per grid (3×4) +- 4 cols: max 20 slides per grid (4×5) +- 5 cols: max 30 slides per grid (5×6) [default] +- 6 cols: max 42 slides per grid (6×7) + +Usage: + python thumbnail.py input.pptx [output_prefix] [--cols N] [--outline-placeholders] + +Examples: + python thumbnail.py presentation.pptx + # Creates: thumbnails.jpg (using default prefix) + # Outputs: + # Created 1 grid(s): + # - thumbnails.jpg + + python thumbnail.py large-deck.pptx grid --cols 4 + # Creates: grid-1.jpg, grid-2.jpg, grid-3.jpg + # Outputs: + # Created 3 grid(s): + # - grid-1.jpg + # - grid-2.jpg + # - grid-3.jpg + + python thumbnail.py template.pptx analysis --outline-placeholders + # Creates thumbnail grids with red outlines around text placeholders +""" + +import argparse +import subprocess +import sys +import tempfile +from pathlib import Path + +from inventory import extract_text_inventory +from PIL import Image, ImageDraw, ImageFont +from pptx import Presentation + +# Constants +THUMBNAIL_WIDTH = 300 # Fixed thumbnail width in pixels +CONVERSION_DPI = 100 # DPI for PDF to image conversion +MAX_COLS = 6 # Maximum number of columns +DEFAULT_COLS = 5 # Default number of columns +JPEG_QUALITY = 95 # JPEG compression quality + +# Grid layout constants +GRID_PADDING = 20 # Padding between thumbnails +BORDER_WIDTH = 2 # Border width around thumbnails +FONT_SIZE_RATIO = 0.12 # Font size as fraction of thumbnail width +LABEL_PADDING_RATIO = 0.4 # Label padding as fraction of font size + + +def main(): + parser = argparse.ArgumentParser( + description="Create thumbnail grids from PowerPoint slides." + ) + parser.add_argument("input", help="Input PowerPoint file (.pptx)") + parser.add_argument( + "output_prefix", + nargs="?", + default="thumbnails", + help="Output prefix for image files (default: thumbnails, will create prefix.jpg or prefix-N.jpg)", + ) + parser.add_argument( + "--cols", + type=int, + default=DEFAULT_COLS, + help=f"Number of columns (default: {DEFAULT_COLS}, max: {MAX_COLS})", + ) + parser.add_argument( + "--outline-placeholders", + action="store_true", + help="Outline text placeholders with a colored border", + ) + + args = parser.parse_args() + + # Validate columns + cols = min(args.cols, MAX_COLS) + if args.cols > MAX_COLS: + print(f"Warning: Columns limited to {MAX_COLS} (requested {args.cols})") + + # Validate input + input_path = Path(args.input) + if not input_path.exists() or input_path.suffix.lower() != ".pptx": + print(f"Error: Invalid PowerPoint file: {args.input}") + sys.exit(1) + + # Construct output path (always JPG) + output_path = Path(f"{args.output_prefix}.jpg") + + print(f"Processing: {args.input}") + + try: + with tempfile.TemporaryDirectory() as temp_dir: + # Get placeholder regions if outlining is enabled + placeholder_regions = None + slide_dimensions = None + if args.outline_placeholders: + print("Extracting placeholder regions...") + placeholder_regions, slide_dimensions = get_placeholder_regions( + input_path + ) + if placeholder_regions: + print(f"Found placeholders on {len(placeholder_regions)} slides") + + # Convert slides to images + slide_images = convert_to_images(input_path, Path(temp_dir), CONVERSION_DPI) + if not slide_images: + print("Error: No slides found") + sys.exit(1) + + print(f"Found {len(slide_images)} slides") + + # Create grids (max cols×(cols+1) images per grid) + grid_files = create_grids( + slide_images, + cols, + THUMBNAIL_WIDTH, + output_path, + placeholder_regions, + slide_dimensions, + ) + + # Print 
saved files
+            print(f"Created {len(grid_files)} grid(s):")
+            for grid_file in grid_files:
+                print(f"  - {grid_file}")
+
+    except Exception as e:
+        print(f"Error: {e}")
+        sys.exit(1)
+
+
+def create_hidden_slide_placeholder(size):
+    """Create placeholder image for hidden slides."""
+    img = Image.new("RGB", size, color="#F0F0F0")
+    draw = ImageDraw.Draw(img)
+    line_width = max(5, min(size) // 100)
+    draw.line([(0, 0), size], fill="#CCCCCC", width=line_width)
+    draw.line([(size[0], 0), (0, size[1])], fill="#CCCCCC", width=line_width)
+    return img
+
+
+def get_placeholder_regions(pptx_path):
+    """Extract ALL text regions from the presentation.
+
+    Returns a tuple of (placeholder_regions, slide_dimensions).
+    placeholder_regions is a dict mapping slide indices to lists of text regions.
+    Each region is a dict with 'left', 'top', 'width', 'height' in inches.
+    slide_dimensions is a tuple of (width_inches, height_inches).
+    """
+    prs = Presentation(str(pptx_path))
+    inventory = extract_text_inventory(pptx_path, prs)
+    placeholder_regions = {}
+
+    # Get actual slide dimensions in inches (EMU to inches conversion)
+    slide_width_inches = (prs.slide_width or 9144000) / 914400.0
+    slide_height_inches = (prs.slide_height or 5143500) / 914400.0
+
+    for slide_key, shapes in inventory.items():
+        # Extract slide index from "slide-N" format
+        slide_idx = int(slide_key.split("-")[1])
+        regions = []
+
+        for shape_key, shape_data in shapes.items():
+            # The inventory only contains shapes with text, so all shapes should be highlighted
+            regions.append(
+                {
+                    "left": shape_data.left,
+                    "top": shape_data.top,
+                    "width": shape_data.width,
+                    "height": shape_data.height,
+                }
+            )
+
+        if regions:
+            placeholder_regions[slide_idx] = regions
+
+    return placeholder_regions, (slide_width_inches, slide_height_inches)
+
+
+def convert_to_images(pptx_path, temp_dir, dpi):
+    """Convert PowerPoint to images via PDF, handling hidden slides."""
+    # Detect hidden slides
+    print("Analyzing presentation...")
+    prs = Presentation(str(pptx_path))
+    total_slides = len(prs.slides)
+
+    # Find hidden slides (1-based indexing for display)
+    hidden_slides = {
+        idx + 1
+        for idx, slide in enumerate(prs.slides)
+        if slide.element.get("show") == "0"
+    }
+
+    print(f"Total slides: {total_slides}")
+    if hidden_slides:
+        print(f"Hidden slides: {sorted(hidden_slides)}")
+
+    pdf_path = temp_dir / f"{pptx_path.stem}.pdf"
+
+    # Convert to PDF
+    print("Converting to PDF...")
+    result = subprocess.run(
+        [
+            "soffice",
+            "--headless",
+            "--convert-to",
+            "pdf",
+            "--outdir",
+            str(temp_dir),
+            str(pptx_path),
+        ],
+        capture_output=True,
+        text=True,
+    )
+    if result.returncode != 0 or not pdf_path.exists():
+        raise RuntimeError("PDF conversion failed")
+
+    # Convert PDF to images
+    print(f"Converting to images at {dpi} DPI...")
+    result = subprocess.run(
+        ["pdftoppm", "-jpeg", "-r", str(dpi), str(pdf_path), str(temp_dir / "slide")],
+        capture_output=True,
+        text=True,
+    )
+    if result.returncode != 0:
+        raise RuntimeError("Image conversion failed")
+
+    visible_images = sorted(temp_dir.glob("slide-*.jpg"))
+
+    # Create full list with placeholders for hidden slides
+    all_images = []
+    visible_idx = 0
+
+    # Get placeholder dimensions from first visible slide
+    if visible_images:
+        with Image.open(visible_images[0]) as img:
+            placeholder_size = img.size
+    else:
+        placeholder_size = (1920, 1080)
+
+    for slide_num in range(1, total_slides + 1):
+        if slide_num in hidden_slides:
+            # Create placeholder image for hidden slide
+            placeholder_path = temp_dir / 
f"hidden-{slide_num:03d}.jpg" + placeholder_img = create_hidden_slide_placeholder(placeholder_size) + placeholder_img.save(placeholder_path, "JPEG") + all_images.append(placeholder_path) + else: + # Use the actual visible slide image + if visible_idx < len(visible_images): + all_images.append(visible_images[visible_idx]) + visible_idx += 1 + + return all_images + + +def create_grids( + image_paths, + cols, + width, + output_path, + placeholder_regions=None, + slide_dimensions=None, +): + """Create multiple thumbnail grids from slide images, max cols×(cols+1) images per grid.""" + # Maximum images per grid is cols × (cols + 1) for better proportions + max_images_per_grid = cols * (cols + 1) + grid_files = [] + + print( + f"Creating grids with {cols} columns (max {max_images_per_grid} images per grid)" + ) + + # Split images into chunks + for chunk_idx, start_idx in enumerate( + range(0, len(image_paths), max_images_per_grid) + ): + end_idx = min(start_idx + max_images_per_grid, len(image_paths)) + chunk_images = image_paths[start_idx:end_idx] + + # Create grid for this chunk + grid = create_grid( + chunk_images, cols, width, start_idx, placeholder_regions, slide_dimensions + ) + + # Generate output filename + if len(image_paths) <= max_images_per_grid: + # Single grid - use base filename without suffix + grid_filename = output_path + else: + # Multiple grids - insert index before extension with dash + stem = output_path.stem + suffix = output_path.suffix + grid_filename = output_path.parent / f"{stem}-{chunk_idx + 1}{suffix}" + + # Save grid + grid_filename.parent.mkdir(parents=True, exist_ok=True) + grid.save(str(grid_filename), quality=JPEG_QUALITY) + grid_files.append(str(grid_filename)) + + return grid_files + + +def create_grid( + image_paths, + cols, + width, + start_slide_num=0, + placeholder_regions=None, + slide_dimensions=None, +): + """Create thumbnail grid from slide images with optional placeholder outlining.""" + font_size = int(width * FONT_SIZE_RATIO) + label_padding = int(font_size * LABEL_PADDING_RATIO) + + # Get dimensions + with Image.open(image_paths[0]) as img: + aspect = img.height / img.width + height = int(width * aspect) + + # Calculate grid size + rows = (len(image_paths) + cols - 1) // cols + grid_w = cols * width + (cols + 1) * GRID_PADDING + grid_h = rows * (height + font_size + label_padding * 2) + (rows + 1) * GRID_PADDING + + # Create grid + grid = Image.new("RGB", (grid_w, grid_h), "white") + draw = ImageDraw.Draw(grid) + + # Load font with size based on thumbnail width + try: + # Use Pillow's default font with size + font = ImageFont.load_default(size=font_size) + except Exception: + # Fall back to basic default font if size parameter not supported + font = ImageFont.load_default() + + # Place thumbnails + for i, img_path in enumerate(image_paths): + row, col = i // cols, i % cols + x = col * width + (col + 1) * GRID_PADDING + y_base = ( + row * (height + font_size + label_padding * 2) + (row + 1) * GRID_PADDING + ) + + # Add label with actual slide number + label = f"{start_slide_num + i}" + bbox = draw.textbbox((0, 0), label, font=font) + text_w = bbox[2] - bbox[0] + draw.text( + (x + (width - text_w) // 2, y_base + label_padding), + label, + fill="black", + font=font, + ) + + # Add thumbnail below label with proportional spacing + y_thumbnail = y_base + label_padding + font_size + label_padding + + with Image.open(img_path) as img: + # Get original dimensions before thumbnail + orig_w, orig_h = img.size + + # Apply placeholder outlines if enabled + if 
placeholder_regions and (start_slide_num + i) in placeholder_regions: + # Convert to RGBA for transparency support + if img.mode != "RGBA": + img = img.convert("RGBA") + + # Get the regions for this slide + regions = placeholder_regions[start_slide_num + i] + + # Calculate scale factors using actual slide dimensions + if slide_dimensions: + slide_width_inches, slide_height_inches = slide_dimensions + else: + # Fallback: estimate from image size at CONVERSION_DPI + slide_width_inches = orig_w / CONVERSION_DPI + slide_height_inches = orig_h / CONVERSION_DPI + + x_scale = orig_w / slide_width_inches + y_scale = orig_h / slide_height_inches + + # Create a highlight overlay + overlay = Image.new("RGBA", img.size, (255, 255, 255, 0)) + overlay_draw = ImageDraw.Draw(overlay) + + # Highlight each placeholder region + for region in regions: + # Convert from inches to pixels in the original image + px_left = int(region["left"] * x_scale) + px_top = int(region["top"] * y_scale) + px_width = int(region["width"] * x_scale) + px_height = int(region["height"] * y_scale) + + # Draw highlight outline with red color and thick stroke + # Using a bright red outline instead of fill + stroke_width = max( + 5, min(orig_w, orig_h) // 150 + ) # Thicker proportional stroke width + overlay_draw.rectangle( + [(px_left, px_top), (px_left + px_width, px_top + px_height)], + outline=(255, 0, 0, 255), # Bright red, fully opaque + width=stroke_width, + ) + + # Composite the overlay onto the image using alpha blending + img = Image.alpha_composite(img, overlay) + # Convert back to RGB for JPEG saving + img = img.convert("RGB") + + img.thumbnail((width, height), Image.Resampling.LANCZOS) + w, h = img.size + tx = x + (width - w) // 2 + ty = y_thumbnail + (height - h) // 2 + grid.paste(img, (tx, ty)) + + # Add border + if BORDER_WIDTH > 0: + draw.rectangle( + [ + (tx - BORDER_WIDTH, ty - BORDER_WIDTH), + (tx + w + BORDER_WIDTH - 1, ty + h + BORDER_WIDTH - 1), + ], + outline="gray", + width=BORDER_WIDTH, + ) + + return grid + + +if __name__ == "__main__": + main() diff --git a/.claude/skills/skill-creator/LICENSE.txt b/.claude/skills/skill-creator/LICENSE.txt new file mode 100644 index 0000000..7a4a3ea --- /dev/null +++ b/.claude/skills/skill-creator/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/.claude/skills/skill-creator/SKILL.md b/.claude/skills/skill-creator/SKILL.md new file mode 100644 index 0000000..b7f8659 --- /dev/null +++ b/.claude/skills/skill-creator/SKILL.md @@ -0,0 +1,356 @@ +--- +name: skill-creator +description: Guide for creating effective skills. This skill should be used when users want to create a new skill (or update an existing skill) that extends Claude's capabilities with specialized knowledge, workflows, or tool integrations. +license: Complete terms in LICENSE.txt +--- + +# Skill Creator + +This skill provides guidance for creating effective skills. + +## About Skills + +Skills are modular, self-contained packages that extend Claude's capabilities by providing +specialized knowledge, workflows, and tools. Think of them as "onboarding guides" for specific +domains or tasks—they transform Claude from a general-purpose agent into a specialized agent +equipped with procedural knowledge that no model can fully possess. 
+ +### What Skills Provide + +1. Specialized workflows - Multi-step procedures for specific domains +2. Tool integrations - Instructions for working with specific file formats or APIs +3. Domain expertise - Company-specific knowledge, schemas, business logic +4. Bundled resources - Scripts, references, and assets for complex and repetitive tasks + +## Core Principles + +### Concise is Key + +The context window is a public good. Skills share the context window with everything else Claude needs: system prompt, conversation history, other Skills' metadata, and the actual user request. + +**Default assumption: Claude is already very smart.** Only add context Claude doesn't already have. Challenge each piece of information: "Does Claude really need this explanation?" and "Does this paragraph justify its token cost?" + +Prefer concise examples over verbose explanations. + +### Set Appropriate Degrees of Freedom + +Match the level of specificity to the task's fragility and variability: + +**High freedom (text-based instructions)**: Use when multiple approaches are valid, decisions depend on context, or heuristics guide the approach. + +**Medium freedom (pseudocode or scripts with parameters)**: Use when a preferred pattern exists, some variation is acceptable, or configuration affects behavior. + +**Low freedom (specific scripts, few parameters)**: Use when operations are fragile and error-prone, consistency is critical, or a specific sequence must be followed. + +Think of Claude as exploring a path: a narrow bridge with cliffs needs specific guardrails (low freedom), while an open field allows many routes (high freedom). + +### Anatomy of a Skill + +Every skill consists of a required SKILL.md file and optional bundled resources: + +``` +skill-name/ +├── SKILL.md (required) +│ ├── YAML frontmatter metadata (required) +│ │ ├── name: (required) +│ │ └── description: (required) +│ └── Markdown instructions (required) +└── Bundled Resources (optional) + ├── scripts/ - Executable code (Python/Bash/etc.) + ├── references/ - Documentation intended to be loaded into context as needed + └── assets/ - Files used in output (templates, icons, fonts, etc.) +``` + +#### SKILL.md (required) + +Every SKILL.md consists of: + +- **Frontmatter** (YAML): Contains `name` and `description` fields. These are the only fields that Claude reads to determine when the skill gets used, thus it is very important to be clear and comprehensive in describing what the skill is, and when it should be used. +- **Body** (Markdown): Instructions and guidance for using the skill. Only loaded AFTER the skill triggers (if at all). + +#### Bundled Resources (optional) + +##### Scripts (`scripts/`) + +Executable code (Python/Bash/etc.) for tasks that require deterministic reliability or are repeatedly rewritten. + +- **When to include**: When the same code is being rewritten repeatedly or deterministic reliability is needed +- **Example**: `scripts/rotate_pdf.py` for PDF rotation tasks +- **Benefits**: Token efficient, deterministic, may be executed without loading into context +- **Note**: Scripts may still need to be read by Claude for patching or environment-specific adjustments + +##### References (`references/`) + +Documentation and reference material intended to be loaded as needed into context to inform Claude's process and thinking. 
+
+- **When to include**: For documentation that Claude should reference while working
+- **Examples**: `references/finance.md` for financial schemas, `references/mnda.md` for company NDA template, `references/policies.md` for company policies, `references/api_docs.md` for API specifications
+- **Use cases**: Database schemas, API documentation, domain knowledge, company policies, detailed workflow guides
+- **Benefits**: Keeps SKILL.md lean, loaded only when Claude determines it's needed
+- **Best practice**: If files are large (>10k words), include grep search patterns in SKILL.md
+- **Avoid duplication**: Information should live in either SKILL.md or references files, not both. Prefer references files for detailed information unless it's truly core to the skill; this keeps SKILL.md lean while making information discoverable without hogging the context window. Keep only essential procedural instructions and workflow guidance in SKILL.md; move detailed reference material, schemas, and examples to references files.
+
+##### Assets (`assets/`)
+
+Files not intended to be loaded into context, but rather used within the output Claude produces.
+
+- **When to include**: When the skill needs files that will be used in the final output
+- **Examples**: `assets/logo.png` for brand assets, `assets/slides.pptx` for PowerPoint templates, `assets/frontend-template/` for HTML/React boilerplate, `assets/font.ttf` for typography
+- **Use cases**: Templates, images, icons, boilerplate code, fonts, sample documents that get copied or modified
+- **Benefits**: Separates output resources from documentation, enables Claude to use files without loading them into context
+
+#### What Not to Include in a Skill
+
+A skill should only contain essential files that directly support its functionality. Do NOT create extraneous documentation or auxiliary files, including:
+
+- README.md
+- INSTALLATION_GUIDE.md
+- QUICK_REFERENCE.md
+- CHANGELOG.md
+- etc.
+
+The skill should only contain the information needed for an AI agent to do the job at hand. It should not contain auxiliary context about the process that went into creating it, setup and testing procedures, user-facing documentation, etc. Creating additional documentation files just adds clutter and confusion.
+
+### Progressive Disclosure Design Principle
+
+Skills use a three-level loading system to manage context efficiently:
+
+1. **Metadata (name + description)** - Always in context (~100 words)
+2. **SKILL.md body** - When skill triggers (<5k words)
+3. **Bundled resources** - As needed by Claude (effectively unlimited, because scripts can be executed without being read into the context window)
+
+#### Progressive Disclosure Patterns
+
+Keep SKILL.md body to the essentials and under 500 lines to minimize context bloat. Split content into separate files when approaching this limit. When splitting out content into other files, it is very important to reference them from SKILL.md and describe clearly when to read them, to ensure the reader of the skill knows they exist and when to use them.
+
+**Key principle:** When a skill supports multiple variations, frameworks, or options, keep only the core workflow and selection guidance in SKILL.md. Move variant-specific details (patterns, examples, configuration) into separate reference files.
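+
+To make level 1 concrete, here is a minimal sketch of metadata loading: only the frontmatter of each SKILL.md enters context up front, while bodies and bundled resources stay on disk until needed. The `skills/` layout and helper are illustrative assumptions, not part of any skill runtime.
+
+```python
+import re
+from pathlib import Path
+
+import yaml
+
+
+def load_skill_metadata(skills_root="skills"):
+    """Collect only name + description from each SKILL.md (level 1)."""
+    metadata = []
+    for skill_md in Path(skills_root).glob("*/SKILL.md"):
+        match = re.match(r"^---\n(.*?)\n---", skill_md.read_text(), re.DOTALL)
+        if not match:
+            continue  # malformed frontmatter; validation catches this earlier
+        frontmatter = yaml.safe_load(match.group(1)) or {}
+        # Roughly 100 words per skill enter context here; the body at
+        # skill_md is read later, only if the description triggers the skill.
+        metadata.append(
+            {
+                "name": frontmatter.get("name"),
+                "description": frontmatter.get("description"),
+                "path": skill_md,
+            }
+        )
+    return metadata
+```
+
+Levels 2 and 3 are then loaded on demand: the SKILL.md body when the skill triggers, and bundled resources only when the work calls for them.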
+ +**Pattern 1: High-level guide with references** + +```markdown +# PDF Processing + +## Quick start + +Extract text with pdfplumber: +[code example] + +## Advanced features + +- **Form filling**: See [FORMS.md](FORMS.md) for complete guide +- **API reference**: See [REFERENCE.md](REFERENCE.md) for all methods +- **Examples**: See [EXAMPLES.md](EXAMPLES.md) for common patterns +``` + +Claude loads FORMS.md, REFERENCE.md, or EXAMPLES.md only when needed. + +**Pattern 2: Domain-specific organization** + +For Skills with multiple domains, organize content by domain to avoid loading irrelevant context: + +``` +bigquery-skill/ +├── SKILL.md (overview and navigation) +└── reference/ + ├── finance.md (revenue, billing metrics) + ├── sales.md (opportunities, pipeline) + ├── product.md (API usage, features) + └── marketing.md (campaigns, attribution) +``` + +When a user asks about sales metrics, Claude only reads sales.md. + +Similarly, for skills supporting multiple frameworks or variants, organize by variant: + +``` +cloud-deploy/ +├── SKILL.md (workflow + provider selection) +└── references/ + ├── aws.md (AWS deployment patterns) + ├── gcp.md (GCP deployment patterns) + └── azure.md (Azure deployment patterns) +``` + +When the user chooses AWS, Claude only reads aws.md. + +**Pattern 3: Conditional details** + +Show basic content, link to advanced content: + +```markdown +# DOCX Processing + +## Creating documents + +Use docx-js for new documents. See [DOCX-JS.md](DOCX-JS.md). + +## Editing documents + +For simple edits, modify the XML directly. + +**For tracked changes**: See [REDLINING.md](REDLINING.md) +**For OOXML details**: See [OOXML.md](OOXML.md) +``` + +Claude reads REDLINING.md or OOXML.md only when the user needs those features. + +**Important guidelines:** + +- **Avoid deeply nested references** - Keep references one level deep from SKILL.md. All reference files should link directly from SKILL.md. +- **Structure longer reference files** - For files longer than 100 lines, include a table of contents at the top so Claude can see the full scope when previewing. + +## Skill Creation Process + +Skill creation involves these steps: + +1. Understand the skill with concrete examples +2. Plan reusable skill contents (scripts, references, assets) +3. Initialize the skill (run init_skill.py) +4. Edit the skill (implement resources and write SKILL.md) +5. Package the skill (run package_skill.py) +6. Iterate based on real usage + +Follow these steps in order, skipping only if there is a clear reason why they are not applicable. + +### Step 1: Understanding the Skill with Concrete Examples + +Skip this step only when the skill's usage patterns are already clearly understood. It remains valuable even when working with an existing skill. + +To create an effective skill, clearly understand concrete examples of how the skill will be used. This understanding can come from either direct user examples or generated examples that are validated with user feedback. + +For example, when building an image-editor skill, relevant questions include: + +- "What functionality should the image-editor skill support? Editing, rotating, anything else?" +- "Can you give some examples of how this skill would be used?" +- "I can imagine users asking for things like 'Remove the red-eye from this image' or 'Rotate this image'. Are there other ways you imagine this skill being used?" +- "What would a user say that should trigger this skill?" + +To avoid overwhelming users, avoid asking too many questions in a single message. 
Start with the most important questions and follow up as needed.
+
+Conclude this step when there is a clear sense of the functionality the skill should support.
+
+### Step 2: Planning the Reusable Skill Contents
+
+To turn concrete examples into an effective skill, analyze each example by:
+
+1. Considering how to execute on the example from scratch
+2. Identifying what scripts, references, and assets would be helpful when executing these workflows repeatedly
+
+Example: When building a `pdf-editor` skill to handle queries like "Help me rotate this PDF," the analysis shows:
+
+1. Rotating a PDF requires rewriting the same code each time
+2. A `scripts/rotate_pdf.py` script would be helpful to store in the skill
+
+Example: When designing a `frontend-webapp-builder` skill for queries like "Build me a todo app" or "Build me a dashboard to track my steps," the analysis shows:
+
+1. Writing a frontend webapp requires the same boilerplate HTML/React each time
+2. An `assets/hello-world/` template containing the boilerplate HTML/React project files would be helpful to store in the skill
+
+Example: When building a `big-query` skill to handle queries like "How many users have logged in today?" the analysis shows:
+
+1. Querying BigQuery requires re-discovering the table schemas and relationships each time
+2. A `references/schema.md` file documenting the table schemas would be helpful to store in the skill
+
+To establish the skill's contents, analyze each concrete example to create a list of the reusable resources to include: scripts, references, and assets.
+
+### Step 3: Initializing the Skill
+
+At this point, it is time to actually create the skill.
+
+Skip this step only if the skill being developed already exists and just needs iteration or packaging. In that case, continue to the next step.
+
+When creating a new skill from scratch, always run the `init_skill.py` script. The script generates a new template skill directory that automatically includes everything a skill requires, making the skill creation process more efficient and reliable.
+
+Usage:
+
+```bash
+scripts/init_skill.py <skill-name> --path <path>
+```
+
+The script:
+
+- Creates the skill directory at the specified path
+- Generates a SKILL.md template with proper frontmatter and TODO placeholders
+- Creates example resource directories: `scripts/`, `references/`, and `assets/`
+- Adds example files in each directory that can be customized or deleted
+
+After initialization, customize or remove the generated SKILL.md and example files as needed.
+
+### Step 4: Edit the Skill
+
+When editing the (newly-generated or existing) skill, remember that the skill is being created for another instance of Claude to use. Include information that would be beneficial and non-obvious to Claude. Consider what procedural knowledge, domain-specific details, or reusable assets would help another Claude instance execute these tasks more effectively.
+
+#### Learn Proven Design Patterns
+
+Consult these helpful guides based on your skill's needs:
+
+- **Multi-step processes**: See references/workflows.md for sequential workflows and conditional logic
+- **Specific output formats or quality standards**: See references/output-patterns.md for template and example patterns
+
+These files contain established best practices for effective skill design.
+
+#### Start with Reusable Skill Contents
+
+To begin implementation, start with the reusable resources identified above: `scripts/`, `references/`, and `assets/` files; a sketch of a typical bundled script follows below.
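+
+Here is a minimal sketch of the `scripts/rotate_pdf.py` example from Step 2, assuming the `pypdf` library is available; treat it as an illustration of the shape of a bundled script, not a definitive implementation:
+
+```python
+#!/usr/bin/env python3
+"""Rotate every page of a PDF by a fixed angle."""
+import sys
+
+from pypdf import PdfReader, PdfWriter
+
+
+def rotate_pdf(input_path, output_path, degrees=90):
+    writer = PdfWriter()
+    for page in PdfReader(input_path).pages:
+        writer.add_page(page.rotate(degrees))  # rotate() returns the page
+    with open(output_path, "wb") as fh:
+        writer.write(fh)
+
+
+if __name__ == "__main__":
+    degrees = int(sys.argv[3]) if len(sys.argv) > 3 else 90
+    rotate_pdf(sys.argv[1], sys.argv[2], degrees)
+```
+
+Small single-purpose scripts like this are cheap to test directly on a sample file and spare Claude from rewriting the same code on every use.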
+Note that this step may require user input. For example, when implementing a `brand-guidelines` skill, the user may need to provide brand assets or templates to store in `assets/`, or documentation to store in `references/`.
+
+Added scripts must be tested by actually running them to ensure there are no bugs and that the output matches what is expected. If there are many similar scripts, only a representative sample needs to be tested, balancing confidence that they all work against time to completion.
+
+Any example files and directories not needed for the skill should be deleted. The initialization script creates example files in `scripts/`, `references/`, and `assets/` to demonstrate structure, but most skills won't need all of them.
+
+#### Update SKILL.md
+
+**Writing Guidelines:** Always use imperative/infinitive form.
+
+##### Frontmatter
+
+Write the YAML frontmatter with `name` and `description`:
+
+- `name`: The skill name
+- `description`: This is the primary triggering mechanism for the skill, and helps Claude understand when to use it.
+  - Include both what the skill does and specific triggers/contexts for when to use it.
+  - Include all "when to use" information here, not in the body. The body is only loaded after triggering, so "When to Use This Skill" sections in the body are not helpful to Claude.
+  - Example description for a `docx` skill: "Comprehensive document creation, editing, and analysis with support for tracked changes, comments, formatting preservation, and text extraction. Use when Claude needs to work with professional documents (.docx files) for: (1) Creating new documents, (2) Modifying or editing content, (3) Working with tracked changes, (4) Adding comments, or any other document tasks"
+
+Do not include any other fields in YAML frontmatter.
+
+##### Body
+
+Write instructions for using the skill and its bundled resources.
+
+### Step 5: Packaging a Skill
+
+Once development of the skill is complete, it must be packaged into a distributable .skill file that gets shared with the user. The packaging process automatically validates the skill first to ensure it meets all requirements:
+
+```bash
+scripts/package_skill.py <skill-path>
+```
+
+Optional output directory specification:
+
+```bash
+scripts/package_skill.py <skill-path> ./dist
+```
+
+The packaging script will:
+
+1. **Validate** the skill automatically, checking:
+
+   - YAML frontmatter format and required fields
+   - Skill naming conventions and directory structure
+   - Description completeness and quality
+   - File organization and resource references
+
+2. **Package** the skill if validation passes, creating a .skill file named after the skill (e.g., `my-skill.skill`) that includes all files and maintains the proper directory structure for distribution. The .skill file is a zip file with a .skill extension.
+
+If validation fails, the script will report the errors and exit without creating a package. Fix any validation errors and run the packaging command again.
+
+### Step 6: Iterate
+
+After testing the skill, users may request improvements. Often this happens right after using the skill, with fresh context of how the skill performed.
+
+**Iteration workflow:**
+
+1. Use the skill on real tasks
+2. Notice struggles or inefficiencies
+3. Identify how SKILL.md or bundled resources should be updated
+4. 
Implement changes and test again diff --git a/.claude/skills/skill-creator/references/output-patterns.md b/.claude/skills/skill-creator/references/output-patterns.md new file mode 100644 index 0000000..073ddda --- /dev/null +++ b/.claude/skills/skill-creator/references/output-patterns.md @@ -0,0 +1,82 @@ +# Output Patterns + +Use these patterns when skills need to produce consistent, high-quality output. + +## Template Pattern + +Provide templates for output format. Match the level of strictness to your needs. + +**For strict requirements (like API responses or data formats):** + +```markdown +## Report structure + +ALWAYS use this exact template structure: + +# [Analysis Title] + +## Executive summary +[One-paragraph overview of key findings] + +## Key findings +- Finding 1 with supporting data +- Finding 2 with supporting data +- Finding 3 with supporting data + +## Recommendations +1. Specific actionable recommendation +2. Specific actionable recommendation +``` + +**For flexible guidance (when adaptation is useful):** + +```markdown +## Report structure + +Here is a sensible default format, but use your best judgment: + +# [Analysis Title] + +## Executive summary +[Overview] + +## Key findings +[Adapt sections based on what you discover] + +## Recommendations +[Tailor to the specific context] + +Adjust sections as needed for the specific analysis type. +``` + +## Examples Pattern + +For skills where output quality depends on seeing examples, provide input/output pairs: + +```markdown +## Commit message format + +Generate commit messages following these examples: + +**Example 1:** +Input: Added user authentication with JWT tokens +Output: +``` +feat(auth): implement JWT-based authentication + +Add login endpoint and token validation middleware +``` + +**Example 2:** +Input: Fixed bug where dates displayed incorrectly in reports +Output: +``` +fix(reports): correct date formatting in timezone conversion + +Use UTC timestamps consistently across report generation +``` + +Follow this style: type(scope): brief description, then detailed explanation. +``` + +Examples help Claude understand the desired style and level of detail more clearly than descriptions alone. diff --git a/.claude/skills/skill-creator/references/workflows.md b/.claude/skills/skill-creator/references/workflows.md new file mode 100644 index 0000000..a350c3c --- /dev/null +++ b/.claude/skills/skill-creator/references/workflows.md @@ -0,0 +1,28 @@ +# Workflow Patterns + +## Sequential Workflows + +For complex tasks, break operations into clear, sequential steps. It is often helpful to give Claude an overview of the process towards the beginning of SKILL.md: + +```markdown +Filling a PDF form involves these steps: + +1. Analyze the form (run analyze_form.py) +2. Create field mapping (edit fields.json) +3. Validate mapping (run validate_fields.py) +4. Fill the form (run fill_form.py) +5. Verify output (run verify_output.py) +``` + +## Conditional Workflows + +For tasks with branching logic, guide Claude through decision points: + +```markdown +1. Determine the modification type: + **Creating new content?** → Follow "Creation workflow" below + **Editing existing content?** → Follow "Editing workflow" below + +2. Creation workflow: [steps] +3. 
Editing workflow: [steps]
+```
\ No newline at end of file
diff --git a/.claude/skills/skill-creator/scripts/init_skill.py b/.claude/skills/skill-creator/scripts/init_skill.py
new file mode 100644
index 0000000..329ad4e
--- /dev/null
+++ b/.claude/skills/skill-creator/scripts/init_skill.py
@@ -0,0 +1,303 @@
+#!/usr/bin/env python3
+"""
+Skill Initializer - Creates a new skill from template
+
+Usage:
+    init_skill.py <skill-name> --path <path>
+
+Examples:
+    init_skill.py my-new-skill --path skills/public
+    init_skill.py my-api-helper --path skills/private
+    init_skill.py custom-skill --path /custom/location
+"""
+
+import sys
+from pathlib import Path
+
+
+SKILL_TEMPLATE = """---
+name: {skill_name}
+description: [TODO: Complete and informative explanation of what the skill does and when to use it. Include WHEN to use this skill - specific scenarios, file types, or tasks that trigger it.]
+---
+
+# {skill_title}
+
+## Overview
+
+[TODO: 1-2 sentences explaining what this skill enables]
+
+## Structuring This Skill
+
+[TODO: Choose the structure that best fits this skill's purpose. Common patterns:
+
+**1. Workflow-Based** (best for sequential processes)
+- Works well when there are clear step-by-step procedures
+- Example: DOCX skill with "Workflow Decision Tree" → "Reading" → "Creating" → "Editing"
+- Structure: ## Overview → ## Workflow Decision Tree → ## Step 1 → ## Step 2...
+
+**2. Task-Based** (best for tool collections)
+- Works well when the skill offers different operations/capabilities
+- Example: PDF skill with "Quick Start" → "Merge PDFs" → "Split PDFs" → "Extract Text"
+- Structure: ## Overview → ## Quick Start → ## Task Category 1 → ## Task Category 2...
+
+**3. Reference/Guidelines** (best for standards or specifications)
+- Works well for brand guidelines, coding standards, or requirements
+- Example: Brand styling with "Brand Guidelines" → "Colors" → "Typography" → "Features"
+- Structure: ## Overview → ## Guidelines → ## Specifications → ## Usage...
+
+**4. Capabilities-Based** (best for integrated systems)
+- Works well when the skill provides multiple interrelated features
+- Example: Product Management with "Core Capabilities" → numbered capability list
+- Structure: ## Overview → ## Core Capabilities → ### 1. Feature → ### 2. Feature...
+
+Patterns can be mixed and matched as needed. Most skills combine patterns (e.g., start with task-based, add workflow for complex operations).
+
+Delete this entire "Structuring This Skill" section when done - it's just guidance.]
+
+## [TODO: Replace with the first main section based on chosen structure]
+
+[TODO: Add content here. See examples in existing skills:
+- Code samples for technical skills
+- Decision trees for complex workflows
+- Concrete examples with realistic user requests
+- References to scripts/templates/references as needed]
+
+## Resources
+
+This skill includes example resource directories that demonstrate how to organize different types of bundled resources:
+
+### scripts/
+Executable code (Python/Bash/etc.) that can be run directly to perform specific operations.
+
+**Examples from other skills:**
+- PDF skill: `fill_fillable_fields.py`, `extract_form_field_info.py` - utilities for PDF manipulation
+- DOCX skill: `document.py`, `utilities.py` - Python modules for document processing
+
+**Appropriate for:** Python scripts, shell scripts, or any executable code that performs automation, data processing, or specific operations. 
+ +**Note:** Scripts may be executed without loading into context, but can still be read by Claude for patching or environment adjustments. + +### references/ +Documentation and reference material intended to be loaded into context to inform Claude's process and thinking. + +**Examples from other skills:** +- Product management: `communication.md`, `context_building.md` - detailed workflow guides +- BigQuery: API reference documentation and query examples +- Finance: Schema documentation, company policies + +**Appropriate for:** In-depth documentation, API references, database schemas, comprehensive guides, or any detailed information that Claude should reference while working. + +### assets/ +Files not intended to be loaded into context, but rather used within the output Claude produces. + +**Examples from other skills:** +- Brand styling: PowerPoint template files (.pptx), logo files +- Frontend builder: HTML/React boilerplate project directories +- Typography: Font files (.ttf, .woff2) + +**Appropriate for:** Templates, boilerplate code, document templates, images, icons, fonts, or any files meant to be copied or used in the final output. + +--- + +**Any unneeded directories can be deleted.** Not every skill requires all three types of resources. +""" + +EXAMPLE_SCRIPT = '''#!/usr/bin/env python3 +""" +Example helper script for {skill_name} + +This is a placeholder script that can be executed directly. +Replace with actual implementation or delete if not needed. + +Example real scripts from other skills: +- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields +- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images +""" + +def main(): + print("This is an example script for {skill_name}") + # TODO: Add actual script logic here + # This could be data processing, file conversion, API calls, etc. + +if __name__ == "__main__": + main() +''' + +EXAMPLE_REFERENCE = """# Reference Documentation for {skill_title} + +This is a placeholder for detailed reference documentation. +Replace with actual reference content or delete if not needed. + +Example real reference docs from other skills: +- product-management/references/communication.md - Comprehensive guide for status updates +- product-management/references/context_building.md - Deep-dive on gathering context +- bigquery/references/ - API references and query examples + +## When Reference Docs Are Useful + +Reference docs are ideal for: +- Comprehensive API documentation +- Detailed workflow guides +- Complex multi-step processes +- Information too lengthy for main SKILL.md +- Content that's only needed for specific use cases + +## Structure Suggestions + +### API Reference Example +- Overview +- Authentication +- Endpoints with examples +- Error codes +- Rate limits + +### Workflow Guide Example +- Prerequisites +- Step-by-step instructions +- Common patterns +- Troubleshooting +- Best practices +""" + +EXAMPLE_ASSET = """# Example Asset File + +This placeholder represents where asset files would be stored. +Replace with actual asset files (templates, images, fonts, etc.) or delete if not needed. + +Asset files are NOT intended to be loaded into context, but rather used within +the output Claude produces. 
+ +Example asset files from other skills: +- Brand guidelines: logo.png, slides_template.pptx +- Frontend builder: hello-world/ directory with HTML/React boilerplate +- Typography: custom-font.ttf, font-family.woff2 +- Data: sample_data.csv, test_dataset.json + +## Common Asset Types + +- Templates: .pptx, .docx, boilerplate directories +- Images: .png, .jpg, .svg, .gif +- Fonts: .ttf, .otf, .woff, .woff2 +- Boilerplate code: Project directories, starter files +- Icons: .ico, .svg +- Data files: .csv, .json, .xml, .yaml + +Note: This is a text placeholder. Actual assets can be any file type. +""" + + +def title_case_skill_name(skill_name): + """Convert hyphenated skill name to Title Case for display.""" + return ' '.join(word.capitalize() for word in skill_name.split('-')) + + +def init_skill(skill_name, path): + """ + Initialize a new skill directory with template SKILL.md. + + Args: + skill_name: Name of the skill + path: Path where the skill directory should be created + + Returns: + Path to created skill directory, or None if error + """ + # Determine skill directory path + skill_dir = Path(path).resolve() / skill_name + + # Check if directory already exists + if skill_dir.exists(): + print(f"❌ Error: Skill directory already exists: {skill_dir}") + return None + + # Create skill directory + try: + skill_dir.mkdir(parents=True, exist_ok=False) + print(f"✅ Created skill directory: {skill_dir}") + except Exception as e: + print(f"❌ Error creating directory: {e}") + return None + + # Create SKILL.md from template + skill_title = title_case_skill_name(skill_name) + skill_content = SKILL_TEMPLATE.format( + skill_name=skill_name, + skill_title=skill_title + ) + + skill_md_path = skill_dir / 'SKILL.md' + try: + skill_md_path.write_text(skill_content) + print("✅ Created SKILL.md") + except Exception as e: + print(f"❌ Error creating SKILL.md: {e}") + return None + + # Create resource directories with example files + try: + # Create scripts/ directory with example script + scripts_dir = skill_dir / 'scripts' + scripts_dir.mkdir(exist_ok=True) + example_script = scripts_dir / 'example.py' + example_script.write_text(EXAMPLE_SCRIPT.format(skill_name=skill_name)) + example_script.chmod(0o755) + print("✅ Created scripts/example.py") + + # Create references/ directory with example reference doc + references_dir = skill_dir / 'references' + references_dir.mkdir(exist_ok=True) + example_reference = references_dir / 'api_reference.md' + example_reference.write_text(EXAMPLE_REFERENCE.format(skill_title=skill_title)) + print("✅ Created references/api_reference.md") + + # Create assets/ directory with example asset placeholder + assets_dir = skill_dir / 'assets' + assets_dir.mkdir(exist_ok=True) + example_asset = assets_dir / 'example_asset.txt' + example_asset.write_text(EXAMPLE_ASSET) + print("✅ Created assets/example_asset.txt") + except Exception as e: + print(f"❌ Error creating resource directories: {e}") + return None + + # Print next steps + print(f"\n✅ Skill '{skill_name}' initialized successfully at {skill_dir}") + print("\nNext steps:") + print("1. Edit SKILL.md to complete the TODO items and update the description") + print("2. Customize or delete the example files in scripts/, references/, and assets/") + print("3. 
Run the validator when ready to check the skill structure")
+
+    return skill_dir
+
+
+def main():
+    if len(sys.argv) < 4 or sys.argv[2] != '--path':
+        print("Usage: init_skill.py <skill-name> --path <path>")
+        print("\nSkill name requirements:")
+        print("  - Hyphen-case identifier (e.g., 'data-analyzer')")
+        print("  - Lowercase letters, digits, and hyphens only")
+        print("  - Max 40 characters")
+        print("  - Must match directory name exactly")
+        print("\nExamples:")
+        print("  init_skill.py my-new-skill --path skills/public")
+        print("  init_skill.py my-api-helper --path skills/private")
+        print("  init_skill.py custom-skill --path /custom/location")
+        sys.exit(1)
+
+    skill_name = sys.argv[1]
+    path = sys.argv[3]
+
+    print(f"🚀 Initializing skill: {skill_name}")
+    print(f"   Location: {path}")
+    print()
+
+    result = init_skill(skill_name, path)
+
+    if result:
+        sys.exit(0)
+    else:
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.claude/skills/skill-creator/scripts/package_skill.py b/.claude/skills/skill-creator/scripts/package_skill.py
new file mode 100644
index 0000000..5cd36cb
--- /dev/null
+++ b/.claude/skills/skill-creator/scripts/package_skill.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+"""
+Skill Packager - Creates a distributable .skill file of a skill folder
+
+Usage:
+    python utils/package_skill.py <skill-path> [output-directory]
+
+Example:
+    python utils/package_skill.py skills/public/my-skill
+    python utils/package_skill.py skills/public/my-skill ./dist
+"""
+
+import sys
+import zipfile
+from pathlib import Path
+from quick_validate import validate_skill
+
+
+def package_skill(skill_path, output_dir=None):
+    """
+    Package a skill folder into a .skill file.
+
+    Args:
+        skill_path: Path to the skill folder
+        output_dir: Optional output directory for the .skill file (defaults to current directory)
+
+    Returns:
+        Path to the created .skill file, or None if error
+    """
+    skill_path = Path(skill_path).resolve()
+
+    # Validate skill folder exists
+    if not skill_path.exists():
+        print(f"❌ Error: Skill folder not found: {skill_path}")
+        return None
+
+    if not skill_path.is_dir():
+        print(f"❌ Error: Path is not a directory: {skill_path}")
+        return None
+
+    # Validate SKILL.md exists
+    skill_md = skill_path / "SKILL.md"
+    if not skill_md.exists():
+        print(f"❌ Error: SKILL.md not found in {skill_path}")
+        return None
+
+    # Run validation before packaging
+    print("🔍 Validating skill...")
+    valid, message = validate_skill(skill_path)
+    if not valid:
+        print(f"❌ Validation failed: {message}")
+        print("   Please fix the validation errors before packaging.")
+        return None
+    print(f"✅ {message}\n")
+
+    # Determine output location
+    skill_name = skill_path.name
+    if output_dir:
+        output_path = Path(output_dir).resolve()
+        output_path.mkdir(parents=True, exist_ok=True)
+    else:
+        output_path = Path.cwd()
+
+    skill_filename = output_path / f"{skill_name}.skill"
+
+    # Create the .skill file (zip format)
+    try:
+        with zipfile.ZipFile(skill_filename, 'w', zipfile.ZIP_DEFLATED) as zipf:
+            # Walk through the skill directory
+            for file_path in skill_path.rglob('*'):
+                if file_path.is_file():
+                    # Calculate the relative path within the zip
+                    arcname = file_path.relative_to(skill_path.parent)
+                    zipf.write(file_path, arcname)
+                    print(f"  Added: {arcname}")
+
+        print(f"\n✅ Successfully packaged skill to: {skill_filename}")
+        return skill_filename
+
+    except Exception as e:
+        print(f"❌ Error creating .skill file: {e}")
+        return None
+
+
+def main():
+    if len(sys.argv) < 2:
+        print("Usage: python utils/package_skill.py <skill-path> [output-directory]")
+        print("\nExample:")
+        print("  python utils/package_skill.py skills/public/my-skill")
+        print("  python utils/package_skill.py skills/public/my-skill ./dist")
+        sys.exit(1)
+
+    skill_path = sys.argv[1]
+    output_dir = sys.argv[2] if len(sys.argv) > 2 else None
+
+    print(f"📦 Packaging skill: {skill_path}")
+    if output_dir:
+        print(f"   Output directory: {output_dir}")
+    print()
+
+    result = package_skill(skill_path, output_dir)
+
+    if result:
+        sys.exit(0)
+    else:
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.claude/skills/skill-creator/scripts/quick_validate.py b/.claude/skills/skill-creator/scripts/quick_validate.py
new file mode 100644
index 0000000..d9fbeb7
--- /dev/null
+++ b/.claude/skills/skill-creator/scripts/quick_validate.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+"""
+Quick validation script for skills - minimal version
+"""
+
+import sys
+import os
+import re
+import yaml
+from pathlib import Path
+
+def validate_skill(skill_path):
+    """Basic validation of a skill"""
+    skill_path = Path(skill_path)
+
+    # Check SKILL.md exists
+    skill_md = skill_path / 'SKILL.md'
+    if not skill_md.exists():
+        return False, "SKILL.md not found"
+
+    # Read and validate frontmatter
+    content = skill_md.read_text()
+    if not content.startswith('---'):
+        return False, "No YAML frontmatter found"
+
+    # Extract frontmatter
+    match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL)
+    if not match:
+        return False, "Invalid frontmatter format"
+
+    frontmatter_text = match.group(1)
+
+    # Parse YAML frontmatter
+    try:
+        frontmatter = yaml.safe_load(frontmatter_text)
+        if not isinstance(frontmatter, dict):
+            return False, "Frontmatter must be a YAML dictionary"
+    except yaml.YAMLError as e:
+        return False, f"Invalid YAML in frontmatter: {e}"
+
+    # Define allowed properties
+    ALLOWED_PROPERTIES = {'name', 'description', 'license', 'allowed-tools', 'metadata'}
+
+    # Check for unexpected properties (excluding nested keys under metadata)
+    unexpected_keys = set(frontmatter.keys()) - ALLOWED_PROPERTIES
+    if unexpected_keys:
+        return False, (
+            f"Unexpected key(s) in SKILL.md frontmatter: {', '.join(sorted(unexpected_keys))}. "
+            f"Allowed properties are: {', '.join(sorted(ALLOWED_PROPERTIES))}"
+        )
+
+    # Check required fields
+    if 'name' not in frontmatter:
+        return False, "Missing 'name' in frontmatter"
+    if 'description' not in frontmatter:
+        return False, "Missing 'description' in frontmatter"
+
+    # Extract name for validation
+    name = frontmatter.get('name', '')
+    if not isinstance(name, str):
+        return False, f"Name must be a string, got {type(name).__name__}"
+    name = name.strip()
+    if name:
+        # Check naming convention (hyphen-case: lowercase with hyphens)
+        if not re.match(r'^[a-z0-9-]+$', name):
+            return False, f"Name '{name}' should be hyphen-case (lowercase letters, digits, and hyphens only)"
+        if name.startswith('-') or name.endswith('-') or '--' in name:
+            return False, f"Name '{name}' cannot start/end with hyphen or contain consecutive hyphens"
+        # Check name length (max 64 characters per spec)
+        if len(name) > 64:
+            return False, f"Name is too long ({len(name)} characters). Maximum is 64 characters."
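+
+    # Examples of how the name checks above treat values:
+    #   "data-analyzer"   -> valid hyphen-case
+    #   "Data_Analyzer"   -> rejected (uppercase letter and underscore)
+    #   "-data-analyzer"  -> rejected (leading hyphen)
+    #   "data--analyzer"  -> rejected (consecutive hyphens)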
+
+    # Extract and validate description
+    description = frontmatter.get('description', '')
+    if not isinstance(description, str):
+        return False, f"Description must be a string, got {type(description).__name__}"
+    description = description.strip()
+    if description:
+        # Check for angle brackets
+        if '<' in description or '>' in description:
+            return False, "Description cannot contain angle brackets (< or >)"
+        # Check description length (max 1024 characters per spec)
+        if len(description) > 1024:
+            return False, f"Description is too long ({len(description)} characters). Maximum is 1024 characters."
+
+    return True, "Skill is valid!"
+
+if __name__ == "__main__":
+    if len(sys.argv) != 2:
+        print("Usage: python quick_validate.py <skill-path>")
+        sys.exit(1)
+
+    valid, message = validate_skill(sys.argv[1])
+    print(message)
+    sys.exit(0 if valid else 1)
\ No newline at end of file
diff --git a/.claude/skills/slack-gif-creator/LICENSE.txt b/.claude/skills/slack-gif-creator/LICENSE.txt
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/.claude/skills/slack-gif-creator/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/.claude/skills/slack-gif-creator/SKILL.md b/.claude/skills/slack-gif-creator/SKILL.md new file mode 100644 index 0000000..16660d8 --- /dev/null +++ b/.claude/skills/slack-gif-creator/SKILL.md @@ -0,0 +1,254 @@ +--- +name: slack-gif-creator +description: Knowledge and utilities for creating animated GIFs optimized for Slack. Provides constraints, validation tools, and animation concepts. Use when users request animated GIFs for Slack like "make me a GIF of X doing Y for Slack." +license: Complete terms in LICENSE.txt +--- + +# Slack GIF Creator + +A toolkit providing utilities and knowledge for creating animated GIFs optimized for Slack. 
+ +## Slack Requirements + +**Dimensions:** +- Emoji GIFs: 128x128 (recommended) +- Message GIFs: 480x480 + +**Parameters:** +- FPS: 10-30 (lower is smaller file size) +- Colors: 48-128 (fewer = smaller file size) +- Duration: Keep under 3 seconds for emoji GIFs + +## Core Workflow + +```python +from core.gif_builder import GIFBuilder +from PIL import Image, ImageDraw + +# 1. Create builder +builder = GIFBuilder(width=128, height=128, fps=10) + +# 2. Generate frames +for i in range(12): + frame = Image.new('RGB', (128, 128), (240, 248, 255)) + draw = ImageDraw.Draw(frame) + + # Draw your animation using PIL primitives + # (circles, polygons, lines, etc.) + + builder.add_frame(frame) + +# 3. Save with optimization +builder.save('output.gif', num_colors=48, optimize_for_emoji=True) +``` + +## Drawing Graphics + +### Working with User-Uploaded Images +If a user uploads an image, consider whether they want to: +- **Use it directly** (e.g., "animate this", "split this into frames") +- **Use it as inspiration** (e.g., "make something like this") + +Load and work with images using PIL: +```python +from PIL import Image + +uploaded = Image.open('file.png') +# Use directly, or just as reference for colors/style +``` + +### Drawing from Scratch +When drawing graphics from scratch, use PIL ImageDraw primitives: + +```python +from PIL import ImageDraw + +draw = ImageDraw.Draw(frame) + +# Circles/ovals +draw.ellipse([x1, y1, x2, y2], fill=(r, g, b), outline=(r, g, b), width=3) + +# Stars, triangles, any polygon +points = [(x1, y1), (x2, y2), (x3, y3), ...] +draw.polygon(points, fill=(r, g, b), outline=(r, g, b), width=3) + +# Lines +draw.line([(x1, y1), (x2, y2)], fill=(r, g, b), width=5) + +# Rectangles +draw.rectangle([x1, y1, x2, y2], fill=(r, g, b), outline=(r, g, b), width=3) +``` + +**Don't use:** Emoji fonts (unreliable across platforms) or assume pre-packaged graphics exist in this skill. + +### Making Graphics Look Good + +Graphics should look polished and creative, not basic. Here's how: + +**Use thicker lines** - Always set `width=2` or higher for outlines and lines. Thin lines (width=1) look choppy and amateurish. + +**Add visual depth**: +- Use gradients for backgrounds (`create_gradient_background`) +- Layer multiple shapes for complexity (e.g., a star with a smaller star inside) + +**Make shapes more interesting**: +- Don't just draw a plain circle - add highlights, rings, or patterns +- Stars can have glows (draw larger, semi-transparent versions behind) +- Combine multiple shapes (stars + sparkles, circles + rings) + +**Pay attention to colors**: +- Use vibrant, complementary colors +- Add contrast (dark outlines on light shapes, light outlines on dark shapes) +- Consider the overall composition + +**For complex shapes** (hearts, snowflakes, etc.): +- Use combinations of polygons and ellipses +- Calculate points carefully for symmetry +- Add details (a heart can have a highlight curve, snowflakes have intricate branches) + +Be creative and detailed! A good Slack GIF should look polished, not like placeholder graphics. 
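+
+As a concrete illustration (a minimal sketch, not a utility shipped with this skill; `star_points` is an ad-hoc helper defined inline), a star with a layered glow and a contrasting outline might look like:
+
+```python
+import math
+from PIL import Image, ImageDraw
+
+def star_points(cx, cy, outer, inner):
+    """10 alternating outer/inner points of a 5-pointed star."""
+    pts = []
+    for i in range(10):
+        angle = math.radians(i * 36 - 90)  # 36 degrees per point, start at top
+        r = outer if i % 2 == 0 else inner
+        pts.append((cx + r * math.cos(angle), cy + r * math.sin(angle)))
+    return pts
+
+frame = Image.new('RGB', (128, 128), (25, 25, 60))
+draw = ImageDraw.Draw(frame)
+# Glow: a larger, paler star layered behind the main shape
+draw.polygon(star_points(64, 64, 48, 20), fill=(90, 80, 30))
+# Main star with a dark outline for contrast
+draw.polygon(star_points(64, 64, 40, 16), fill=(255, 210, 60),
+             outline=(120, 90, 0), width=3)
+```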
+
+## Available Utilities
+
+### GIFBuilder (`core.gif_builder`)
+Assembles frames and optimizes for Slack:
+```python
+builder = GIFBuilder(width=128, height=128, fps=10)
+builder.add_frame(frame)   # Add PIL Image
+builder.add_frames(frames) # Add list of frames
+builder.save('out.gif', num_colors=48, optimize_for_emoji=True, remove_duplicates=True)
+```
+
+### Validators (`core.validators`)
+Check if GIF meets Slack requirements:
+```python
+from core.validators import validate_gif, is_slack_ready
+
+# Detailed validation
+passes, info = validate_gif('my.gif', is_emoji=True, verbose=True)
+
+# Quick check
+if is_slack_ready('my.gif'):
+    print("Ready!")
+```
+
+### Easing Functions (`core.easing`)
+Smooth motion instead of linear:
+```python
+from core.easing import interpolate

+# Progress from 0.0 to 1.0
+t = i / (num_frames - 1)
+
+# Apply easing
+y = interpolate(start=0, end=400, t=t, easing='ease_out')
+
+# Available: linear, ease_in, ease_out, ease_in_out,
+#            bounce_out, elastic_out, back_out
+```
+
+### Frame Helpers (`core.frame_composer`)
+Convenience functions for common needs:
+```python
+from core.frame_composer import (
+    create_blank_frame,          # Solid color background
+    create_gradient_background,  # Vertical gradient
+    draw_circle,                 # Helper for circles
+    draw_text,                   # Simple text rendering
+    draw_star                    # 5-pointed star
+)
+```
+
+## Animation Concepts
+
+### Shake/Vibrate
+Offset object position with oscillation:
+- Use `math.sin()` or `math.cos()` with frame index
+- Add small random variations for natural feel
+- Apply to x and/or y position
+
+### Pulse/Heartbeat
+Scale object size rhythmically:
+- Use `math.sin(t * frequency * 2 * math.pi)` for smooth pulse
+- For heartbeat: two quick pulses then pause (adjust sine wave)
+- Scale between 0.8 and 1.2 of base size
+
+### Bounce
+Object falls and bounces:
+- Use `interpolate()` with `easing='bounce_out'` for landing
+- Use `easing='ease_in'` for falling (accelerating)
+- Apply gravity by increasing y velocity each frame
+
+### Spin/Rotate
+Rotate object around center:
+- PIL: `image.rotate(angle, resample=Image.BICUBIC)`
+- For wobble: use sine wave for angle instead of linear
+
+### Fade In/Out
+Gradually appear or disappear:
+- Create RGBA image, adjust alpha channel
+- Or use `Image.blend(image1, image2, alpha)`
+- Fade in: alpha from 0 to 1
+- Fade out: alpha from 1 to 0
+
+### Slide
+Move object from off-screen to position:
+- Start position: outside frame bounds
+- End position: target location
+- Use `interpolate()` with `easing='ease_out'` for smooth stop
+- For overshoot: use `easing='back_out'`
+
+### Zoom
+Scale and position for zoom effect:
+- Zoom in: scale from 0.1 to 2.0, crop center
+- Zoom out: scale from 2.0 to 1.0
+- Can add motion blur for drama (PIL filter)
+
+### Explode/Particle Burst
+Create particles radiating outward:
+- Generate particles with random angles and velocities
+- Update each particle: `x += vx`, `y += vy`
+- Add gravity: `vy += gravity_constant`
+- Fade out particles over time (reduce alpha)
+
+## Optimization Strategies
+
+Apply a few of the following methods, but only when asked to make the file size smaller:
+
+1. **Fewer frames** - Lower FPS (10 instead of 20) or shorter duration
+2. **Fewer colors** - `num_colors=48` instead of 128
+3. **Smaller dimensions** - 128x128 instead of 480x480
+4. **Remove duplicates** - `remove_duplicates=True` in save()
+5. **Emoji mode** - `optimize_for_emoji=True` auto-optimizes
+
+```python
+# Maximum optimization for emoji
+builder.save(
+    'emoji.gif',
+    num_colors=48,
+    optimize_for_emoji=True,
+    remove_duplicates=True
+)
+```
+
+## Philosophy
+
+This skill provides:
+- **Knowledge**: Slack's requirements and animation concepts
+- **Utilities**: GIFBuilder, validators, easing functions
+- **Flexibility**: Create the animation logic using PIL primitives
+
+It does NOT provide:
+- Rigid animation templates or pre-made functions
+- Emoji font rendering (unreliable across platforms)
+- A library of pre-packaged graphics built into the skill
+
+**Note on user uploads**: This skill doesn't include pre-built graphics, but if a user uploads an image, use PIL to load and work with it - interpret based on their request whether they want it used directly or just as inspiration.
+
+Be creative! Combine concepts (bouncing + rotating, pulsing + sliding, etc.) and use PIL's full capabilities.
+
+## Dependencies
+
+```bash
+pip install pillow imageio numpy
+```
diff --git a/.claude/skills/slack-gif-creator/core/easing.py b/.claude/skills/slack-gif-creator/core/easing.py
new file mode 100644
index 0000000..772fa83
--- /dev/null
+++ b/.claude/skills/slack-gif-creator/core/easing.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python3
+"""
+Easing Functions - Timing functions for smooth animations.
+
+Provides various easing functions for natural motion and timing.
+All functions take a value t (0.0 to 1.0) and return an eased value
+(typically 0.0 to 1.0; bounce, elastic, and back variants may overshoot).
+"""
+
+import math
+
+
+def linear(t: float) -> float:
+    """Linear interpolation (no easing)."""
+    return t
+
+
+def ease_in_quad(t: float) -> float:
+    """Quadratic ease-in (slow start, accelerating)."""
+    return t * t
+
+
+def ease_out_quad(t: float) -> float:
+    """Quadratic ease-out (fast start, decelerating)."""
+    return t * (2 - t)
+
+
+def ease_in_out_quad(t: float) -> float:
+    """Quadratic ease-in-out (slow start and end)."""
+    if t < 0.5:
+        return 2 * t * t
+    return -1 + (4 - 2 * t) * t
+
+
+def ease_in_cubic(t: float) -> float:
+    """Cubic ease-in (slow start)."""
+    return t * t * t
+
+
+def ease_out_cubic(t: float) -> float:
+    """Cubic ease-out (fast start)."""
+    return (t - 1) * (t - 1) * (t - 1) + 1
+
+
+def ease_in_out_cubic(t: float) -> float:
+    """Cubic ease-in-out."""
+    if t < 0.5:
+        return 4 * t * t * t
+    return (t - 1) * (2 * t - 2) * (2 * t - 2) + 1
+
+
+def ease_in_bounce(t: float) -> float:
+    """Bounce ease-in (bouncy start)."""
+    return 1 - ease_out_bounce(1 - t)
+
+
+def ease_out_bounce(t: float) -> float:
+    """Bounce ease-out (bouncy end)."""
+    if t < 1 / 2.75:
+        return 7.5625 * t * t
+    elif t < 2 / 2.75:
+        t -= 1.5 / 2.75
+        return 7.5625 * t * t + 0.75
+    elif t < 2.5 / 2.75:
+        t -= 2.25 / 2.75
+        return 7.5625 * t * t + 0.9375
+    else:
+        t -= 2.625 / 2.75
+        return 7.5625 * t * t + 0.984375
+
+
+def ease_in_out_bounce(t: float) -> float:
+    """Bounce ease-in-out."""
+    if t < 0.5:
+        return ease_in_bounce(t * 2) * 0.5
+    return ease_out_bounce(t * 2 - 1) * 0.5 + 0.5
+
+
+def ease_in_elastic(t: float) -> float:
+    """Elastic ease-in (spring effect)."""
+    if t == 0 or t == 1:
+        return t
+    return -math.pow(2, 10 * (t - 1)) * math.sin((t - 1.1) * 5 * math.pi)
+
+
+def ease_out_elastic(t: float) -> float:
+    """Elastic ease-out (spring effect)."""
+    if t == 0 or t == 1:
+        return t
+    return math.pow(2, -10 * t) * math.sin((t - 0.1) * 5 * math.pi) + 1
+
+
+def ease_in_out_elastic(t: float) -> float:
+    """Elastic ease-in-out."""
+    if t == 0 or t == 1:
+        return t
+    t = t * 2 - 1
+    if t < 0:
+        return -0.5 * math.pow(2, 10 * t) * math.sin((t - 0.1) * 5 * math.pi)
+    return math.pow(2, -10 * t) * math.sin((t - 0.1) * 5 * math.pi) * 0.5 + 1
+
+
+# Convenience mapping
+EASING_FUNCTIONS = {
+    "linear": linear,
+    "ease_in": ease_in_quad,
+    "ease_out": ease_out_quad,
+    "ease_in_out": ease_in_out_quad,
+    "bounce_in": ease_in_bounce,
+    "bounce_out": ease_out_bounce,
+    "bounce": ease_in_out_bounce,
+    "elastic_in": ease_in_elastic,
+    "elastic_out": ease_out_elastic,
+    "elastic": ease_in_out_elastic,
+}
+
+
+def get_easing(name: str = "linear"):
+    """Get easing function by name."""
+    return EASING_FUNCTIONS.get(name, linear)
+
+
+def interpolate(start: float, end: float, t: float, easing: str = "linear") -> float:
+    """
+    Interpolate between two values with easing.
+
+    Args:
+        start: Start value
+        end: End value
+        t: Progress from 0.0 to 1.0
+        easing: Name of easing function
+
+    Returns:
+        Interpolated value
+    """
+    ease_func = get_easing(easing)
+    eased_t = ease_func(t)
+    return start + (end - start) * eased_t
+
+
+def ease_back_in(t: float) -> float:
+    """Back ease-in (slight overshoot backward before forward motion)."""
+    c1 = 1.70158
+    c3 = c1 + 1
+    return c3 * t * t * t - c1 * t * t
+
+
+def ease_back_out(t: float) -> float:
+    """Back ease-out (overshoot forward then settle back)."""
+    c1 = 1.70158
+    c3 = c1 + 1
+    return 1 + c3 * pow(t - 1, 3) + c1 * pow(t - 1, 2)
+
+
+def ease_back_in_out(t: float) -> float:
+    """Back ease-in-out (overshoot at both ends)."""
+    c1 = 1.70158
+    c2 = c1 * 1.525
+    if t < 0.5:
+        return (pow(2 * t, 2) * ((c2 + 1) * 2 * t - c2)) / 2
+    return (pow(2 * t - 2, 2) * ((c2 + 1) * (t * 2 - 2) + c2) + 2) / 2
+
+
+def apply_squash_stretch(
+    base_scale: tuple[float, float], intensity: float, direction: str = "vertical"
+) -> tuple[float, float]:
+    """
+    Calculate squash and stretch scales for more dynamic animation.
+
+    Args:
+        base_scale: (width_scale, height_scale) base scales
+        intensity: Squash/stretch intensity (0.0-1.0)
+        direction: 'vertical', 'horizontal', or 'both'
+
+    Returns:
+        (width_scale, height_scale) with squash/stretch applied
+    """
+    width_scale, height_scale = base_scale
+
+    if direction == "vertical":
+        # Compress vertically, expand horizontally (preserve volume)
+        height_scale *= 1 - intensity * 0.5
+        width_scale *= 1 + intensity * 0.5
+    elif direction == "horizontal":
+        # Compress horizontally, expand vertically
+        width_scale *= 1 - intensity * 0.5
+        height_scale *= 1 + intensity * 0.5
+    elif direction == "both":
+        # General squash (both dimensions)
+        width_scale *= 1 - intensity * 0.3
+        height_scale *= 1 - intensity * 0.3
+
+    return (width_scale, height_scale)
+
+
+def calculate_arc_motion(
+    start: tuple[float, float], end: tuple[float, float], height: float, t: float
+) -> tuple[float, float]:
+    """
+    Calculate position along a parabolic arc (natural motion path).
+ + Args: + start: (x, y) starting position + end: (x, y) ending position + height: Arc height at midpoint (positive = upward) + t: Progress (0.0-1.0) + + Returns: + (x, y) position along arc + """ + x1, y1 = start + x2, y2 = end + + # Linear interpolation for x + x = x1 + (x2 - x1) * t + + # Parabolic interpolation for y + # y = start + progress * (end - start) + arc_offset + # Arc offset peaks at t=0.5 + arc_offset = 4 * height * t * (1 - t) + y = y1 + (y2 - y1) * t - arc_offset + + return (x, y) + + +# Add new easing functions to the convenience mapping +EASING_FUNCTIONS.update( + { + "back_in": ease_back_in, + "back_out": ease_back_out, + "back_in_out": ease_back_in_out, + "anticipate": ease_back_in, # Alias + "overshoot": ease_back_out, # Alias + } +) diff --git a/.claude/skills/slack-gif-creator/core/frame_composer.py b/.claude/skills/slack-gif-creator/core/frame_composer.py new file mode 100644 index 0000000..1afe434 --- /dev/null +++ b/.claude/skills/slack-gif-creator/core/frame_composer.py @@ -0,0 +1,176 @@ +#!/usr/bin/env python3 +""" +Frame Composer - Utilities for composing visual elements into frames. + +Provides functions for drawing shapes, text, emojis, and compositing elements +together to create animation frames. +""" + +from typing import Optional + +import numpy as np +from PIL import Image, ImageDraw, ImageFont + + +def create_blank_frame( + width: int, height: int, color: tuple[int, int, int] = (255, 255, 255) +) -> Image.Image: + """ + Create a blank frame with solid color background. + + Args: + width: Frame width + height: Frame height + color: RGB color tuple (default: white) + + Returns: + PIL Image + """ + return Image.new("RGB", (width, height), color) + + +def draw_circle( + frame: Image.Image, + center: tuple[int, int], + radius: int, + fill_color: Optional[tuple[int, int, int]] = None, + outline_color: Optional[tuple[int, int, int]] = None, + outline_width: int = 1, +) -> Image.Image: + """ + Draw a circle on a frame. + + Args: + frame: PIL Image to draw on + center: (x, y) center position + radius: Circle radius + fill_color: RGB fill color (None for no fill) + outline_color: RGB outline color (None for no outline) + outline_width: Outline width in pixels + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + x, y = center + bbox = [x - radius, y - radius, x + radius, y + radius] + draw.ellipse(bbox, fill=fill_color, outline=outline_color, width=outline_width) + return frame + + +def draw_text( + frame: Image.Image, + text: str, + position: tuple[int, int], + color: tuple[int, int, int] = (0, 0, 0), + centered: bool = False, +) -> Image.Image: + """ + Draw text on a frame. + + Args: + frame: PIL Image to draw on + text: Text to draw + position: (x, y) position (top-left unless centered=True) + color: RGB text color + centered: If True, center text at position + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + + # Uses Pillow's default font. + # If the font should be changed for the emoji, add additional logic here. 
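+    # For example, one might load a TrueType face instead (hypothetical
+    # font file name; assumes such a font is available on the system):
+    #   font = ImageFont.truetype("DejaVuSans.ttf", 16)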
+ font = ImageFont.load_default() + + if centered: + bbox = draw.textbbox((0, 0), text, font=font) + text_width = bbox[2] - bbox[0] + text_height = bbox[3] - bbox[1] + x = position[0] - text_width // 2 + y = position[1] - text_height // 2 + position = (x, y) + + draw.text(position, text, fill=color, font=font) + return frame + + +def create_gradient_background( + width: int, + height: int, + top_color: tuple[int, int, int], + bottom_color: tuple[int, int, int], +) -> Image.Image: + """ + Create a vertical gradient background. + + Args: + width: Frame width + height: Frame height + top_color: RGB color at top + bottom_color: RGB color at bottom + + Returns: + PIL Image with gradient + """ + frame = Image.new("RGB", (width, height)) + draw = ImageDraw.Draw(frame) + + # Calculate color step for each row + r1, g1, b1 = top_color + r2, g2, b2 = bottom_color + + for y in range(height): + # Interpolate color + ratio = y / height + r = int(r1 * (1 - ratio) + r2 * ratio) + g = int(g1 * (1 - ratio) + g2 * ratio) + b = int(b1 * (1 - ratio) + b2 * ratio) + + # Draw horizontal line + draw.line([(0, y), (width, y)], fill=(r, g, b)) + + return frame + + +def draw_star( + frame: Image.Image, + center: tuple[int, int], + size: int, + fill_color: tuple[int, int, int], + outline_color: Optional[tuple[int, int, int]] = None, + outline_width: int = 1, +) -> Image.Image: + """ + Draw a 5-pointed star. + + Args: + frame: PIL Image to draw on + center: (x, y) center position + size: Star size (outer radius) + fill_color: RGB fill color + outline_color: RGB outline color (None for no outline) + outline_width: Outline width + + Returns: + Modified frame + """ + import math + + draw = ImageDraw.Draw(frame) + x, y = center + + # Calculate star points + points = [] + for i in range(10): + angle = (i * 36 - 90) * math.pi / 180 # 36 degrees per point, start at top + radius = size if i % 2 == 0 else size * 0.4 # Alternate between outer and inner + px = x + radius * math.cos(angle) + py = y + radius * math.sin(angle) + points.append((px, py)) + + # Draw star + draw.polygon(points, fill=fill_color, outline=outline_color, width=outline_width) + + return frame diff --git a/.claude/skills/slack-gif-creator/core/gif_builder.py b/.claude/skills/slack-gif-creator/core/gif_builder.py new file mode 100644 index 0000000..5759f14 --- /dev/null +++ b/.claude/skills/slack-gif-creator/core/gif_builder.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python3 +""" +GIF Builder - Core module for assembling frames into GIFs optimized for Slack. + +This module provides the main interface for creating GIFs from programmatically +generated frames, with automatic optimization for Slack's requirements. +""" + +from pathlib import Path +from typing import Optional + +import imageio.v3 as imageio +import numpy as np +from PIL import Image + + +class GIFBuilder: + """Builder for creating optimized GIFs from frames.""" + + def __init__(self, width: int = 480, height: int = 480, fps: int = 15): + """ + Initialize GIF builder. + + Args: + width: Frame width in pixels + height: Frame height in pixels + fps: Frames per second + """ + self.width = width + self.height = height + self.fps = fps + self.frames: list[np.ndarray] = [] + + def add_frame(self, frame: np.ndarray | Image.Image): + """ + Add a frame to the GIF. 
+ + Args: + frame: Frame as numpy array or PIL Image (will be converted to RGB) + """ + if isinstance(frame, Image.Image): + frame = np.array(frame.convert("RGB")) + + # Ensure frame is correct size + if frame.shape[:2] != (self.height, self.width): + pil_frame = Image.fromarray(frame) + pil_frame = pil_frame.resize( + (self.width, self.height), Image.Resampling.LANCZOS + ) + frame = np.array(pil_frame) + + self.frames.append(frame) + + def add_frames(self, frames: list[np.ndarray | Image.Image]): + """Add multiple frames at once.""" + for frame in frames: + self.add_frame(frame) + + def optimize_colors( + self, num_colors: int = 128, use_global_palette: bool = True + ) -> list[np.ndarray]: + """ + Reduce colors in all frames using quantization. + + Args: + num_colors: Target number of colors (8-256) + use_global_palette: Use a single palette for all frames (better compression) + + Returns: + List of color-optimized frames + """ + optimized = [] + + if use_global_palette and len(self.frames) > 1: + # Create a global palette from all frames + # Sample frames to build palette + sample_size = min(5, len(self.frames)) + sample_indices = [ + int(i * len(self.frames) / sample_size) for i in range(sample_size) + ] + sample_frames = [self.frames[i] for i in sample_indices] + + # Combine sample frames into a single image for palette generation + # Flatten each frame to get all pixels, then stack them + all_pixels = np.vstack( + [f.reshape(-1, 3) for f in sample_frames] + ) # (total_pixels, 3) + + # Create a properly-shaped RGB image from the pixel data + # We'll make a roughly square image from all the pixels + total_pixels = len(all_pixels) + width = min(512, int(np.sqrt(total_pixels))) # Reasonable width, max 512 + height = (total_pixels + width - 1) // width # Ceiling division + + # Pad if necessary to fill the rectangle + pixels_needed = width * height + if pixels_needed > total_pixels: + padding = np.zeros((pixels_needed - total_pixels, 3), dtype=np.uint8) + all_pixels = np.vstack([all_pixels, padding]) + + # Reshape to proper RGB image format (H, W, 3) + img_array = ( + all_pixels[:pixels_needed].reshape(height, width, 3).astype(np.uint8) + ) + combined_img = Image.fromarray(img_array, mode="RGB") + + # Generate global palette + global_palette = combined_img.quantize(colors=num_colors, method=2) + + # Apply global palette to all frames + for frame in self.frames: + pil_frame = Image.fromarray(frame) + quantized = pil_frame.quantize(palette=global_palette, dither=1) + optimized.append(np.array(quantized.convert("RGB"))) + else: + # Use per-frame quantization + for frame in self.frames: + pil_frame = Image.fromarray(frame) + quantized = pil_frame.quantize(colors=num_colors, method=2, dither=1) + optimized.append(np.array(quantized.convert("RGB"))) + + return optimized + + def deduplicate_frames(self, threshold: float = 0.9995) -> int: + """ + Remove duplicate or near-duplicate consecutive frames. + + Args: + threshold: Similarity threshold (0.0-1.0). Higher = more strict (0.9995 = nearly identical). + Use 0.9995+ to preserve subtle animations, 0.98 for aggressive removal. 
+
+        Returns:
+            Number of frames removed
+        """
+        if len(self.frames) < 2:
+            return 0
+
+        deduplicated = [self.frames[0]]
+        removed_count = 0
+
+        for i in range(1, len(self.frames)):
+            # Compare with previous frame
+            prev_frame = np.array(deduplicated[-1], dtype=np.float32)
+            curr_frame = np.array(self.frames[i], dtype=np.float32)
+
+            # Calculate similarity (normalized)
+            diff = np.abs(prev_frame - curr_frame)
+            similarity = 1.0 - (np.mean(diff) / 255.0)
+
+            # Keep frame if sufficiently different
+            # High threshold (0.9995+) means only remove nearly identical frames
+            if similarity < threshold:
+                deduplicated.append(self.frames[i])
+            else:
+                removed_count += 1
+
+        self.frames = deduplicated
+        return removed_count
+
+    def save(
+        self,
+        output_path: str | Path,
+        num_colors: int = 128,
+        optimize_for_emoji: bool = False,
+        remove_duplicates: bool = False,
+    ) -> dict:
+        """
+        Save frames as optimized GIF for Slack.
+
+        Args:
+            output_path: Where to save the GIF
+            num_colors: Number of colors to use (fewer = smaller file)
+            optimize_for_emoji: If True, optimize for emoji size (128x128, fewer colors)
+            remove_duplicates: If True, remove duplicate consecutive frames (opt-in)
+
+        Returns:
+            Dictionary with file info (path, size, dimensions, frame_count)
+        """
+        if not self.frames:
+            raise ValueError("No frames to save. Add frames with add_frame() first.")
+
+        output_path = Path(output_path)
+
+        # Remove duplicate frames to reduce file size
+        if remove_duplicates:
+            removed = self.deduplicate_frames(threshold=0.9995)
+            if removed > 0:
+                print(
+                    f"  Removed {removed} nearly identical frames (preserved subtle animations)"
+                )
+
+        # Optimize for emoji if requested
+        if optimize_for_emoji:
+            if self.width > 128 or self.height > 128:
+                print(
+                    f"  Resizing from {self.width}x{self.height} to 128x128 for emoji"
+                )
+                self.width = 128
+                self.height = 128
+                # Resize all frames
+                resized_frames = []
+                for frame in self.frames:
+                    pil_frame = Image.fromarray(frame)
+                    pil_frame = pil_frame.resize((128, 128), Image.Resampling.LANCZOS)
+                    resized_frames.append(np.array(pil_frame))
+                self.frames = resized_frames
+            num_colors = min(num_colors, 48)  # More aggressive color limit for emoji
+
+            # More aggressive FPS reduction for emoji
+            if len(self.frames) > 12:
+                print(
+                    f"  Reducing frames from {len(self.frames)} to ~12 for emoji size"
+                )
+                # Keep every nth frame to get close to 12 frames
+                keep_every = max(1, len(self.frames) // 12)
+                self.frames = [
+                    self.frames[i] for i in range(0, len(self.frames), keep_every)
+                ]
+
+        # Optimize colors with global palette
+        optimized_frames = self.optimize_colors(num_colors, use_global_palette=True)
+
+        # Calculate frame duration in milliseconds
+        frame_duration = 1000 / self.fps
+
+        # Save GIF
+        imageio.imwrite(
+            output_path,
+            optimized_frames,
+            duration=frame_duration,
+            loop=0,  # Infinite loop
+        )
+
+        # Get file info
+        file_size_kb = output_path.stat().st_size / 1024
+        file_size_mb = file_size_kb / 1024
+
+        info = {
+            "path": str(output_path),
+            "size_kb": file_size_kb,
+            "size_mb": file_size_mb,
+            "dimensions": f"{self.width}x{self.height}",
+            "frame_count": len(optimized_frames),
+            "fps": self.fps,
+            "duration_seconds": len(optimized_frames) / self.fps,
+            "colors": num_colors,
+        }
+
+        # Print info
+        print("\n✓ GIF created successfully!")
+        print(f"  Path: {output_path}")
+        print(f"  Size: {file_size_kb:.1f} KB ({file_size_mb:.2f} MB)")
+        print(f"  Dimensions: {self.width}x{self.height}")
+        print(f"  Frames: {len(optimized_frames)} @ {self.fps} fps")
+        print(f"  Duration: {info['duration_seconds']:.1f}s")
+        print(f"  Colors: {num_colors}")
+
+        # Size info
+        if optimize_for_emoji:
+            print("  Optimized for emoji (128x128, reduced colors)")
+        if file_size_mb > 1.0:
+            print(f"\n  Note: Large file size ({file_size_kb:.1f} KB)")
+            print("  Consider: fewer frames, smaller dimensions, or fewer colors")
+
+        return info
+
+    def clear(self):
+        """Clear all frames (useful for creating multiple GIFs)."""
+        self.frames = []
diff --git a/.claude/skills/slack-gif-creator/core/validators.py b/.claude/skills/slack-gif-creator/core/validators.py
new file mode 100644
index 0000000..a6f5bdf
--- /dev/null
+++ b/.claude/skills/slack-gif-creator/core/validators.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python3
+"""
+Validators - Check if GIFs meet Slack's requirements.
+
+These validators help ensure your GIFs meet Slack's size and dimension constraints.
+"""
+
+from pathlib import Path
+
+
+def validate_gif(
+    gif_path: str | Path, is_emoji: bool = True, verbose: bool = True
+) -> tuple[bool, dict]:
+    """
+    Validate GIF for Slack (dimensions, size, frame count).
+
+    Args:
+        gif_path: Path to GIF file
+        is_emoji: True for emoji (128x128 recommended), False for message GIF
+        verbose: Print validation details
+
+    Returns:
+        Tuple of (passes: bool, results: dict with all details)
+    """
+    from PIL import Image
+
+    gif_path = Path(gif_path)
+
+    if not gif_path.exists():
+        return False, {"error": f"File not found: {gif_path}"}
+
+    # Get file size
+    size_bytes = gif_path.stat().st_size
+    size_kb = size_bytes / 1024
+    size_mb = size_kb / 1024
+
+    # Get dimensions and frame info
+    try:
+        with Image.open(gif_path) as img:
+            width, height = img.size
+
+            # Count frames
+            frame_count = 0
+            try:
+                while True:
+                    img.seek(frame_count)
+                    frame_count += 1
+            except EOFError:
+                pass
+
+            # Get duration
+            try:
+                duration_ms = img.info.get("duration", 100)
+                total_duration = (duration_ms * frame_count) / 1000
+                fps = frame_count / total_duration if total_duration > 0 else 0
+            except Exception:
+                total_duration = None
+                fps = None
+
+    except Exception as e:
+        return False, {"error": f"Failed to read GIF: {e}"}
+
+    # Validate dimensions
+    if is_emoji:
+        optimal = width == height == 128
+        acceptable = width == height and 64 <= width <= 128
+        dim_pass = acceptable
+    else:
+        aspect_ratio = (
+            max(width, height) / min(width, height)
+            if min(width, height) > 0
+            else float("inf")
+        )
+        dim_pass = aspect_ratio <= 2.0 and 320 <= min(width, height) <= 640
+
+    results = {
+        "file": str(gif_path),
+        "passes": dim_pass,
+        "width": width,
+        "height": height,
+        "size_kb": size_kb,
+        "size_mb": size_mb,
+        "frame_count": frame_count,
+        "duration_seconds": total_duration,
+        "fps": fps,
+        "is_emoji": is_emoji,
+        "optimal": optimal if is_emoji else None,
+    }
+
+    # Print if verbose
+    if verbose:
+        print(f"\nValidating {gif_path.name}:")
+        print(
+            f"  Dimensions: {width}x{height}"
+            + (
+                f" ({'optimal' if optimal else 'acceptable'})"
+                if is_emoji and acceptable
+                else ""
+            )
+        )
+        print(
+            f"  Size: {size_kb:.1f} KB"
+            + (f" ({size_mb:.2f} MB)" if size_mb >= 1.0 else "")
+        )
+        print(
+            f"  Frames: {frame_count}"
+            + (f" @ {fps:.1f} fps ({total_duration:.1f}s)" if fps else "")
+        )
+
+        if not dim_pass:
+            print(
+                f"  Note: {'Emoji should be 128x128' if is_emoji else 'Unusual dimensions for Slack'}"
+            )
+
+        if size_mb > 5.0:
+            print("  Note: Large file size - consider fewer frames/colors")
+
+    return dim_pass, results
+
+
+def is_slack_ready(
+    gif_path: str | Path, is_emoji: bool = True, verbose: bool = True
+) -> bool:
+    """
+    Quick check if
GIF is ready for Slack. + + Args: + gif_path: Path to GIF file + is_emoji: True for emoji GIF, False for message GIF + verbose: Print feedback + + Returns: + True if dimensions are acceptable + """ + passes, _ = validate_gif(gif_path, is_emoji, verbose) + return passes diff --git a/.claude/skills/slack-gif-creator/requirements.txt b/.claude/skills/slack-gif-creator/requirements.txt new file mode 100644 index 0000000..8bc4493 --- /dev/null +++ b/.claude/skills/slack-gif-creator/requirements.txt @@ -0,0 +1,4 @@ +pillow>=10.0.0 +imageio>=2.31.0 +imageio-ffmpeg>=0.4.9 +numpy>=1.24.0 \ No newline at end of file diff --git a/.claude/skills/theme-factory/LICENSE.txt b/.claude/skills/theme-factory/LICENSE.txt new file mode 100644 index 0000000..7a4a3ea --- /dev/null +++ b/.claude/skills/theme-factory/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/.claude/skills/theme-factory/SKILL.md b/.claude/skills/theme-factory/SKILL.md
new file mode 100644
index 0000000..90dfcea
--- /dev/null
+++ b/.claude/skills/theme-factory/SKILL.md
@@ -0,0 +1,59 @@
+---
+name: theme-factory
+description: Toolkit for styling artifacts with a theme. These artifacts can be slides, docs, reports, HTML landing pages, etc. There are 10 pre-set themes with colors/fonts that you can apply to any artifact that has been created, or you can generate a new theme on-the-fly.
+license: Complete terms in LICENSE.txt
+---
+
+
+# Theme Factory Skill
+
+This skill provides a curated collection of professional font and color themes, each with carefully selected color palettes and font pairings. Once a theme is chosen, it can be applied to any artifact.
+
+## Purpose
+
+To apply consistent, professional styling to presentation slide decks, use this skill. Each theme includes:
+- A cohesive color palette with hex codes
+- Complementary font pairings for headers and body text
+- A distinct visual identity suitable for different contexts and audiences
+
+## Usage Instructions
+
+To apply styling to a slide deck or other artifact:
+
+1. **Show the theme showcase**: Display the `theme-showcase.pdf` file to allow users to see all available themes visually. Do not make any modifications to it; simply show the file for viewing.
+2. **Ask for their choice**: Ask which theme to apply to the deck
+3. **Wait for selection**: Get explicit confirmation about the chosen theme
+4. **Apply the theme**: Once a theme has been chosen, apply the selected theme's colors and fonts to the deck/artifact
+
+## Themes Available
+
+The following 10 themes are available, each showcased in `theme-showcase.pdf`:
+
+1. **Ocean Depths** - Professional and calming maritime theme
+2. **Sunset Boulevard** - Warm and vibrant sunset colors
+3. **Forest Canopy** - Natural and grounded earth tones
+4. **Modern Minimalist** - Clean and contemporary grayscale
+5. **Golden Hour** - Rich and warm autumnal palette
+6. **Arctic Frost** - Cool and crisp winter-inspired theme
+7. **Desert Rose** - Soft and sophisticated dusty tones
+8. **Tech Innovation** - Bold and modern tech aesthetic
+9. **Botanical Garden** - Fresh and organic garden colors
+10. **Midnight Galaxy** - Dramatic and cosmic deep tones
+
+## Theme Details
+
+Each theme is defined in the `themes/` directory with complete specifications including:
+- Cohesive color palette with hex codes
+- Complementary font pairings for headers and body text
+- Distinct visual identity suitable for different contexts and audiences
+
+## Application Process
+
+After a preferred theme is selected:
+1. Read the corresponding theme file from the `themes/` directory
+2. Apply the specified colors and fonts consistently throughout the deck
+3. Ensure proper contrast and readability
+4. Maintain the theme's visual identity across all slides
+
+## Create Your Own Theme
+To handle cases where none of the existing themes work for an artifact, create a custom theme. Based on provided inputs, generate a new theme similar to the ones above. Give the theme a similar name describing what the font/color combinations represent. Use any basic description provided to choose appropriate colors/fonts. After generating the theme, show it for review and verification. Following that, apply the theme as described above.
diff --git a/.claude/skills/theme-factory/theme-showcase.pdf b/.claude/skills/theme-factory/theme-showcase.pdf
new file mode 100644
index 0000000..24495d1
Binary files /dev/null and b/.claude/skills/theme-factory/theme-showcase.pdf differ
diff --git a/.claude/skills/theme-factory/themes/arctic-frost.md b/.claude/skills/theme-factory/themes/arctic-frost.md
new file mode 100644
index 0000000..e9f1eb0
--- /dev/null
+++ b/.claude/skills/theme-factory/themes/arctic-frost.md
@@ -0,0 +1,19 @@
+# Arctic Frost
+
+A cool and crisp winter-inspired theme that conveys clarity, precision, and professionalism.
+
+## Color Palette
+
+- **Ice Blue**: `#d4e4f7` - Light backgrounds and highlights
+- **Steel Blue**: `#4a6fa5` - Primary accent color
+- **Silver**: `#c0c0c0` - Metallic accent elements
+- **Crisp White**: `#fafafa` - Clean backgrounds and text
+
+## Typography
+
+- **Headers**: DejaVu Sans Bold
+- **Body Text**: DejaVu Sans
+
+## Best Used For
+
+Healthcare presentations, technology solutions, winter sports, clean tech, pharmaceutical content.
diff --git a/.claude/skills/theme-factory/themes/botanical-garden.md b/.claude/skills/theme-factory/themes/botanical-garden.md
new file mode 100644
index 0000000..0c95bf7
--- /dev/null
+++ b/.claude/skills/theme-factory/themes/botanical-garden.md
@@ -0,0 +1,19 @@
+# Botanical Garden
+
+A fresh and organic theme featuring vibrant garden-inspired colors for lively presentations.
+
+## Color Palette
+
+- **Fern Green**: `#4a7c59` - Rich natural green
+- **Marigold**: `#f9a620` - Bright floral accent
+- **Terracotta**: `#b7472a` - Earthy warm tone
+- **Cream**: `#f5f3ed` - Soft neutral backgrounds
+
+## Typography
+
+- **Headers**: DejaVu Serif Bold
+- **Body Text**: DejaVu Sans
+
+## Best Used For
+
+Garden centers, food presentations, farm-to-table content, botanical brands, natural products.
diff --git a/.claude/skills/theme-factory/themes/desert-rose.md b/.claude/skills/theme-factory/themes/desert-rose.md
new file mode 100644
index 0000000..ea7c74e
--- /dev/null
+++ b/.claude/skills/theme-factory/themes/desert-rose.md
@@ -0,0 +1,19 @@
+# Desert Rose
+
+A soft and sophisticated theme with dusty, muted tones perfect for elegant presentations.
+ +## Color Palette + +- **Dusty Rose**: `#d4a5a5` - Soft primary color +- **Clay**: `#b87d6d` - Earthy accent +- **Sand**: `#e8d5c4` - Warm neutral backgrounds +- **Deep Burgundy**: `#5d2e46` - Rich dark contrast + +## Typography + +- **Headers**: FreeSans Bold +- **Body Text**: FreeSans + +## Best Used For + +Fashion presentations, beauty brands, wedding planning, interior design, boutique businesses. diff --git a/.claude/skills/theme-factory/themes/forest-canopy.md b/.claude/skills/theme-factory/themes/forest-canopy.md new file mode 100644 index 0000000..90c2b26 --- /dev/null +++ b/.claude/skills/theme-factory/themes/forest-canopy.md @@ -0,0 +1,19 @@ +# Forest Canopy + +A natural and grounded theme featuring earth tones inspired by dense forest environments. + +## Color Palette + +- **Forest Green**: `#2d4a2b` - Primary dark green +- **Sage**: `#7d8471` - Muted green accent +- **Olive**: `#a4ac86` - Light accent color +- **Ivory**: `#faf9f6` - Backgrounds and text + +## Typography + +- **Headers**: FreeSerif Bold +- **Body Text**: FreeSans + +## Best Used For + +Environmental presentations, sustainability reports, outdoor brands, wellness content, organic products. diff --git a/.claude/skills/theme-factory/themes/golden-hour.md b/.claude/skills/theme-factory/themes/golden-hour.md new file mode 100644 index 0000000..ed8fc25 --- /dev/null +++ b/.claude/skills/theme-factory/themes/golden-hour.md @@ -0,0 +1,19 @@ +# Golden Hour + +A rich and warm autumnal palette that creates an inviting and sophisticated atmosphere. + +## Color Palette + +- **Mustard Yellow**: `#f4a900` - Bold primary accent +- **Terracotta**: `#c1666b` - Warm secondary color +- **Warm Beige**: `#d4b896` - Neutral backgrounds +- **Chocolate Brown**: `#4a403a` - Dark text and anchors + +## Typography + +- **Headers**: FreeSans Bold +- **Body Text**: FreeSans + +## Best Used For + +Restaurant presentations, hospitality brands, fall campaigns, cozy lifestyle content, artisan products. diff --git a/.claude/skills/theme-factory/themes/midnight-galaxy.md b/.claude/skills/theme-factory/themes/midnight-galaxy.md new file mode 100644 index 0000000..97e1c5f --- /dev/null +++ b/.claude/skills/theme-factory/themes/midnight-galaxy.md @@ -0,0 +1,19 @@ +# Midnight Galaxy + +A dramatic and cosmic theme with deep purples and mystical tones for impactful presentations. + +## Color Palette + +- **Deep Purple**: `#2b1e3e` - Rich dark base +- **Cosmic Blue**: `#4a4e8f` - Mystical mid-tone +- **Lavender**: `#a490c2` - Soft accent color +- **Silver**: `#e6e6fa` - Light highlights and text + +## Typography + +- **Headers**: FreeSans Bold +- **Body Text**: FreeSans + +## Best Used For + +Entertainment industry, gaming presentations, nightlife venues, luxury brands, creative agencies. diff --git a/.claude/skills/theme-factory/themes/modern-minimalist.md b/.claude/skills/theme-factory/themes/modern-minimalist.md new file mode 100644 index 0000000..6bd26a2 --- /dev/null +++ b/.claude/skills/theme-factory/themes/modern-minimalist.md @@ -0,0 +1,19 @@ +# Modern Minimalist + +A clean and contemporary theme with a sophisticated grayscale palette for maximum versatility. 
+ +## Color Palette + +- **Charcoal**: `#36454f` - Primary dark color +- **Slate Gray**: `#708090` - Medium gray for accents +- **Light Gray**: `#d3d3d3` - Backgrounds and dividers +- **White**: `#ffffff` - Text and clean backgrounds + +## Typography + +- **Headers**: DejaVu Sans Bold +- **Body Text**: DejaVu Sans + +## Best Used For + +Tech presentations, architecture portfolios, design showcases, modern business proposals, data visualization. diff --git a/.claude/skills/theme-factory/themes/ocean-depths.md b/.claude/skills/theme-factory/themes/ocean-depths.md new file mode 100644 index 0000000..b675126 --- /dev/null +++ b/.claude/skills/theme-factory/themes/ocean-depths.md @@ -0,0 +1,19 @@ +# Ocean Depths + +A professional and calming maritime theme that evokes the serenity of deep ocean waters. + +## Color Palette + +- **Deep Navy**: `#1a2332` - Primary background color +- **Teal**: `#2d8b8b` - Accent color for highlights and emphasis +- **Seafoam**: `#a8dadc` - Secondary accent for lighter elements +- **Cream**: `#f1faee` - Text and light backgrounds + +## Typography + +- **Headers**: DejaVu Sans Bold +- **Body Text**: DejaVu Sans + +## Best Used For + +Corporate presentations, financial reports, professional consulting decks, trust-building content. diff --git a/.claude/skills/theme-factory/themes/sunset-boulevard.md b/.claude/skills/theme-factory/themes/sunset-boulevard.md new file mode 100644 index 0000000..df799a0 --- /dev/null +++ b/.claude/skills/theme-factory/themes/sunset-boulevard.md @@ -0,0 +1,19 @@ +# Sunset Boulevard + +A warm and vibrant theme inspired by golden hour sunsets, perfect for energetic and creative presentations. + +## Color Palette + +- **Burnt Orange**: `#e76f51` - Primary accent color +- **Coral**: `#f4a261` - Secondary warm accent +- **Warm Sand**: `#e9c46a` - Highlighting and backgrounds +- **Deep Purple**: `#264653` - Dark contrast and text + +## Typography + +- **Headers**: DejaVu Serif Bold +- **Body Text**: DejaVu Sans + +## Best Used For + +Creative pitches, marketing presentations, lifestyle brands, event promotions, inspirational content. diff --git a/.claude/skills/theme-factory/themes/tech-innovation.md b/.claude/skills/theme-factory/themes/tech-innovation.md new file mode 100644 index 0000000..e029a43 --- /dev/null +++ b/.claude/skills/theme-factory/themes/tech-innovation.md @@ -0,0 +1,19 @@ +# Tech Innovation + +A bold and modern theme with high-contrast colors perfect for cutting-edge technology presentations. + +## Color Palette + +- **Electric Blue**: `#0066ff` - Vibrant primary accent +- **Neon Cyan**: `#00ffff` - Bright highlight color +- **Dark Gray**: `#1e1e1e` - Deep backgrounds +- **White**: `#ffffff` - Clean text and contrast + +## Typography + +- **Headers**: DejaVu Sans Bold +- **Body Text**: DejaVu Sans + +## Best Used For + +Tech startups, software launches, innovation showcases, AI/ML presentations, digital transformation content. diff --git a/.claude/skills/web-artifacts-builder/LICENSE.txt b/.claude/skills/web-artifacts-builder/LICENSE.txt new file mode 100644 index 0000000..7a4a3ea --- /dev/null +++ b/.claude/skills/web-artifacts-builder/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file
diff --git a/.claude/skills/web-artifacts-builder/SKILL.md b/.claude/skills/web-artifacts-builder/SKILL.md
new file mode 100644
index 0000000..8b39b19
--- /dev/null
+++ b/.claude/skills/web-artifacts-builder/SKILL.md
@@ -0,0 +1,74 @@
+---
+name: web-artifacts-builder
+description: Suite of tools for creating elaborate, multi-component claude.ai HTML artifacts using modern frontend web technologies (React, Tailwind CSS, shadcn/ui). Use for complex artifacts requiring state management, routing, or shadcn/ui components - not for simple single-file HTML/JSX artifacts.
+license: Complete terms in LICENSE.txt
+---
+
+# Web Artifacts Builder
+
+To build powerful frontend claude.ai artifacts, follow these steps:
+1. Initialize the frontend repo using `scripts/init-artifact.sh`
+2. Develop your artifact by editing the generated code
+3. Bundle all code into a single HTML file using `scripts/bundle-artifact.sh`
+4. Display artifact to user
+5. (Optional) Test the artifact
+
+**Stack**: React 18 + TypeScript + Vite + Parcel (bundling) + Tailwind CSS + shadcn/ui
+
+## Design & Style Guidelines
+
+VERY IMPORTANT: To avoid what is often referred to as "AI slop", avoid using excessive centered layouts, purple gradients, uniform rounded corners, and the Inter font.
+
+## Quick Start
+
+### Step 1: Initialize Project
+
+Run the initialization script to create a new React project:
+```bash
+bash scripts/init-artifact.sh <project-name>
+cd <project-name>
+```
+
+This creates a fully configured project with:
+- ✅ React + TypeScript (via Vite)
+- ✅ Tailwind CSS 3.4.1 with shadcn/ui theming system
+- ✅ Path aliases (`@/`) configured
+- ✅ 40+ shadcn/ui components pre-installed
+- ✅ All Radix UI dependencies included
+- ✅ Parcel configured for bundling (via .parcelrc)
+- ✅ Node 18+ compatibility (auto-detects and pins Vite version)
+
+### Step 2: Develop Your Artifact
+
+To build the artifact, edit the generated files, starting from `src/App.tsx`.
+
+### Step 3: Bundle to Single HTML File
+
+To bundle the React app into a single HTML artifact:
+```bash
+bash scripts/bundle-artifact.sh
+```
+
+This creates `bundle.html` - a self-contained artifact with all JavaScript, CSS, and dependencies inlined. This file can be directly shared in Claude conversations as an artifact.
+
+**Requirements**: Your project must have an `index.html` in the root directory.
+
+**What the script does**:
+- Installs bundling dependencies (parcel, @parcel/config-default, parcel-resolver-tspaths, html-inline)
+- Creates `.parcelrc` config with path alias support
+- Builds with Parcel (no source maps)
+- Inlines all assets into single HTML using html-inline
+
+### Step 4: Share Artifact with User
+
+Finally, share the bundled HTML file in conversation with the user so they can view it as an artifact.
+
+### Step 5: Testing/Visualizing the Artifact (Optional)
+
+Note: This is a completely optional step. Only perform if necessary or requested.
+
+To test/visualize the artifact, use available tools (including other Skills or built-in tools like Playwright or Puppeteer). In general, avoid testing the artifact upfront as it adds latency between the request and when the finished artifact can be seen. Test later, after presenting the artifact, if requested or if issues arise.
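+
+As one illustration, a minimal Playwright sketch along these lines could load the bundled file and capture a screenshot for review (a sketch only, assuming Playwright is installed; file names are illustrative):
+
+```python
+from playwright.sync_api import sync_playwright
+import os
+
+# Open the bundled artifact straight from disk and capture a screenshot.
+bundle_url = f"file://{os.path.abspath('bundle.html')}"
+
+with sync_playwright() as p:
+    browser = p.chromium.launch(headless=True)
+    page = browser.new_page()
+    page.goto(bundle_url)
+    page.wait_for_load_state('networkidle')  # let the inlined React app render
+    page.screenshot(path='bundle_check.png', full_page=True)
+    browser.close()
+```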
+
+## Reference
+
+- **shadcn/ui components**: https://ui.shadcn.com/docs/components
\ No newline at end of file
diff --git a/.claude/skills/web-artifacts-builder/scripts/bundle-artifact.sh b/.claude/skills/web-artifacts-builder/scripts/bundle-artifact.sh
new file mode 100644
index 0000000..c13d229
--- /dev/null
+++ b/.claude/skills/web-artifacts-builder/scripts/bundle-artifact.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+set -e
+
+echo "📦 Bundling React app to single HTML artifact..."
+
+# Check if we're in a project directory
+if [ ! -f "package.json" ]; then
+    echo "❌ Error: No package.json found. Run this script from your project root."
+    exit 1
+fi
+
+# Check if index.html exists
+if [ ! -f "index.html" ]; then
+    echo "❌ Error: No index.html found in project root."
+    echo "   This script requires an index.html entry point."
+    exit 1
+fi
+
+# Install bundling dependencies
+echo "📦 Installing bundling dependencies..."
+pnpm add -D parcel @parcel/config-default parcel-resolver-tspaths html-inline
+
+# Create Parcel config with tspaths resolver
+if [ ! -f ".parcelrc" ]; then
+    echo "🔧 Creating Parcel configuration with path alias support..."
+    cat > .parcelrc << 'EOF'
+{
+  "extends": "@parcel/config-default",
+  "resolvers": ["parcel-resolver-tspaths", "..."]
+}
+EOF
+fi
+
+# Clean previous build
+echo "🧹 Cleaning previous build..."
+rm -rf dist bundle.html
+
+# Build with Parcel
+echo "🔨 Building with Parcel..."
+pnpm exec parcel build index.html --dist-dir dist --no-source-maps
+
+# Inline everything into single HTML
+echo "🎯 Inlining all assets into single HTML file..."
+pnpm exec html-inline dist/index.html > bundle.html
+
+# Get file size
+FILE_SIZE=$(du -h bundle.html | cut -f1)
+
+echo ""
+echo "✅ Bundle complete!"
+echo "📄 Output: bundle.html ($FILE_SIZE)"
+echo ""
+echo "You can now use this single HTML file as an artifact in Claude conversations."
+echo "To test locally: open bundle.html in your browser"
\ No newline at end of file
diff --git a/.claude/skills/web-artifacts-builder/scripts/init-artifact.sh b/.claude/skills/web-artifacts-builder/scripts/init-artifact.sh
new file mode 100644
index 0000000..7d1022d
--- /dev/null
+++ b/.claude/skills/web-artifacts-builder/scripts/init-artifact.sh
@@ -0,0 +1,322 @@
+#!/bin/bash
+
+# Exit on error
+set -e
+
+# Detect Node version
+NODE_VERSION=$(node -v | cut -d'v' -f2 | cut -d'.' -f1)
+
+echo "🔍 Detected Node.js version: $NODE_VERSION"
+
+if [ "$NODE_VERSION" -lt 18 ]; then
+    echo "❌ Error: Node.js 18 or higher is required"
+    echo "   Current version: $(node -v)"
+    exit 1
+fi
+
+# Set Vite version based on Node version
+if [ "$NODE_VERSION" -ge 20 ]; then
+    VITE_VERSION="latest"
+    echo "✅ Using Vite latest (Node 20+)"
+else
+    VITE_VERSION="5.4.11"
+    echo "✅ Using Vite $VITE_VERSION (Node 18 compatible)"
+fi
+
+# Detect OS and set sed in-place syntax (array form so the empty '' argument
+# required by BSD/macOS sed survives word splitting)
+if [[ "$OSTYPE" == "darwin"* ]]; then
+    SED_INPLACE=(sed -i '')
+else
+    SED_INPLACE=(sed -i)
+fi
+
+# Check if pnpm is installed
+if ! command -v pnpm &> /dev/null; then
+    echo "📦 pnpm not found. Installing pnpm..."
+    npm install -g pnpm
+fi
+
+# Check if project name is provided
+if [ -z "$1" ]; then
+    echo "❌ Usage: ./init-artifact.sh <project-name>"
+    exit 1
+fi
+
+PROJECT_NAME="$1"
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+COMPONENTS_TARBALL="$SCRIPT_DIR/shadcn-components.tar.gz"
+
+# Check if components tarball exists
+if [ ! -f "$COMPONENTS_TARBALL" ]; then
+    echo "❌ Error: shadcn-components.tar.gz not found in script directory"
+    echo "   Expected location: $COMPONENTS_TARBALL"
+    exit 1
+fi
+
+echo "🚀 Creating new React + Vite project: $PROJECT_NAME"
+
+# Create new Vite project (always use latest create-vite, pin vite version later)
+pnpm create vite "$PROJECT_NAME" --template react-ts
+
+# Navigate into project directory
+cd "$PROJECT_NAME"
+
+echo "🧹 Cleaning up Vite template..."
+"${SED_INPLACE[@]}" 's/<title>.*<\/title>/<title>'"$PROJECT_NAME"'<\/title>/' index.html
+
+echo "📦 Installing base dependencies..."
+pnpm install
+
+# Pin Vite version for Node 18
+if [ "$NODE_VERSION" -lt 20 ]; then
+    echo "📌 Pinning Vite to $VITE_VERSION for Node 18 compatibility..."
+    pnpm add -D vite@$VITE_VERSION
+fi
+
+echo "📦 Installing Tailwind CSS and dependencies..."
+pnpm install -D tailwindcss@3.4.1 postcss autoprefixer @types/node tailwindcss-animate
+pnpm install class-variance-authority clsx tailwind-merge lucide-react next-themes
+
+echo "⚙️ Creating Tailwind and PostCSS configuration..."
+cat > postcss.config.js << 'EOF'
+export default {
+  plugins: {
+    tailwindcss: {},
+    autoprefixer: {},
+  },
+}
+EOF
+
+echo "📝 Configuring Tailwind with shadcn theme..."
+cat > tailwind.config.js << 'EOF'
+/** @type {import('tailwindcss').Config} */
+module.exports = {
+  darkMode: ["class"],
+  content: [
+    "./index.html",
+    "./src/**/*.{js,ts,jsx,tsx}",
+  ],
+  theme: {
+    extend: {
+      colors: {
+        border: "hsl(var(--border))",
+        input: "hsl(var(--input))",
+        ring: "hsl(var(--ring))",
+        background: "hsl(var(--background))",
+        foreground: "hsl(var(--foreground))",
+        primary: {
+          DEFAULT: "hsl(var(--primary))",
+          foreground: "hsl(var(--primary-foreground))",
+        },
+        secondary: {
+          DEFAULT: "hsl(var(--secondary))",
+          foreground: "hsl(var(--secondary-foreground))",
+        },
+        destructive: {
+          DEFAULT: "hsl(var(--destructive))",
+          foreground: "hsl(var(--destructive-foreground))",
+        },
+        muted: {
+          DEFAULT: "hsl(var(--muted))",
+          foreground: "hsl(var(--muted-foreground))",
+        },
+        accent: {
+          DEFAULT: "hsl(var(--accent))",
+          foreground: "hsl(var(--accent-foreground))",
+        },
+        popover: {
+          DEFAULT: "hsl(var(--popover))",
+          foreground: "hsl(var(--popover-foreground))",
+        },
+        card: {
+          DEFAULT: "hsl(var(--card))",
+          foreground: "hsl(var(--card-foreground))",
+        },
+      },
+      borderRadius: {
+        lg: "var(--radius)",
+        md: "calc(var(--radius) - 2px)",
+        sm: "calc(var(--radius) - 4px)",
+      },
+      keyframes: {
+        "accordion-down": {
+          from: { height: "0" },
+          to: { height: "var(--radix-accordion-content-height)" },
+        },
+        "accordion-up": {
+          from: { height: "var(--radix-accordion-content-height)" },
+          to: { height: "0" },
+        },
+      },
+      animation: {
+        "accordion-down": "accordion-down 0.2s ease-out",
+        "accordion-up": "accordion-up 0.2s ease-out",
+      },
+    },
+  },
+  plugins: [require("tailwindcss-animate")],
+}
+EOF
+
+# Add Tailwind directives and CSS variables to index.css
+echo "🎨 Adding Tailwind directives and CSS variables..."
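+# The CSS below completes the shadcn/ui theming system: colors live as HSL
+# component triples on :root (light) and .dark, and the tailwind.config.js
+# written above maps each token to utilities via hsl(var(--token)), so
+# switching themes only touches these variables.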
+cat > src/index.css << 'EOF' +@tailwind base; +@tailwind components; +@tailwind utilities; + +@layer base { + :root { + --background: 0 0% 100%; + --foreground: 0 0% 3.9%; + --card: 0 0% 100%; + --card-foreground: 0 0% 3.9%; + --popover: 0 0% 100%; + --popover-foreground: 0 0% 3.9%; + --primary: 0 0% 9%; + --primary-foreground: 0 0% 98%; + --secondary: 0 0% 96.1%; + --secondary-foreground: 0 0% 9%; + --muted: 0 0% 96.1%; + --muted-foreground: 0 0% 45.1%; + --accent: 0 0% 96.1%; + --accent-foreground: 0 0% 9%; + --destructive: 0 84.2% 60.2%; + --destructive-foreground: 0 0% 98%; + --border: 0 0% 89.8%; + --input: 0 0% 89.8%; + --ring: 0 0% 3.9%; + --radius: 0.5rem; + } + + .dark { + --background: 0 0% 3.9%; + --foreground: 0 0% 98%; + --card: 0 0% 3.9%; + --card-foreground: 0 0% 98%; + --popover: 0 0% 3.9%; + --popover-foreground: 0 0% 98%; + --primary: 0 0% 98%; + --primary-foreground: 0 0% 9%; + --secondary: 0 0% 14.9%; + --secondary-foreground: 0 0% 98%; + --muted: 0 0% 14.9%; + --muted-foreground: 0 0% 63.9%; + --accent: 0 0% 14.9%; + --accent-foreground: 0 0% 98%; + --destructive: 0 62.8% 30.6%; + --destructive-foreground: 0 0% 98%; + --border: 0 0% 14.9%; + --input: 0 0% 14.9%; + --ring: 0 0% 83.1%; + } +} + +@layer base { + * { + @apply border-border; + } + body { + @apply bg-background text-foreground; + } +} +EOF + +# Add path aliases to tsconfig.json +echo "🔧 Adding path aliases to tsconfig.json..." +node -e " +const fs = require('fs'); +const config = JSON.parse(fs.readFileSync('tsconfig.json', 'utf8')); +config.compilerOptions = config.compilerOptions || {}; +config.compilerOptions.baseUrl = '.'; +config.compilerOptions.paths = { '@/*': ['./src/*'] }; +fs.writeFileSync('tsconfig.json', JSON.stringify(config, null, 2)); +" + +# Add path aliases to tsconfig.app.json +echo "🔧 Adding path aliases to tsconfig.app.json..." +node -e " +const fs = require('fs'); +const path = 'tsconfig.app.json'; +const content = fs.readFileSync(path, 'utf8'); +// Remove comments manually +const lines = content.split('\n').filter(line => !line.trim().startsWith('//')); +const jsonContent = lines.join('\n'); +const config = JSON.parse(jsonContent.replace(/\/\*[\s\S]*?\*\//g, '').replace(/,(\s*[}\]])/g, '\$1')); +config.compilerOptions = config.compilerOptions || {}; +config.compilerOptions.baseUrl = '.'; +config.compilerOptions.paths = { '@/*': ['./src/*'] }; +fs.writeFileSync(path, JSON.stringify(config, null, 2)); +" + +# Update vite.config.ts +echo "⚙️ Updating Vite configuration..." +cat > vite.config.ts << 'EOF' +import path from "path"; +import react from "@vitejs/plugin-react"; +import { defineConfig } from "vite"; + +export default defineConfig({ + plugins: [react()], + resolve: { + alias: { + "@": path.resolve(__dirname, "./src"), + }, + }, +}); +EOF + +# Install all shadcn/ui dependencies +echo "📦 Installing shadcn/ui dependencies..." 
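+# shadcn/ui components are plain source files built on Radix UI primitives,
+# so all primitives (plus form/date/carousel helpers) are installed up front
+# and any component extracted from the tarball below will compile.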
+pnpm install @radix-ui/react-accordion @radix-ui/react-aspect-ratio @radix-ui/react-avatar @radix-ui/react-checkbox @radix-ui/react-collapsible @radix-ui/react-context-menu @radix-ui/react-dialog @radix-ui/react-dropdown-menu @radix-ui/react-hover-card @radix-ui/react-label @radix-ui/react-menubar @radix-ui/react-navigation-menu @radix-ui/react-popover @radix-ui/react-progress @radix-ui/react-radio-group @radix-ui/react-scroll-area @radix-ui/react-select @radix-ui/react-separator @radix-ui/react-slider @radix-ui/react-slot @radix-ui/react-switch @radix-ui/react-tabs @radix-ui/react-toast @radix-ui/react-toggle @radix-ui/react-toggle-group @radix-ui/react-tooltip +pnpm install sonner cmdk vaul embla-carousel-react react-day-picker react-resizable-panels date-fns react-hook-form @hookform/resolvers zod + +# Extract shadcn components from tarball +echo "📦 Extracting shadcn/ui components..." +tar -xzf "$COMPONENTS_TARBALL" -C src/ + +# Create components.json for reference +echo "📝 Creating components.json config..." +cat > components.json << 'EOF' +{ + "$schema": "https://ui.shadcn.com/schema.json", + "style": "default", + "rsc": false, + "tsx": true, + "tailwind": { + "config": "tailwind.config.js", + "css": "src/index.css", + "baseColor": "slate", + "cssVariables": true, + "prefix": "" + }, + "aliases": { + "components": "@/components", + "utils": "@/lib/utils", + "ui": "@/components/ui", + "lib": "@/lib", + "hooks": "@/hooks" + } +} +EOF + +echo "✅ Setup complete! You can now use Tailwind CSS and shadcn/ui in your project." +echo "" +echo "📦 Included components (40+ total):" +echo " - accordion, alert, aspect-ratio, avatar, badge, breadcrumb" +echo " - button, calendar, card, carousel, checkbox, collapsible" +echo " - command, context-menu, dialog, drawer, dropdown-menu" +echo " - form, hover-card, input, label, menubar, navigation-menu" +echo " - popover, progress, radio-group, resizable, scroll-area" +echo " - select, separator, sheet, skeleton, slider, sonner" +echo " - switch, table, tabs, textarea, toast, toggle, toggle-group, tooltip" +echo "" +echo "To start developing:" +echo " cd $PROJECT_NAME" +echo " pnpm dev" +echo "" +echo "📚 Import components like:" +echo " import { Button } from '@/components/ui/button'" +echo " import { Card, CardHeader, CardTitle, CardContent } from '@/components/ui/card'" +echo " import { Dialog, DialogContent, DialogTrigger } from '@/components/ui/dialog'" diff --git a/.claude/skills/web-artifacts-builder/scripts/shadcn-components.tar.gz b/.claude/skills/web-artifacts-builder/scripts/shadcn-components.tar.gz new file mode 100644 index 0000000..cdbe7cd Binary files /dev/null and b/.claude/skills/web-artifacts-builder/scripts/shadcn-components.tar.gz differ diff --git a/.claude/skills/webapp-testing/LICENSE.txt b/.claude/skills/webapp-testing/LICENSE.txt new file mode 100644 index 0000000..7a4a3ea --- /dev/null +++ b/.claude/skills/webapp-testing/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file
diff --git a/.claude/skills/webapp-testing/SKILL.md b/.claude/skills/webapp-testing/SKILL.md
new file mode 100644
index 0000000..4726215
--- /dev/null
+++ b/.claude/skills/webapp-testing/SKILL.md
@@ -0,0 +1,96 @@
+---
+name: webapp-testing
+description: Toolkit for interacting with and testing local web applications using Playwright. Supports verifying frontend functionality, debugging UI behavior, capturing browser screenshots, and viewing browser logs.
+license: Complete terms in LICENSE.txt
+---
+
+# Web Application Testing
+
+To test local web applications, write native Python Playwright scripts.
+
+**Helper Scripts Available**:
+- `scripts/with_server.py` - Manages server lifecycle (supports multiple servers)
+
+**Always run scripts with `--help` first** to see usage. DO NOT read the source until you have tried running the script and found that a customized solution is absolutely necessary. These scripts can be very large and would otherwise pollute your context window. They exist to be called directly as black-box scripts rather than ingested into your context window.
+
+## Decision Tree: Choosing Your Approach
+
+```
+User task → Is it static HTML?
+  ├─ Yes → Read HTML file directly to identify selectors
+  │   ├─ Success → Write Playwright script using selectors
+  │   └─ Fails/Incomplete → Treat as dynamic (below)
+  │
+  └─ No (dynamic webapp) → Is the server already running?
+      ├─ No → Run: python scripts/with_server.py --help
+      │       Then use the helper + write simplified Playwright script
+      │
+      └─ Yes → Reconnaissance-then-action:
+          1. Navigate and wait for networkidle
+          2. Take screenshot or inspect DOM
+          3. Identify selectors from rendered state
+          4. Execute actions with discovered selectors
+```
+
+## Example: Using with_server.py
+
+To start a server, run `--help` first, then use the helper:
+
+**Single server:**
+```bash
+python scripts/with_server.py --server "npm run dev" --port 5173 -- python your_automation.py
+```
+
+**Multiple servers (e.g., backend + frontend):**
+```bash
+python scripts/with_server.py \
+  --server "cd backend && python server.py" --port 3000 \
+  --server "cd frontend && npm run dev" --port 5173 \
+  -- python your_automation.py
+```
+
+To create an automation script, include only Playwright logic (servers are managed automatically):
+```python
+from playwright.sync_api import sync_playwright
+
+with sync_playwright() as p:
+    browser = p.chromium.launch(headless=True)  # Always launch chromium in headless mode
+    page = browser.new_page()
+    page.goto('http://localhost:5173')  # Server already running and ready
+    page.wait_for_load_state('networkidle')  # CRITICAL: Wait for JS to execute
+    # ... your automation logic
+    browser.close()
+```
+
+## Reconnaissance-Then-Action Pattern
+
+1. **Inspect rendered DOM**:
+   ```python
+   page.screenshot(path='/tmp/inspect.png', full_page=True)
+   content = page.content()
+   page.locator('button').all()
+   ```
+
+2. **Identify selectors** from inspection results
+
+3. **Execute actions** using discovered selectors
+
+## Common Pitfall
+
+❌ **Don't** inspect the DOM before waiting for `networkidle` on dynamic apps
+✅ **Do** wait for `page.wait_for_load_state('networkidle')` before inspection
+
+## Best Practices
+
+- **Use bundled scripts as black boxes** - To accomplish a task, consider whether one of the scripts available in `scripts/` can help. These scripts handle common, complex workflows reliably without cluttering the context window. Use `--help` to see usage, then invoke directly.
+- Use `sync_playwright()` for synchronous scripts +- Always close the browser when done +- Use descriptive selectors: `text=`, `role=`, CSS selectors, or IDs +- Add appropriate waits: `page.wait_for_selector()` or `page.wait_for_timeout()` + +## Reference Files + +- **examples/** - Examples showing common patterns: + - `element_discovery.py` - Discovering buttons, links, and inputs on a page + - `static_html_automation.py` - Using file:// URLs for local HTML + - `console_logging.py` - Capturing console logs during automation \ No newline at end of file diff --git a/.claude/skills/webapp-testing/examples/console_logging.py b/.claude/skills/webapp-testing/examples/console_logging.py new file mode 100644 index 0000000..9329b5e --- /dev/null +++ b/.claude/skills/webapp-testing/examples/console_logging.py @@ -0,0 +1,35 @@ +from playwright.sync_api import sync_playwright + +# Example: Capturing console logs during browser automation + +url = 'http://localhost:5173' # Replace with your URL + +console_logs = [] + +with sync_playwright() as p: + browser = p.chromium.launch(headless=True) + page = browser.new_page(viewport={'width': 1920, 'height': 1080}) + + # Set up console log capture + def handle_console_message(msg): + console_logs.append(f"[{msg.type}] {msg.text}") + print(f"Console: [{msg.type}] {msg.text}") + + page.on("console", handle_console_message) + + # Navigate to page + page.goto(url) + page.wait_for_load_state('networkidle') + + # Interact with the page (triggers console logs) + page.click('text=Dashboard') + page.wait_for_timeout(1000) + + browser.close() + +# Save console logs to file +with open('/mnt/user-data/outputs/console.log', 'w') as f: + f.write('\n'.join(console_logs)) + +print(f"\nCaptured {len(console_logs)} console messages") +print(f"Logs saved to: /mnt/user-data/outputs/console.log") \ No newline at end of file diff --git a/.claude/skills/webapp-testing/examples/element_discovery.py b/.claude/skills/webapp-testing/examples/element_discovery.py new file mode 100644 index 0000000..917ba72 --- /dev/null +++ b/.claude/skills/webapp-testing/examples/element_discovery.py @@ -0,0 +1,40 @@ +from playwright.sync_api import sync_playwright + +# Example: Discovering buttons and other elements on a page + +with sync_playwright() as p: + browser = p.chromium.launch(headless=True) + page = browser.new_page() + + # Navigate to page and wait for it to fully load + page.goto('http://localhost:5173') + page.wait_for_load_state('networkidle') + + # Discover all buttons on the page + buttons = page.locator('button').all() + print(f"Found {len(buttons)} buttons:") + for i, button in enumerate(buttons): + text = button.inner_text() if button.is_visible() else "[hidden]" + print(f" [{i}] {text}") + + # Discover links + links = page.locator('a[href]').all() + print(f"\nFound {len(links)} links:") + for link in links[:5]: # Show first 5 + text = link.inner_text().strip() + href = link.get_attribute('href') + print(f" - {text} -> {href}") + + # Discover input fields + inputs = page.locator('input, textarea, select').all() + print(f"\nFound {len(inputs)} input fields:") + for input_elem in inputs: + name = input_elem.get_attribute('name') or input_elem.get_attribute('id') or "[unnamed]" + input_type = input_elem.get_attribute('type') or 'text' + print(f" - {name} ({input_type})") + + # Take screenshot for visual reference + page.screenshot(path='/tmp/page_discovery.png', full_page=True) + print("\nScreenshot saved to /tmp/page_discovery.png") + + browser.close() \ No newline at end of file 
diff --git a/.claude/skills/webapp-testing/examples/static_html_automation.py b/.claude/skills/webapp-testing/examples/static_html_automation.py new file mode 100644 index 0000000..90bbedc --- /dev/null +++ b/.claude/skills/webapp-testing/examples/static_html_automation.py @@ -0,0 +1,33 @@ +from playwright.sync_api import sync_playwright +import os + +# Example: Automating interaction with static HTML files using file:// URLs + +html_file_path = os.path.abspath('path/to/your/file.html') +file_url = f'file://{html_file_path}' + +with sync_playwright() as p: + browser = p.chromium.launch(headless=True) + page = browser.new_page(viewport={'width': 1920, 'height': 1080}) + + # Navigate to local HTML file + page.goto(file_url) + + # Take screenshot + page.screenshot(path='/mnt/user-data/outputs/static_page.png', full_page=True) + + # Interact with elements + page.click('text=Click Me') + page.fill('#name', 'John Doe') + page.fill('#email', 'john@example.com') + + # Submit form + page.click('button[type="submit"]') + page.wait_for_timeout(500) + + # Take final screenshot + page.screenshot(path='/mnt/user-data/outputs/after_submit.png', full_page=True) + + browser.close() + +print("Static HTML automation completed!") \ No newline at end of file diff --git a/.claude/skills/webapp-testing/scripts/with_server.py b/.claude/skills/webapp-testing/scripts/with_server.py new file mode 100644 index 0000000..431f2eb --- /dev/null +++ b/.claude/skills/webapp-testing/scripts/with_server.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 +""" +Start one or more servers, wait for them to be ready, run a command, then clean up. + +Usage: + # Single server + python scripts/with_server.py --server "npm run dev" --port 5173 -- python automation.py + python scripts/with_server.py --server "npm start" --port 3000 -- python test.py + + # Multiple servers + python scripts/with_server.py \ + --server "cd backend && python server.py" --port 3000 \ + --server "cd frontend && npm run dev" --port 5173 \ + -- python test.py +""" + +import subprocess +import socket +import time +import sys +import argparse + +def is_server_ready(port, timeout=30): + """Wait for server to be ready by polling the port.""" + start_time = time.time() + while time.time() - start_time < timeout: + try: + with socket.create_connection(('localhost', port), timeout=1): + return True + except (socket.error, ConnectionRefusedError): + time.sleep(0.5) + return False + + +def main(): + parser = argparse.ArgumentParser(description='Run command with one or more servers') + parser.add_argument('--server', action='append', dest='servers', required=True, help='Server command (can be repeated)') + parser.add_argument('--port', action='append', dest='ports', type=int, required=True, help='Port for each server (must match --server count)') + parser.add_argument('--timeout', type=int, default=30, help='Timeout in seconds per server (default: 30)') + parser.add_argument('command', nargs=argparse.REMAINDER, help='Command to run after server(s) ready') + + args = parser.parse_args() + + # Remove the '--' separator if present + if args.command and args.command[0] == '--': + args.command = args.command[1:] + + if not args.command: + print("Error: No command specified to run") + sys.exit(1) + + # Parse server configurations + if len(args.servers) != len(args.ports): + print("Error: Number of --server and --port arguments must match") + sys.exit(1) + + servers = [] + for cmd, port in zip(args.servers, args.ports): + servers.append({'cmd': cmd, 'port': port}) + + 
server_processes = [] + + try: + # Start all servers + for i, server in enumerate(servers): + print(f"Starting server {i+1}/{len(servers)}: {server['cmd']}") + + # Use shell=True to support commands with cd and && + process = subprocess.Popen( + server['cmd'], + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + server_processes.append(process) + + # Wait for this server to be ready + print(f"Waiting for server on port {server['port']}...") + if not is_server_ready(server['port'], timeout=args.timeout): + raise RuntimeError(f"Server failed to start on port {server['port']} within {args.timeout}s") + + print(f"Server ready on port {server['port']}") + + print(f"\nAll {len(servers)} server(s) ready") + + # Run the command + print(f"Running: {' '.join(args.command)}\n") + result = subprocess.run(args.command) + sys.exit(result.returncode) + + finally: + # Clean up all servers + print(f"\nStopping {len(server_processes)} server(s)...") + for i, process in enumerate(server_processes): + try: + process.terminate() + process.wait(timeout=5) + except subprocess.TimeoutExpired: + process.kill() + process.wait() + print(f"Server {i+1} stopped") + print("All servers stopped") + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/.claude/skills/xlsx/LICENSE.txt b/.claude/skills/xlsx/LICENSE.txt new file mode 100644 index 0000000..c55ab42 --- /dev/null +++ b/.claude/skills/xlsx/LICENSE.txt @@ -0,0 +1,30 @@ +© 2025 Anthropic, PBC. All rights reserved. + +LICENSE: Use of these materials (including all code, prompts, assets, files, +and other components of this Skill) is governed by your agreement with +Anthropic regarding use of Anthropic's services. If no separate agreement +exists, use is governed by Anthropic's Consumer Terms of Service or +Commercial Terms of Service, as applicable: +https://www.anthropic.com/legal/consumer-terms +https://www.anthropic.com/legal/commercial-terms +Your applicable agreement is referred to as the "Agreement." "Services" are +as defined in the Agreement. + +ADDITIONAL RESTRICTIONS: Notwithstanding anything in the Agreement to the +contrary, users may not: + +- Extract these materials from the Services or retain copies of these + materials outside the Services +- Reproduce or copy these materials, except for temporary copies created + automatically during authorized use of the Services +- Create derivative works based on these materials +- Distribute, sublicense, or transfer these materials to any third party +- Make, offer to sell, sell, or import any inventions embodied in these + materials +- Reverse engineer, decompile, or disassemble these materials + +The receipt, viewing, or possession of these materials does not convey or +imply any license or right beyond those expressly granted above. + +Anthropic retains all right, title, and interest in these materials, +including all copyrights, patents, and other intellectual property rights. diff --git a/.claude/skills/xlsx/SKILL.md b/.claude/skills/xlsx/SKILL.md new file mode 100644 index 0000000..22db189 --- /dev/null +++ b/.claude/skills/xlsx/SKILL.md @@ -0,0 +1,289 @@ +--- +name: xlsx +description: "Comprehensive spreadsheet creation, editing, and analysis with support for formulas, formatting, data analysis, and visualization. 
When Claude needs to work with spreadsheets (.xlsx, .xlsm, .csv, .tsv, etc.) for: (1) Creating new spreadsheets with formulas and formatting, (2) Reading or analyzing data, (3) Modifying existing spreadsheets while preserving formulas, (4) Data analysis and visualization in spreadsheets, or (5) Recalculating formulas"
+license: Proprietary. See LICENSE.txt for complete terms.
+---
+
+# Requirements for Outputs
+
+## All Excel files
+
+### Zero Formula Errors
+- Every Excel model MUST be delivered with ZERO formula errors (#REF!, #DIV/0!, #VALUE!, #N/A, #NAME?)
+
+### Preserve Existing Templates (when updating templates)
+- Study and EXACTLY match the existing format, style, and conventions when modifying files
+- Never impose standardized formatting on files with established patterns
+- Existing template conventions ALWAYS override these guidelines
+
+## Financial models
+
+### Color Coding Standards
+Unless otherwise specified by the user or an existing template:
+
+#### Industry-Standard Color Conventions
+- **Blue text (RGB: 0,0,255)**: Hardcoded inputs and numbers users will change for scenarios
+- **Black text (RGB: 0,0,0)**: ALL formulas and calculations
+- **Green text (RGB: 0,128,0)**: Links pulling from other worksheets within the same workbook
+- **Red text (RGB: 255,0,0)**: External links to other files
+- **Yellow background (RGB: 255,255,0)**: Key assumptions needing attention, or cells that need to be updated
+
+### Number Formatting Standards
+
+#### Required Format Rules
+- **Years**: Format as text strings (e.g., "2024" not "2,024")
+- **Currency**: Use $#,##0 format; ALWAYS specify units in headers ("Revenue ($mm)")
+- **Zeros**: Use a number format that displays all zeros as "-", including percentages (e.g., "$#,##0;($#,##0);-")
+- **Percentages**: Default to 0.0% format (one decimal)
+- **Multiples**: Format as 0.0x for valuation multiples (EV/EBITDA, P/E)
+- **Negative numbers**: Use parentheses (123) rather than a minus sign (-123)
+
+### Formula Construction Rules
+
+#### Assumptions Placement
+- Place ALL assumptions (growth rates, margins, multiples, etc.) in separate assumption cells
+- Use cell references instead of hardcoded values in formulas
+- Example: Use =B5*(1+$B$6) instead of =B5*1.05
+
+#### Formula Error Prevention
+- Verify all cell references are correct
+- Check for off-by-one errors in ranges
+- Ensure consistent formulas across all projection periods
+- Test with edge cases (zero values, negative numbers)
+- Verify there are no unintended circular references
+
+#### Documentation Requirements for Hardcodes
+- Document each hardcode in a cell comment, or in the cell beside it (if at the end of a table). Format: "Source: [System/Document], [Date], [Specific Reference], [URL if applicable]"
+- Examples:
+  - "Source: Company 10-K, FY2024, Page 45, Revenue Note, [SEC EDGAR URL]"
+  - "Source: Company 10-Q, Q2 2025, Exhibit 99.1, [SEC EDGAR URL]"
+  - "Source: Bloomberg Terminal, 8/15/2025, AAPL US Equity"
+  - "Source: FactSet, 8/20/2025, Consensus Estimates Screen"
+
+# XLSX creation, editing, and analysis
+
+## Overview
+
+A user may ask you to create, edit, or analyze the contents of an .xlsx file. You have different tools and workflows available for different tasks.
+
+## Important Requirements
+
+**LibreOffice Required for Formula Recalculation**: You can assume LibreOffice is installed for recalculating formula values using the `recalc.py` script.
The script automatically configures LibreOffice on first run.
+
+## Reading and analyzing data
+
+### Data analysis with pandas
+For data analysis, visualization, and basic operations, use **pandas**, which provides powerful data manipulation capabilities:
+
+```python
+import pandas as pd
+
+# Read Excel
+df = pd.read_excel('file.xlsx')  # Default: first sheet
+all_sheets = pd.read_excel('file.xlsx', sheet_name=None)  # All sheets as dict
+
+# Analyze
+df.head()      # Preview data
+df.info()      # Column info
+df.describe()  # Statistics
+
+# Write Excel
+df.to_excel('output.xlsx', index=False)
+```
+
+## Excel File Workflows
+
+## CRITICAL: Use Formulas, Not Hardcoded Values
+
+**Always use Excel formulas instead of calculating values in Python and hardcoding them.** This ensures the spreadsheet remains dynamic and updateable.
+
+### ❌ WRONG - Hardcoding Calculated Values
+```python
+# Bad: Calculating in Python and hardcoding the result
+total = df['Sales'].sum()
+sheet['B10'] = total  # Hardcodes 5000
+
+# Bad: Computing growth rate in Python
+growth = (df.iloc[-1]['Revenue'] - df.iloc[0]['Revenue']) / df.iloc[0]['Revenue']
+sheet['C5'] = growth  # Hardcodes 0.15
+
+# Bad: Python calculation for average
+avg = sum(values) / len(values)
+sheet['D20'] = avg  # Hardcodes 42.5
+```
+
+### ✅ CORRECT - Using Excel Formulas
+```python
+# Good: Let Excel calculate the sum
+sheet['B10'] = '=SUM(B2:B9)'
+
+# Good: Growth rate as an Excel formula
+sheet['C5'] = '=(C4-C2)/C2'
+
+# Good: Average using an Excel function
+sheet['D20'] = '=AVERAGE(D2:D19)'
+```
+
+This applies to ALL calculations - totals, percentages, ratios, differences, etc. The spreadsheet should be able to recalculate when the source data changes.
+
+## Common Workflow
+1. **Choose tool**: pandas for data, openpyxl for formulas/formatting
+2. **Create/Load**: Create a new workbook or load an existing file
+3. **Modify**: Add/edit data, formulas, and formatting
+4. **Save**: Write to file
+5. **Recalculate formulas (MANDATORY IF USING FORMULAS)**: Use the recalc.py script
+   ```bash
+   python recalc.py output.xlsx
+   ```
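+   For example, steps 2–5 can be scripted end to end. A minimal sketch (the file name, data, and cell layout are illustrative; assumes recalc.py is in the working directory):
+   ```python
+   import json
+   import subprocess
+   from openpyxl import Workbook
+
+   wb = Workbook()
+   ws = wb.active
+   ws.append(['Item', 'Amount'])
+   for name, amount in [('Widgets', 120), ('Gadgets', 80), ('Gizmos', 45)]:
+       ws.append([name, amount])
+   ws['B5'] = '=SUM(B2:B4)'  # a formula, not a Python-computed total
+   wb.save('output.xlsx')
+
+   # Step 5: recalculate with LibreOffice and inspect the JSON report
+   proc = subprocess.run(['python', 'recalc.py', 'output.xlsx'],
+                         capture_output=True, text=True)
+   report = json.loads(proc.stdout)
+   print(report['status'], report.get('total_errors'))
+   ```
+6. 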
**Verify and fix any errors**: + - The script returns JSON with error details + - If `status` is `errors_found`, check `error_summary` for specific error types and locations + - Fix the identified errors and recalculate again + - Common errors to fix: + - `#REF!`: Invalid cell references + - `#DIV/0!`: Division by zero + - `#VALUE!`: Wrong data type in formula + - `#NAME?`: Unrecognized formula name + +### Creating new Excel files + +```python +# Using openpyxl for formulas and formatting +from openpyxl import Workbook +from openpyxl.styles import Font, PatternFill, Alignment + +wb = Workbook() +sheet = wb.active + +# Add data +sheet['A1'] = 'Hello' +sheet['B1'] = 'World' +sheet.append(['Row', 'of', 'data']) + +# Add formula +sheet['B2'] = '=SUM(A1:A10)' + +# Formatting +sheet['A1'].font = Font(bold=True, color='FF0000') +sheet['A1'].fill = PatternFill('solid', start_color='FFFF00') +sheet['A1'].alignment = Alignment(horizontal='center') + +# Column width +sheet.column_dimensions['A'].width = 20 + +wb.save('output.xlsx') +``` + +### Editing existing Excel files + +```python +# Using openpyxl to preserve formulas and formatting +from openpyxl import load_workbook + +# Load existing file +wb = load_workbook('existing.xlsx') +sheet = wb.active # or wb['SheetName'] for specific sheet + +# Working with multiple sheets +for sheet_name in wb.sheetnames: + sheet = wb[sheet_name] + print(f"Sheet: {sheet_name}") + +# Modify cells +sheet['A1'] = 'New Value' +sheet.insert_rows(2) # Insert row at position 2 +sheet.delete_cols(3) # Delete column 3 + +# Add new sheet +new_sheet = wb.create_sheet('NewSheet') +new_sheet['A1'] = 'Data' + +wb.save('modified.xlsx') +``` + +## Recalculating formulas + +Excel files created or modified by openpyxl contain formulas as strings but not calculated values. Use the provided `recalc.py` script to recalculate formulas: + +```bash +python recalc.py <excel_file> [timeout_seconds] +``` + +Example: +```bash +python recalc.py output.xlsx 30 +``` + +The script: +- Automatically sets up LibreOffice macro on first run +- Recalculates all formulas in all sheets +- Scans ALL cells for Excel errors (#REF!, #DIV/0!, etc.) +- Returns JSON with detailed error locations and counts +- Works on both Linux and macOS + +## Formula Verification Checklist + +Quick checks to ensure formulas work correctly: + +### Essential Verification +- [ ] **Test 2-3 sample references**: Verify they pull correct values before building full model +- [ ] **Column mapping**: Confirm Excel columns match (e.g., column 64 = BL, not BK) +- [ ] **Row offset**: Remember Excel rows are 1-indexed (DataFrame row 5 = Excel row 6) + +### Common Pitfalls +- [ ] **NaN handling**: Check for null values with `pd.notna()` +- [ ] **Far-right columns**: FY data often in columns 50+ +- [ ] **Multiple matches**: Search all occurrences, not just first +- [ ] **Division by zero**: Check denominators before using `/` in formulas (#DIV/0!) +- [ ] **Wrong references**: Verify all cell references point to intended cells (#REF!) 
+- [ ] **Cross-sheet references**: Use correct format (Sheet1!A1) for linking sheets + +### Formula Testing Strategy +- [ ] **Start small**: Test formulas on 2-3 cells before applying broadly +- [ ] **Verify dependencies**: Check all cells referenced in formulas exist +- [ ] **Test edge cases**: Include zero, negative, and very large values + +### Interpreting recalc.py Output +The script returns JSON with error details: +```json +{ + "status": "success", // or "errors_found" + "total_errors": 0, // Total error count + "total_formulas": 42, // Number of formulas in file + "error_summary": { // Only present if errors found + "#REF!": { + "count": 2, + "locations": ["Sheet1!B5", "Sheet1!C10"] + } + } +} +``` + +## Best Practices + +### Library Selection +- **pandas**: Best for data analysis, bulk operations, and simple data export +- **openpyxl**: Best for complex formatting, formulas, and Excel-specific features + +### Working with openpyxl +- Cell indices are 1-based (row=1, column=1 refers to cell A1) +- Use `data_only=True` to read calculated values: `load_workbook('file.xlsx', data_only=True)` +- **Warning**: If opened with `data_only=True` and saved, formulas are replaced with values and permanently lost +- For large files: Use `read_only=True` for reading or `write_only=True` for writing +- Formulas are preserved but not evaluated - use recalc.py to update values + +### Working with pandas +- Specify data types to avoid inference issues: `pd.read_excel('file.xlsx', dtype={'id': str})` +- For large files, read specific columns: `pd.read_excel('file.xlsx', usecols=['A', 'C', 'E'])` +- Handle dates properly: `pd.read_excel('file.xlsx', parse_dates=['date_column'])` + +## Code Style Guidelines +**IMPORTANT**: When generating Python code for Excel operations: +- Write minimal, concise Python code without unnecessary comments +- Avoid verbose variable names and redundant operations +- Avoid unnecessary print statements + +**For Excel files themselves**: +- Add comments to cells with complex formulas or important assumptions +- Document data sources for hardcoded values +- Include notes for key calculations and model sections \ No newline at end of file diff --git a/.claude/skills/xlsx/recalc.py b/.claude/skills/xlsx/recalc.py new file mode 100644 index 0000000..102e157 --- /dev/null +++ b/.claude/skills/xlsx/recalc.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +""" +Excel Formula Recalculation Script +Recalculates all formulas in an Excel file using LibreOffice +""" + +import json +import sys +import subprocess +import os +import platform +from pathlib import Path +from openpyxl import load_workbook + + +def setup_libreoffice_macro(): + """Setup LibreOffice macro for recalculation if not already configured""" + if platform.system() == 'Darwin': + macro_dir = os.path.expanduser('~/Library/Application Support/LibreOffice/4/user/basic/Standard') + else: + macro_dir = os.path.expanduser('~/.config/libreoffice/4/user/basic/Standard') + + macro_file = os.path.join(macro_dir, 'Module1.xba') + + if os.path.exists(macro_file): + with open(macro_file, 'r') as f: + if 'RecalculateAndSave' in f.read(): + return True + + if not os.path.exists(macro_dir): + subprocess.run(['soffice', '--headless', '--terminate_after_init'], + capture_output=True, timeout=10) + os.makedirs(macro_dir, exist_ok=True) + + macro_content = '''<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE script:module PUBLIC "-//OpenOffice.org//DTD OfficeDocument 1.0//EN" "module.dtd"> +<script:module 
xmlns:script="http://openoffice.org/2000/script" script:name="Module1" script:language="StarBasic"> + Sub RecalculateAndSave() + ThisComponent.calculateAll() + ThisComponent.store() + ThisComponent.close(True) + End Sub +</script:module>''' + + try: + with open(macro_file, 'w') as f: + f.write(macro_content) + return True + except Exception: + return False + + +def recalc(filename, timeout=30): + """ + Recalculate formulas in Excel file and report any errors + + Args: + filename: Path to Excel file + timeout: Maximum time to wait for recalculation (seconds) + + Returns: + dict with error locations and counts + """ + if not Path(filename).exists(): + return {'error': f'File {filename} does not exist'} + + abs_path = str(Path(filename).absolute()) + + if not setup_libreoffice_macro(): + return {'error': 'Failed to setup LibreOffice macro'} + + cmd = [ + 'soffice', '--headless', '--norestore', + 'vnd.sun.star.script:Standard.Module1.RecalculateAndSave?language=Basic&location=application', + abs_path + ] + + # Handle timeout command differences between Linux and macOS + if platform.system() != 'Windows': + timeout_cmd = 'timeout' if platform.system() == 'Linux' else None + if platform.system() == 'Darwin': + # Check if gtimeout is available on macOS + try: + subprocess.run(['gtimeout', '--version'], capture_output=True, timeout=1, check=False) + timeout_cmd = 'gtimeout' + except (FileNotFoundError, subprocess.TimeoutExpired): + pass + + if timeout_cmd: + cmd = [timeout_cmd, str(timeout)] + cmd + + result = subprocess.run(cmd, capture_output=True, text=True) + + if result.returncode != 0 and result.returncode != 124: # 124 is timeout exit code + error_msg = result.stderr or 'Unknown error during recalculation' + if 'Module1' in error_msg or 'RecalculateAndSave' not in error_msg: + return {'error': 'LibreOffice macro not configured properly'} + else: + return {'error': error_msg} + + # Check for Excel errors in the recalculated file - scan ALL cells + try: + wb = load_workbook(filename, data_only=True) + + excel_errors = ['#VALUE!', '#DIV/0!', '#REF!', '#NAME?', '#NULL!', '#NUM!', '#N/A'] + error_details = {err: [] for err in excel_errors} + total_errors = 0 + + for sheet_name in wb.sheetnames: + ws = wb[sheet_name] + # Check ALL rows and columns - no limits + for row in ws.iter_rows(): + for cell in row: + if cell.value is not None and isinstance(cell.value, str): + for err in excel_errors: + if err in cell.value: + location = f"{sheet_name}!{cell.coordinate}" + error_details[err].append(location) + total_errors += 1 + break + + wb.close() + + # Build result summary + result = { + 'status': 'success' if total_errors == 0 else 'errors_found', + 'total_errors': total_errors, + 'error_summary': {} + } + + # Add non-empty error categories + for err_type, locations in error_details.items(): + if locations: + result['error_summary'][err_type] = { + 'count': len(locations), + 'locations': locations[:20] # Show up to 20 locations + } + + # Add formula count for context - also check ALL cells + wb_formulas = load_workbook(filename, data_only=False) + formula_count = 0 + for sheet_name in wb_formulas.sheetnames: + ws = wb_formulas[sheet_name] + for row in ws.iter_rows(): + for cell in row: + if cell.value and isinstance(cell.value, str) and cell.value.startswith('='): + formula_count += 1 + wb_formulas.close() + + result['total_formulas'] = formula_count + + return result + + except Exception as e: + return {'error': str(e)} + + +def main(): + if len(sys.argv) < 2: + print("Usage: python recalc.py 
<excel_file> [timeout_seconds]") + print("\nRecalculates all formulas in an Excel file using LibreOffice") + print("\nReturns JSON with error details:") + print(" - status: 'success' or 'errors_found'") + print(" - total_errors: Total number of Excel errors found") + print(" - total_formulas: Number of formulas in the file") + print(" - error_summary: Breakdown by error type with locations") + print(" - #VALUE!, #DIV/0!, #REF!, #NAME?, #NULL!, #NUM!, #N/A") + sys.exit(1) + + filename = sys.argv[1] + timeout = int(sys.argv[2]) if len(sys.argv) > 2 else 30 + + result = recalc(filename, timeout) + print(json.dumps(result, indent=2)) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c818851 --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +.DS_Store +__pycache__/ +.idea/ +.vscode/ +.env +.claude/ +.venv/ + diff --git a/COMPLETE_GUIDE.md b/COMPLETE_GUIDE.md new file mode 100644 index 0000000..aff7685 --- /dev/null +++ b/COMPLETE_GUIDE.md @@ -0,0 +1,295 @@ +# Creative Studio - 完整平台使用指南 + +## 🎯 功能概览 + +| 功能模块 | 状态 | 说明 | +|---------|------|------| +| **Skills 管理中心** | ✅ | 查看、测试、配置所有 Skills | +| **Agents 管理中心** | ✅ | 查看 Agent 工作流、配置绑定 | +| **项目创建** | ✅ | 上传剧本、AI 辅助生成、Skill 选择 | +| **执行监控** | ✅ | 实时进度、各阶段状态、日志输出 | +| **项目详情** | ✅ | 查看设定、管理剧集、查看内容 | + +--- + +## 🚀 快速开始 + +### 1. 启动服务 + +```powershell +# 后端 +cd backend +.\start.bat + +# 前端 +cd frontend +npm run dev +``` + +### 2. 访问平台 + +- **前端**: http://localhost:5173 +- **API 文档**: http://localhost:8000/docs + +--- + +## 📖 完整使用流程 + +### 方案 A: 上传剧本改编(推荐) + +适合:已有现成的剧本,想要快速改编 + +#### 步骤 1: 创建项目 +1. 访问 http://localhost:5173 +2. 点击 "创建新项目" +3. 选择创作方式:**上传剧本** +4. 上传你的剧本文件(.txt, .md, .docx) +5. 系统会自动解析剧本 + +#### 步骤 2: AI 辅助完善(可选) +1. 点击 "AI 辅助生成" 按钮 +2. AI 会根据你的剧本内容: + - 提取主要人物 + - 生成人物小传 + - 优化大纲结构 + +#### 步骤 3: 配置 Skills +1. 选择对话创作 Skills(如:古风对话大师) +2. 选择审核 Skills(如:一致性审核器) +3. 查看每个 Skill 的行为指导 + +#### 步骤 4: 执行创作 +1. 进入项目详情页 +2. 选择要创作的集数 +3. 点击 "执行单集" 或 "批量执行" +4. 实时查看 Agent 执行进度 + +--- + +### 方案 B: 从头创作 + AI 辅助 + +适合:只有一个初步想法或框架 + +#### 步骤 1: 创建项目 +1. 点击 "创建新项目" +2. 选择创作方式:**从头创作** +3. 输入项目名称和集数 + +#### 步骤 2: AI 辅助生成 +1. 在 "AI 辅助完善" 页面: + - 输入你的初步想法,例如: + > "一个架空朝代,边关将军与丞相之女的爱情故事,充满政治阴谋" + + - 点击 "AI 生成人物" → AI 自动生成人物设定 + - 点击 "AI 生成大纲" → AI 自动生成完整大纲 + +2. 你可以修改 AI 生成的内容 + +#### 步骤 3-4: 同方案 A + +--- + +## 🎛️ 功能详解 + +### Skills 管理中心 + +访问路径:`http://localhost:5173/skills` + +#### 功能 +- **查看所有 Skills**: 内置 + 用户创建的 +- **测试 Skill**: 输入测试内容,查看 AI 输出 +- **查看行为指导**: 了解 Skill 如何指导 AI +- **分类筛选**: 按编剧、审核、通用等分类 + +#### 内置 Skills + +| Skill ID | 名称 | 用途 | +|---------|------|------| +| `dialogue_writer_ancient` | 古风对话大师 | 创作古风风格对话 | +| `consistency_checker` | 一致性审核器 | 检查内容一致性 | + +--- + +### Agents 管理中心 + +访问路径:`http://localhost:5173/agents` + +#### 功能 +- **查看 Agent 工作流**: 可视化 5 阶段流程 +- **查看 Skill 绑定**: 每个阶段使用什么 Skill +- **配置 Skill**: 可配置阶段更换 Skill + +#### 剧集创作 Agent 工作流 + +``` +1. 加载全局上下文 (固定) + ↓ +2. 结构分析 → 使用 Skill: script-structure-analyzer + ↓ +3. 大纲生成 → 使用 Skill: outline-generator + ↓ +4. 对话创作 → 使用 Skill: dialogue-writer (可配置) ⭐ + ↓ +5. 一致性审核 → 使用 Skill: consistency-checker + ↓ +6. 更新记忆系统 (固定) +``` + +--- + +### 执行监控页面 + +访问路径:`http://localhost:5173/projects/{id}/execute` + +#### 功能 +- **实时进度**: 显示当前执行的阶段 +- **执行日志**: 显示详细的执行日志 +- **批次执行**: 一键执行 EP1-EP3 +- **查看结果**: 点击已完成的剧集查看内容 + +#### 执行日志示例 + +``` +[阶段 1] 加载全局上下文 开始... +[阶段 1] 加载全局上下文 完成 ✓ +[阶段 2] 结构分析 开始... +[阶段 2] 结构分析 完成 ✓ +[阶段 3] 大纲生成 开始... +[阶段 3] 大纲生成 完成 ✓ +[阶段 4] 对话创作 开始... 
+使用 Skill: dialogue_writer_ancient +[阶段 4] 对话创作 完成 ✓ +[阶段 5] 一致性审核 开始... +[阶段 5] 一致性审核 完成 ✓ + +✅ EP1 创作完成! +``` + +--- + +## 🔧 API 端点说明 + +### AI 辅助生成 API + +| 端点 | 功能 | 使用场景 | +|-----|------|---------| +| `POST /api/v1/ai-assistant/generate/characters` | 生成人物设定 | 用户只有初步想法 | +| `POST /api/v1/ai-assistant/generate/outline` | 生成大纲 | 用户提供框架 | +| `POST /api/v1/ai-assistant/parse/script` | 解析剧本 | 上传剧本后提取 | +| `GET /api/v1/ai-assistant/available-skills` | 获取可用 Skills | 项目创建时选择 | + +### 使用示例:生成人物设定 + +```bash +curl -X POST http://localhost:8000/api/v1/ai-assistant/generate/characters \ + -H "Content-Type: application/json" \ + -d '{ + "idea": "架空朝代,边关将军李云飞与丞相之女苏婉儿的爱情故事" + }' +``` + +--- + +## 💡 使用技巧 + +### 1. 充分利用 AI 辅助 + +- **人物设定**: 输入简短描述,让 AI 扩展成完整小传 +- **大纲生成**: 输入故事框架,让 AI 生成详细大纲 +- **修改完善**: AI 生成后可以手动调整 + +### 2. 选择合适的 Skills + +- **古风剧**: 选择 `dialogue_writer_ancient` +- **现代剧**: 可以创建新的对话风格 Skill +- **审核**: 始终启用 `consistency_checker` + +### 3. 执行策略 + +- **小规模**: 1-3 集,手动查看每个 +- **中规模**: 4-10 集,分批次执行 +- **大规模**: 11+ 集,全自动模式(开发中) + +--- + +## 🐛 故障排查 + +### 问题 1: Skills 未加载 + +**解决**: 重启后端服务,确保 Skills 在正确目录 + +### 问题 2: AI 生成失败 + +**解决**: +1. 检查 `.env` 中的 `ZHIPUAI_API_KEY` +2. 确认网络连接正常 +3. 查看 API Key 是否有额度 + +### 问题 3: 执行卡住 + +**解决**: +1. 检查后端日志 +2. 确认 GLM API 调用正常 +3. 刷新页面重新执行 + +--- + +## 📊 完整创作流程图 + +``` +┌─────────────────────────────────────────────┐ +│ 1. 选择创作方式 │ +├─────────────────────────────────────────────┤ +│ [上传剧本改编] 或 [从头创作+AI辅助] │ +└─────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────┐ +│ 2. AI 辅助完善 (可选) │ +├─────────────────────────────────────────────┤ +│ • AI 生成人物设定 │ +│ • AI 生成完整大纲 │ +│ • 手动调整完善 │ +└─────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────┐ +│ 3. 配置 Skills │ +├─────────────────────────────────────────────┤ +│ • 选择对话创作 Skill (古风/现代等) │ +│ • 选择审核 Skill │ +│ • 查看 Skill 的行为指导 │ +└─────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────┐ +│ 4. 执行创作 │ +├─────────────────────────────────────────────┤ +│ • 选择要创作的集数 │ +│ • 点击执行 │ +│ • 实时查看进度和日志 │ +└─────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────┐ +│ 5. 
查看结果 │ +├─────────────────────────────────────────────┤ +│ • 查看剧本内容 │ +│ • 查看质量分数 │ +│ • 查看审核问题 │ +│ • 导出或继续下一集 │ +└─────────────────────────────────────────────┘ +``` + +--- + +## 🎯 下一步功能 + +计划中的功能: + +- [ ] **数据持久化** - MongoDB 集成 +- [ ] **实时进度推送** - WebSocket +- [ ] **更多内置 Skills** - 覆盖更多风格类型 +- [ ] **自定义 Skill 创建器** - 可视化创建 Skill +- [ ] **项目模板** - 预设项目模板 +- [ ] **导出功能** - 导出为 Word/PDF + +--- + +**Creative Studio** - 让 AI 创作更简单、更可控 diff --git a/DEVELOPMENT_GUIDE.md b/DEVELOPMENT_GUIDE.md new file mode 100644 index 0000000..b600465 --- /dev/null +++ b/DEVELOPMENT_GUIDE.md @@ -0,0 +1,753 @@ +# Creative Studio - 开发指南 + +本文档为开发者提供详细的开发指南,包括架构设计、API 文档、开发环境配置、测试和部署等内容。 + +## 目录 + +- [架构概览](#架构概览) +- [技术栈](#技术栈) +- [开发环境配置](#开发环境配置) +- [后端开发](#后端开发) +- [前端开发](#前端开发) +- [API 文档](#api-文档) +- [测试指南](#测试指南) +- [部署指南](#部署指南) +- [性能优化](#性能优化) +- [安全考虑](#安全考虑) + +--- + +## 架构概览 + +### 系统架构 + +``` +┌─────────────────────────────────────────────────────────────┐ +│ 前端 (React) │ +│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ +│ │ Skill管理 │ │ 项目管理 │ │ 记忆系统 │ │ 审核系统 │ │ +│ └──────────┘ └──────────┘ └──────────┘ └──────────┘ │ +└─────────────────────────────────────────────────────────────┘ + ↓ HTTP/WebSocket +┌─────────────────────────────────────────────────────────────┐ +│ 后端 (FastAPI) │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ API 层 │ │ +│ │ /api/v1/skills /api/v1/projects /api/v1/memory │ │ +│ └──────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ 核心层 │ │ +│ │ ┌────────────┐ ┌────────────┐ ┌────────────┐ │ │ +│ │ │ GLM Client │ │Skill Mgr │ │ Agent引擎 │ │ │ +│ │ └────────────┘ └────────────┘ └────────────┘ │ │ +│ │ ┌────────────┐ ┌────────────┐ │ │ +│ │ │Memory Mgr │ │Review Mgr │ │ │ +│ │ └────────────┘ └────────────┘ │ │ +│ └──────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ 外部服务 │ +│ ┌────────────┐ ┌────────────┐ ┌────────────┐ │ +│ │ GLM-4.7 │ │ MongoDB │ │ Redis │ │ +│ └────────────┘ └────────────┘ └────────────┘ │ +└─────────────────────────────────────────────────────────────┘ +``` + +### 核心设计原则 + +1. **"Agent 固定,Skill 可配"** - 平台维护 Agent 框架,用户配置 Skills +2. **"Skill 指导行为"** - Skill 行为指导融入提示词,指导 LLM 行为 +3. **"全局上下文 + 记忆系统"** - 确保多集内容一致性 +4. **"全自动 + 人工审核"** - 自动创作与人工审核相结合 + +--- + +## 技术栈 + +### 后端 + +- **Python 3.11+** - 编程语言 +- **FastAPI** - 高性能异步 Web 框架 +- **MongoDB** - 文档数据库 +- **Redis** - 缓存和消息队列 +- **zai-sdk** - 智谱 AI SDK +- **Pydantic** - 数据验证 +- **Uvicorn** - ASGI 服务器 + +### 前端 + +- **React 18** - UI 框架 +- **TypeScript** - 类型安全 +- **Vite** - 构建工具 +- **Ant Design 5** - UI 组件库 +- **Zustand** - 状态管理 +- **React Router** - 客户端路由 +- **Axios** - HTTP 客户端 +- **Recharts** - 数据可视化 + +--- + +## 开发环境配置 + +### 后端环境 + +#### 1. 创建虚拟环境 + +```powershell +# PowerShell +python -m venv .venv +.venv\Scripts\Activate.ps1 + +# CMD +python -m venv .venv +.venv\Scripts\activate.bat + +# Git Bash / WSL +python -m venv .venv +source .venv/Scripts/activate +``` + +#### 2. 安装依赖 + +```bash +cd backend +pip install -e . +``` + +或使用 requirements.txt: + +```bash +pip install -r requirements.txt +``` + +#### 3. 
配置环境变量 + +```bash +cp .env.example .env +``` + +编辑 `.env` 文件: + +```env +# GLM API Key (必填) +ZAI_API_KEY=your_api_key_here + +# MongoDB (可选) +MONGODB_URL=mongodb://localhost:27017 +MONGODB_DATABASE=creative_studio + +# Redis (可选) +REDIS_URL=redis://localhost:6379/0 + +# 模型配置 +ZAI_MODEL=glm-4-flash + +# 日志配置 +LOG_LEVEL=INFO +DEBUG=True +``` + +#### 4. 启动开发服务器 + +```bash +uvicorn app.main:app --reload --host 0.0.0.0 --port 8000 +``` + +访问 `http://localhost:8000/docs` 查看 API 文档。 + +### 前端环境 + +#### 1. 安装依赖 + +```bash +cd frontend +npm install +``` + +#### 2. 配置环境变量 + +创建 `.env.local` 文件: + +```env +VITE_API_BASE_URL=http://localhost:8000 +``` + +#### 3. 启动开发服务器 + +```bash +npm run dev +``` + +访问 `http://localhost:5173` 查看前端应用。 + +--- + +## 后端开发 + +### 项目结构 + +``` +backend/ +├── app/ +│ ├── __init__.py +│ ├── main.py # 应用入口 +│ ├── config.py # 配置管理 +│ ├── api/ +│ │ ├── __init__.py +│ │ └── v1/ +│ │ ├── __init__.py +│ │ ├── skills.py # Skill API +│ │ ├── projects.py # Project API +│ │ ├── memory.py # Memory API +│ │ └── review.py # Review API +│ ├── core/ +│ │ ├── __init__.py +│ │ ├── llm/ +│ │ │ ├── __init__.py +│ │ │ └── glm_client.py # GLM 客户端 +│ │ ├── skills/ +│ │ │ ├── __init__.py +│ │ │ └── skill_manager.py # Skill 管理器 +│ │ ├── agents/ +│ │ │ ├── __init__.py +│ │ │ └── series_creation_agent.py # 剧集创作 Agent +│ │ ├── memory/ +│ │ │ ├── __init__.py +│ │ │ └── memory_manager.py # 记忆管理器 +│ │ └── review/ +│ │ ├── __init__.py +│ │ └── review_manager.py # 审核管理器 +│ ├── models/ +│ │ ├── __init__.py +│ │ ├── skill.py # Skill 数据模型 +│ │ ├── project.py # Project 数据模型 +│ │ ├── memory.py # Memory 数据模型 +│ │ └── review.py # Review 数据模型 +│ ├── schemas/ +│ │ ├── __init__.py +│ │ └── ... +│ ├── db/ +│ │ ├── __init__.py +│ │ └── ... +│ └── utils/ +│ ├── __init__.py +│ └── logger.py # 日志配置 +│ +├── skills_storage/ # Skill 文件存储 +│ ├── builtin_skills/ # 内置 Skills +│ └── user_skills/ # 用户 Skills +│ +├── tests/ # 测试 +│ ├── __init__.py +│ ├── conftest.py # Pytest 配置 +│ ├── test_skills.py # Skill 测试 +│ ├── test_projects.py # Project 测试 +│ └── test_api.py # API 测试 +│ +├── requirements.txt # Python 依赖 +├── setup.py # 安装配置 +├── .env.example # 环境变量模板 +└── README.md # 后端文档 +``` + +### 核心模块说明 + +#### GLM Client (`app/core/llm/glm_client.py`) + +封装智谱 AI GLM-4.7 API,提供核心 LLM 调用能力。 + +**核心方法**: + +```python +async def chat_with_skill( + skill_behavior: str, # Skill 的行为指导 + user_input: str, # 用户输入 + context: Dict, # 上下文信息 + temperature: float = 0.7 +) -> str: + """使用 Skill 行为指导进行对话""" +``` + +#### Skill Manager (`app/core/skills/skill_manager.py`) + +管理 Skill 的加载、解析、缓存和 CRUD 操作。 + +**核心方法**: + +```python +async def load_skill(skill_id: str) -> Skill: + """加载 Skill""" + +async def list_skills(skill_type: str = None, category: str = None) -> List[Skill]: + """列出 Skills""" + +async def create_user_skill(skill_data: SkillCreate) -> Skill: + """创建用户 Skill""" +``` + +#### Memory Manager (`app/core/memory/memory_manager.py`) + +管理项目的记忆系统,包括事件时间线、伏笔追踪等。 + +**核心方法**: + +```python +async def extract_events_from_episode( + episode_content: str, + episode_number: int +) -> List[TimelineEvent]: + """从剧集内容中提取事件""" + +async def detect_foreshadowing( + episode_content: str, + episode_number: int +) -> List[ForeshadowingEvent]: + """检测伏笔""" + +async def update_character_states( + episode_content: str, + episode_number: int, + characters: List[str] +) -> Dict[str, CharacterStateChange]: + """更新人物状态""" +``` + +#### Review Manager (`app/core/review/review_manager.py`) + +管理剧集的审核流程,提供多维度质量检查。 + +**核心方法**: + +```python +async def review_episode( + 
episode: Episode, + project: SeriesProject, + config: ReviewConfig +) -> ReviewResult: + """执行完整审核流程""" +``` + +### 添加新的 API 端点 + +1. 在 `app/api/v1/` 下创建路由文件 +2. 定义路由和处理器 +3. 在 `app/main.py` 中注册路由 + +示例: + +```python +# app/api/v1/my_feature.py +from fastapi import APIRouter, Depends +from app.schemas.my_feature import MyRequest, MyResponse + +router = APIRouter(prefix="/my-feature", tags=["My Feature"]) + +@router.post("/", response_model=MyResponse) +async def create_my_feature( + request: MyRequest +): + """创建我的功能""" + # 实现逻辑 + pass +``` + +### 添加新的 Skill + +1. 在 `skills_storage/user_skills/` 或 `skills_storage/builtin_skills/` 下创建目录 +2. 创建 `SKILL.md` 文件 +3. 按照模板编写内容 + +**Skill 模板**: + +```markdown +# Skill: Skill 名称 + +## 基础信息 +- **ID**: skill-id +- **版本**: 1.0.0 +- **作者**: 你的名字 +- **分类**: 分类名称 + +## 行为指导 (核心) + +这里是 Skill 的核心内容,描述这个 Skill 如何指导 Agent 的行为。 + +### 具体规则 +1. 规则 1 +2. 规则 2 +3. ... + +### 输出格式 +描述期望的输出格式 + +### 注意事项 +其他需要注意的事项 +``` + +--- + +## 前端开发 + +### 项目结构 + +``` +frontend/ +├── src/ +│ ├── main.tsx # 应用入口 +│ ├── App.tsx # 根组件 +│ ├── index.css # 全局样式 +│ │ +│ ├── pages/ # 页面组件 +│ │ ├── HomePage.tsx # 首页 +│ │ ├── SkillManagement.tsx # Skill 管理 +│ │ ├── AgentManagement.tsx # Agent 管理 +│ │ ├── ProjectList.tsx # 项目列表 +│ │ ├── ProjectCreate.tsx # 创建项目 +│ │ ├── ProjectDetail.tsx # 项目详情 +│ │ ├── ExecutionMonitor.tsx # 执行监控 +│ │ ├── MemorySystem.tsx # 记忆系统 +│ │ ├── ReviewConfig.tsx # 审核配置 +│ │ └── ReviewResults.tsx # 审核结果 +│ │ +│ ├── components/ # 通用组件 +│ │ ├── Layout/ +│ │ │ ├── Header.tsx +│ │ │ ├── Footer.tsx +│ │ │ └── Sidebar.tsx +│ │ ├── SkillCard.tsx +│ │ ├── EpisodeCard.tsx +│ │ └── ... +│ │ +│ ├── stores/ # Zustand 状态管理 +│ │ ├── skillStore.ts +│ │ ├── projectStore.ts +│ │ ├── memoryStore.ts +│ │ ├── reviewStore.ts +│ │ └── index.ts +│ │ +│ ├── services/ # API 服务 +│ │ ├── api.ts # Axios 配置 +│ │ ├── skillService.ts +│ │ ├── projectService.ts +│ │ ├── memoryService.ts +│ │ ├── reviewService.ts +│ │ └── index.ts +│ │ +│ ├── hooks/ # 自定义 Hooks +│ │ ├── useSkills.ts +│ │ ├── useProjects.ts +│ │ └── ... +│ │ +│ ├── types/ # TypeScript 类型 +│ │ ├── skill.ts +│ │ ├── project.ts +│ │ ├── memory.ts +│ │ ├── review.ts +│ │ └── index.ts +│ │ +│ └── utils/ # 工具函数 +│ └── ... +│ +├── index.html # HTML 模板 +├── package.json # NPM 依赖 +├── vite.config.ts # Vite 配置 +├── tsconfig.json # TypeScript 配置 +├── tailwind.config.js # Tailwind CSS 配置 +└── README.md # 前端文档 +``` + +### 状态管理 (Zustand) + +使用 Zustand 进行轻量级状态管理。 + +示例: + +```typescript +// stores/skillStore.ts +import { create } from 'zustand'; +import { Skill } from '@/types'; + +interface SkillStore { + skills: Skill[]; + loading: boolean; + error: string | null; + + fetchSkills: () => Promise<void>; + createSkill: (skill: Partial<Skill>) => Promise<void>; + updateSkill: (id: string, skill: Partial<Skill>) => Promise<void>; + deleteSkill: (id: string) => Promise<void>; +} + +export const useSkillStore = create<SkillStore>((set, get) => ({ + skills: [], + loading: false, + error: null, + + fetchSkills: async () => { + set({ loading: true, error: null }); + try { + const skills = await skillService.listSkills(); + set({ skills, loading: false }); + } catch (error) { + set({ error: error.message, loading: false }); + } + }, + + // ... 
其他方法 +})); +``` + +### API 服务 + +使用 Axios 进行 HTTP 调用。 + +示例: + +```typescript +// services/skillService.ts +import api from './api'; +import { Skill, SkillCreate, SkillUpdate } from '@/types'; + +export const skillService = { + async listSkills(params?: { skill_type?: string; category?: string }) { + const response = await api.get('/skills', { params }); + return response.data; + }, + + async getSkill(id: string) { + const response = await api.get(`/skills/${id}`); + return response.data; + }, + + async createSkill(data: SkillCreate) { + const response = await api.post('/skills', data); + return response.data; + }, + + // ... 其他方法 +}; +``` + +### 添加新页面 + +1. 在 `src/pages/` 下创建页面组件 +2. 在 `src/App.tsx` 中添加路由 +3. 在导航菜单中添加链接 + +示例: + +```typescript +// src/pages/MyPage.tsx +import React from 'react'; + +export const MyPage: React.FC = () => { + return ( + <div> + <h1>My Page</h1> + {/* 页面内容 */} + </div> + ); +}; + +// src/App.tsx +import { MyPage } from '@/pages/MyPage'; + +// 添加路由 +<Route path="/my-page" element={<MyPage />} /> +``` + +--- + +## API 文档 + +完整的 API 文档请参考 [docs/API.md](docs/API.md)。 + +### 主要 API 端点 + +#### Skill 管理 + +| 端点 | 方法 | 描述 | +|-----|------|------| +| `/api/v1/skills` | GET | 列出所有 Skills | +| `/api/v1/skills/{id}` | GET | 获取 Skill 详情 | +| `/api/v1/skills` | POST | 创建新 Skill | +| `/api/v1/skills/{id}/test` | POST | 测试 Skill | +| `/api/v1/skills/{id}` | PUT | 更新 Skill | +| `/api/v1/skills/{id}` | DELETE | 删除 Skill | + +#### 项目管理 + +| 端点 | 方法 | 描述 | +|-----|------|------| +| `/api/v1/projects` | GET | 列出项目 | +| `/api/v1/projects` | POST | 创建项目 | +| `/api/v1/projects/{id}` | GET | 获取项目详情 | +| `/api/v1/projects/{id}` | PUT | 更新项目 | +| `/api/v1/projects/{id}` | DELETE | 删除项目 | +| `/api/v1/projects/{id}/execute` | POST | 执行剧集创作 | +| `/api/v1/projects/{id}/execute-batch` | POST | 批量执行 | + +#### 记忆系统 + +| 端点 | 方法 | 描述 | +|-----|------|------| +| `/api/v1/projects/{id}/memory` | GET | 获取项目记忆 | +| `/api/v1/projects/{id}/memory/timeline` | GET | 获取事件时间线 | +| `/api/v1/projects/{id}/memory/threads` | GET | 获取待收线问题 | +| `/api/v1/projects/{id}/memory/threads` | POST | 添加待收线问题 | +| `/api/v1/projects/{id}/memory/threads/{thread_id}` | PUT | 更新待收线问题 | + +#### 审核系统 + +| 端点 | 方法 | 描述 | +|-----|------|------| +| `/api/v1/projects/{id}/review-config` | GET | 获取审核配置 | +| `/api/v1/projects/{id}/review-config` | PUT | 更新审核配置 | +| `/api/v1/projects/{id}/episodes/{ep_num}/review` | POST | 执行审核 | + +--- + +## 测试指南 + +### 后端测试 + +使用 pytest 进行测试。 + +```bash +# 运行所有测试 +pytest + +# 运行特定测试文件 +pytest tests/test_skills.py + +# 查看覆盖率 +pytest --cov=app tests/ +``` + +### 前端测试 + +使用 Vitest 进行测试。 + +```bash +# 运行所有测试 +npm run test + +# 运行特定测试文件 +npm run test -- SkillCard.test.tsx + +# 查看覆盖率 +npm run test -- --coverage +``` + +--- + +## 部署指南 + +### 后端部署 + +#### Docker 部署 + +```dockerfile +# Dockerfile +FROM python:3.11-slim + +WORKDIR /app + +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +COPY . . + +CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] +``` + +构建和运行: + +```bash +docker build -t creative-studio-backend . 
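+# Optional: confirm the image exists before running (illustrative)
+docker image ls creative-studio-backend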
+docker run -p 8000:8000 --env-file .env creative-studio-backend +``` + +#### 传统部署 + +```bash +# 使用 Gunicorn +gunicorn app.main:app -w 4 -k uvicorn.workers.UvicornWorker --bind 0.0.0.0:8000 +``` + +### 前端部署 + +```bash +# 构建生产版本 +npm run build + +# 输出在 dist/ 目录 +``` + +使用 Nginx 服务静态文件: + +```nginx +server { + listen 80; + server_name your-domain.com; + + root /path/to/frontend/dist; + index index.html; + + location / { + try_files $uri $uri/ /index.html; + } + + location /api { + proxy_pass http://localhost:8000; + } +} +``` + +--- + +## 性能优化 + +### 后端优化 + +1. **Skill 缓存**: 使用内存缓存避免频繁文件读取 +2. **异步处理**: 使用 Celery 处理耗时任务 +3. **连接池**: 复用 MongoDB 和 Redis 连接 +4. **响应压缩**: 启用 Gzip 压缩 + +### 前端优化 + +1. **代码分割**: 使用 React.lazy 和 Suspense +2. **状态管理**: 使用 Zustand 避免不必要的重渲染 +3. **请求缓存**: 使用 SWR 或 React Query +4. **图片优化**: 使用 WebP 格式和懒加载 + +--- + +## 安全考虑 + +1. **API Key 保护**: 使用环境变量,不提交到 Git +2. **输入验证**: 使用 Pydantic 验证所有输入 +3. **CORS 配置**: 限制允许的源 +4. **错误处理**: 不暴露敏感信息 +5. **SQL 注入防护**: 使用参数化查询 +6. **XSS 防护**: 前端使用 React 的内置防护 + +--- + +## 扩展阅读 + +- [FastAPI 文档](https://fastapi.tiangolo.com/) +- [React 文档](https://react.dev/) +- [Ant Design 文档](https://ant.design/) +- [智谱 AI 文档](https://docs.bigmodel.cn/) +- [Zustand 文档](https://zustand-demo.pmnd.rs/) +- [Vite 文档](https://vitejs.dev/) + +--- + +## 许可证 + +MIT License diff --git a/IMPLEMENTATION_PLAN.md b/IMPLEMENTATION_PLAN.md new file mode 100644 index 0000000..a12c72d --- /dev/null +++ b/IMPLEMENTATION_PLAN.md @@ -0,0 +1,1504 @@ +# Creative Studio - Comprehensive Implementation Plan + +**Platform**: 灵感工坊 (Creative Studio) - AI 创作平台 +**Version**: 2.0 +**Date**: 2024-01-24 +**Status**: Active Development + +--- + +## Executive Summary + +This document provides a comprehensive implementation plan for the Creative Studio platform, based on the complete platform specification document. The platform follows a **Skill-centric architecture** where Agents provide fixed workflow frameworks and Skills provide configurable behavioral guidance. + +### Current Implementation Status + +| Component | Status | Completion | +|-----------|--------|------------| +| **Backend Framework** | ✅ Implemented | 90% | +| **Skills CRUD** | ✅ Implemented | 95% | +| **Agents Viewing** | ✅ Implemented | 80% | +| **Projects CRUD** | ⚠️ Partial | 60% | +| **Memory System** | ❌ Missing | 0% | +| **Review System** | ❌ Missing | 0% | +| **Execution Monitoring** | ⚠️ Basic | 40% | +| **Frontend Pages** | ⚠️ Partial | 50% | +| **Skill Marketplace** | ❌ Missing | 0% | + +--- + +## Architecture Overview + +### Core Design Principles + +1. **"Agent 固定,Skill 可配"** - Platform maintains Agent framework, users configure Skills +2. **"Skill 指导行为"** - Skill behavior guides are integrated into prompts to guide LLM behavior +3. **"全局上下文 + 记忆系统"** - Global context + memory system ensures multi-episode consistency +4. **"全自动 + 人工审核"** - Full automatic creation with human review + +### System Hierarchy + +``` +Series Project (剧集项目) + ├── Uses Agent (固定工作流模板) + │ └── Calls Skills (可配置能力单元) + │ + ├── Global Context (全局上下文) + │ ├── World Setting (世界观) + │ ├── Character Profiles (人物小传) + │ ├── Scene Settings (场景设定) + │ └── Overall Outline (整体大纲) + │ + ├── Memory System (记忆系统) + │ ├── Event Timeline (事件时间线) + │ ├── Pending Threads (待收线问题) + │ ├── Foreshadowing (伏笔追踪) + │ └── Character States (人物状态) + │ + └── Episodes (子项目) + ├── EP01, EP02, ... 
+ └── Each episode: Structure → Outline → Content → Review → Memory Update +``` + +--- + +## Phase 1: Core Foundation (Current - Week 1-2) + +### 1.1 Backend Infrastructure ✅ (90% Complete) + +**Status**: Implemented +**Location**: `C:\Users\Wolfycz\huijing\creative_studio\backend\` + +#### Implemented Components: + +- **FastAPI Application** (`app/main.py`) + - CORS configuration + - Lifespan management + - Route registration + - Health checks + +- **Skill Management** (`app/core/skills/skill_manager.py`) + - Load and parse SKILL.md files + - Extract behavior guides + - Cache management + - Builtin vs User Skills separation + - CRUD operations for user Skills + +- **Data Models** (`app/models/`) + - `skill.py`: Skill, SkillTool, SkillConfig + - `project.py`: SeriesProject, Episode, GlobalContext, Memory + +- **LLM Integration** (`app/core/llm/glm_client.py`) + - GLM-4.7 integration + - `chat_with_skill()` method for Skill-guided generation + +- **Series Creation Agent** (`app/core/agents/series_creation_agent.py`) + - Fixed workflow implementation + - 5-stage execution: Structure → Outline → Dialogue → Review → Memory + - Context building + - Basic retry logic + +#### Remaining Tasks: + +- [ ] Enhance error handling and logging +- [ ] Add API rate limiting +- [ ] Implement request/response validation +- [ ] Add API versioning strategy +- [ ] Create comprehensive test suite + +**API Endpoints Implemented**: + +| Endpoint | Method | Status | +|----------|--------|--------| +| `/api/v1/skills/` | GET | ✅ | +| `/api/v1/skills/{id}` | GET | ✅ | +| `/api/v1/skills/` | POST | ✅ | +| `/api/v1/skills/{id}` | PUT | ✅ | +| `/api/v1/skills/{id}` | DELETE | ✅ | +| `/api/v1/skills/{id}/test` | POST | ✅ | +| `/api/v1/projects/` | GET | ✅ | +| `/api/v1/projects/` | POST | ✅ | +| `/api/v1/projects/{id}` | GET | ✅ | +| `/api/v1/projects/{id}/execute` | POST | ✅ | +| `/api/v1/projects/{id}/execute-batch` | POST | ✅ | + +--- + +### 1.2 Frontend Infrastructure ⚠️ (50% Complete) + +**Status**: Partially Implemented +**Location**: `C:\Users\Wolfycz\huijing\creative_studio\frontend\` + +#### Implemented Pages: + +1. **Skill Management** (`src/pages/SkillManagement.tsx`) ✅ + - Skills listing with filtering + - Builtin vs User Skills distinction + - Create, Edit, Delete operations + - Skill testing modal + - Detail drawer with behavior guide view + - Search and category filtering + - Grid/List view toggle + +2. **Agent Management** (`src/pages/AgentManagement.tsx`) ✅ + - Read-only Agent viewing + - Workflow visualization + - Skill bindings display + +3. **Project List** (`src/pages/ProjectList.tsx`) ✅ + - Project listing + - Status indicators + +4. **Project Create** (`src/pages/ProjectCreate.tsx`) ⚠️ + - Basic form + - Needs enhancement for global context + +5. 
**Execution Monitor** (`src/pages/ExecutionMonitor.tsx`) ⚠️ + - Basic monitoring + - Needs real-time updates + +#### Missing Frontend Components: + +- [ ] Global Context Editor +- [ ] Memory System Viewer +- [ ] Review System UI +- [ ] Episode Detail View +- [ ] Skill Configuration Interface +- [ ] Batch Execution Controls +- [ ] Quality Report Viewer +- [ ] Skill Marketplace + +--- + +## Phase 2: Memory System Implementation (Week 3-4) + +### 2.1 Backend - Memory System + +**Priority**: HIGH +**Complexity**: Medium + +#### Data Models to Implement: + +```python +# Event Timeline +class TimelineEvent(BaseModel): + episode: int + event: str + timestamp: datetime + characters_involved: List[str] = [] + importance: str = "medium" # low, medium, high + +# Pending Threads (Foreshadowing) +class PendingThread(BaseModel): + id: str + description: str + introduced_at: int + importance: str # high, medium, low + resolved: bool = False + resolved_at: Optional[int] = None + reminder_episode: Optional[int] = None + status: str = "pending" # pending,跟进中,已收线 + +# Character State History +class CharacterStateChange(BaseModel): + episode: int + state: str + change: str + emotional_impact: Optional[str] = None + +# Enhanced Memory System +class EnhancedMemory(BaseModel): + eventTimeline: List[TimelineEvent] = [] + pendingThreads: List[PendingThread] = [] + characterStates: Dict[str, List[CharacterStateChange]] = {} + foreshadowing: List[ForeshadowingEvent] = [] + relationships: Dict[str, Dict[str, str]] = {} # character relationships +``` + +#### API Endpoints to Create: + +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/api/v1/projects/{id}/memory` | GET | Get project memory | +| `/api/v1/projects/{id}/memory/timeline` | GET | Get event timeline | +| `/api/v1/projects/{id}/memory/threads` | GET | Get pending threads | +| `/api/v1/projects/{id}/memory/threads` | POST | Add new thread | +| `/api/v1/projects/{id}/memory/threads/{thread_id}` | PUT | Update thread | +| `/api/v1/projects/{id}/memory/characters` | GET | Get character states | +| `/api/v1/projects/{id}/memory/resolve-thread` | POST | Mark thread as resolved | + +#### Memory Manager Implementation: + +```python +# app/core/memory/memory_manager.py + +class MemoryManager: + """Manage project memory system""" + + async def extract_events_from_episode( + self, + episode_content: str, + episode_number: int + ) -> List[TimelineEvent]: + """Extract key events from completed episode""" + # Use LLM to identify and extract events + pass + + async def detect_foreshadowing( + self, + episode_content: str, + episode_number: int + ) -> List[ForeshadowingEvent]: + """Detect new foreshadowing in episode""" + pass + + async def update_character_states( + self, + episode_content: str, + episode_number: int, + characters: List[str] + ) -> Dict[str, CharacterStateChange]: + """Track character state changes""" + pass + + async def check_consistency( + self, + episode_content: str, + memory: Memory + ) -> List[ConsistencyIssue]: + """Check episode consistency with memory""" + pass + + async def suggest_thread_resolution( + self, + thread: PendingThread, + current_episode: int + ) -> ResolutionSuggestion: + """Suggest how to resolve pending threads""" + pass +``` + +### 2.2 Frontend - Memory System UI + +**Components to Build:** + +1. 
**Memory System Viewer Page** (`src/pages/MemorySystem.tsx`) + +```typescript +interface MemorySystemProps { + projectId: string; +} + +// Features: +// - Tabbed interface: Timeline | Threads | Characters | Foreshadowing +// - Timeline: Chronological event list with episode markers +// - Threads: Cards showing pending threads with urgency indicators +// - Characters: State history for each character +// - Visual relationship graph +``` + +2. **Timeline Component** + +```typescript +// Features: +// - Vertical timeline visualization +// - Episode markers +// - Event cards with character avatars +// - Importance color-coding +// - Filter by character/episode/importance +// - Click to view details +``` + +3. **Pending Threads Component** + +```typescript +// Features: +// - Thread cards with: +// - Description +// - Introduced episode +// - Importance level (High/Medium/Low) +// - Status (Pending/In Progress/Resolved) +// - Suggested resolution episode +// - Manual add/edit thread +// - Mark as resolved +// - Set reminder episode +// - Filter by status/importance +``` + +4. **Character States Component** + +```typescript +// Features: +// - Character list with current states +// - State history timeline per character +// - State change indicators +// - Relationship visualization +// - Emotional state tracking +``` + +--- + +## Phase 3: Review System Implementation (Week 5-6) + +### 3.1 Backend - Review System + +**Priority**: HIGH +**Complexity**: High + +#### Review Configuration Model: + +```python +class ReviewConfig(BaseModel): + """Review configuration - customizable by users""" + + # Enabled review Skills + enabled_review_skills: List[str] = [ + "consistency_checker", + "dialogue_quality_checker", + "plot_logic_checker" + ] + + # Overall strictness + overall_strictness: str = "medium" # loose, medium, strict + + # Dimension-specific settings + dimension_settings: Dict[str, DimensionConfig] = {} + + # Custom rules + custom_rules: List[CustomRule] = [] + + # Presets + preset_name: Optional[str] = None + +class DimensionConfig(BaseModel): + enabled: bool = True + strictness: float = 0.5 # 0.0 to 1.0 + custom_rules: List[CustomRule] = [] + +class CustomRule(BaseModel): + id: str + name: str + description: str + trigger_condition: str + severity: str # low, medium, high + enabled: bool = True +``` + +#### Review Result Model: + +```python +class ReviewResult(BaseModel): + episode_id: str + episode_number: int + + # Overall score + overall_score: float + passed: bool + + # Dimension scores + dimension_scores: Dict[str, float] = {} + + # Issues found + issues: List[ReviewIssue] = [] + + # New foreshadowing detected + new_foreshadowing: List[ForeshadowingEvent] = [] + + # Resolved threads + resolved_threads: List[str] = [] + +class ReviewIssue(BaseModel): + id: str + type: str + dimension: str + severity: str # low, medium, high + location: Location + description: str + original_text: Optional[str] = None + suggestion: str + auto_fix_available: bool = False + +class Location(BaseModel): + episode: int + scene: int + line: Optional[int] = None +``` + +#### Review Manager Implementation: + +```python +# app/core/review/review_manager.py + +class ReviewManager: + """Manage review process""" + + async def review_episode( + self, + episode: Episode, + project: SeriesProject, + config: ReviewConfig + ) -> ReviewResult: + """Execute full review process""" + + # Run each enabled review Skill + dimension_results = [] + for skill_id in config.enabled_review_skills: + result = await 
self._run_review_skill( + skill_id, episode, project, config + ) + dimension_results.append(result) + + # Aggregate results + review_result = self._aggregate_results(dimension_results) + + # Check custom rules + custom_issues = await self._check_custom_rules( + episode, config.custom_rules + ) + review_result.issues.extend(custom_issues) + + # Determine pass/fail + threshold = config.dimension_settings.get("threshold", 85) + review_result.passed = review_result.overall_score >= threshold + + return review_result + + async def _run_review_skill( + self, + skill_id: str, + episode: Episode, + project: SeriesProject, + config: ReviewConfig + ) -> DimensionResult: + """Run a single review Skill""" + skill = await skill_manager.load_skill(skill_id) + + # Build review prompt + prompt = self._build_review_prompt( + skill, episode, project, config + ) + + # Execute with Skill behavior guide + response = await glm_client.chat_with_skill( + skill_behavior=skill.behavior_guide, + user_input=prompt, + context={ + "episode_content": episode.content, + "global_context": project.globalContext, + "memory": project.memory + }, + temperature=0.3 + ) + + # Parse response + return self._parse_review_response(response, skill_id) + + def _aggregate_results( + self, + dimension_results: List[DimensionResult] + ) -> ReviewResult: + """Aggregate dimension results into overall score""" + # Weighted average based on dimension settings + pass +``` + +#### API Endpoints: + +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/api/v1/projects/{id}/review-config` | GET | Get review config | +| `/api/v1/projects/{id}/review-config` | PUT | Update review config | +| `/api/v1/projects/{id}/review-presets` | GET | List preset configs | +| `/api/v1/projects/{id}/episodes/{ep_num}/review` | POST | Run review | +| `/api/v1/review/dimensions` | GET | List available dimensions | +| `/api/v1/review/custom-rules` | POST | Add custom rule | +| `/api/v1/review/custom-rules/{id}` | DELETE | Delete custom rule | + +### 3.2 Frontend - Review System UI + +**Components to Build:** + +1. **Review Configuration Page** (`src/pages/ReviewConfig.tsx`) + +```typescript +// Features: +// - Dimension selection with toggles +// - Strictness sliders per dimension +// - Custom rules builder +// - Preset selection (Draft/Standard/Strict) +// - Save/load presets +// - Preview configuration impact +``` + +2. **Dimension Configuration Component** + +```typescript +// Features: +// - Enable/disable dimension +// - Strictness slider (0-100) +// - Weight adjustment +// - Custom rules for dimension +// - Examples of what's checked +``` + +3. **Custom Rule Builder** + +```typescript +// Features: +// - Rule name and description +// - Trigger condition builder (natural language) +// - Severity selector +// - Test rule with sample content +// - Rule templates +``` + +4. **Review Results Viewer** (`src/pages/ReviewResults.tsx`) + +```typescript +// Features: +// - Overall score display with gauge +// - Dimension score breakdown (bar chart) +// - Issues list grouped by severity +// - Issue detail cards: +// - Location link +// - Original text +// - Suggestion +// - Auto-fix button (if available) +// - Ignore option +// - Accept/Reject decisions +// - Export report +``` + +5. 
**Review Dashboard**
+
+```typescript
+// Features:
+// - Batch review summary
+// - Pass/fail statistics
+// - Common issues
+// - Trend analysis
+// - Quality metrics over time
+```
+
+---
+
+## Phase 4: Advanced Execution Features (Week 7-8)
+
+### 4.1 Backend - Enhanced Execution
+
+**Priority**: MEDIUM
+**Complexity**: High
+
+#### Features to Implement:
+
+1. **Batch Execution Mode**
+
+```python
+# app/core/execution/batch_executor.py
+
+class BatchExecutor:
+    """Execute episodes in batches, with user review between batches"""
+
+    async def execute_batch(
+        self,
+        project: SeriesProject,
+        start_episode: int,
+        end_episode: int,
+        batch_config: BatchConfig
+    ) -> BatchResult:
+        """Execute a batch of episodes"""
+
+        results = []
+        for ep_num in range(start_episode, end_episode + 1):
+            # Execute episode
+            episode = await agent.execute_episode(
+                project, ep_num
+            )
+
+            # Run review
+            review = await review_manager.review_episode(
+                episode, project, project.review_config
+            )
+
+            # Check whether it meets the quality threshold
+            if review.overall_score < batch_config.quality_threshold:
+                # Auto-retry or mark for review
+                if episode.retry_count < batch_config.max_retries:
+                    episode = await self._retry_episode(
+                        project, ep_num, review
+                    )
+                else:
+                    episode.status = "needs-review"
+
+            results.append(episode)
+
+        # Return batch results; the caller waits for user approval
+        # before starting the next batch
+        return BatchResult(
+            episodes=results,
+            needs_review=[ep for ep in results if ep.status == "needs-review"],
+            batch_summary=self._generate_summary(results)
+        )
+```
+
+2. **Real-time Progress Streaming**
+
+```python
+# Use WebSocket or Server-Sent Events for real-time updates
+
+@app.websocket("/ws/projects/{project_id}/execute")
+async def execute_episode_stream(
+    websocket: WebSocket,
+    project_id: str,
+    episode_number: int
+):
+    """Stream execution progress in real-time"""
+
+    await websocket.accept()
+
+    # Send stage updates
+    for stage in agent.execution_stages:
+        await websocket.send_json({
+            "type": "stage_start",
+            "stage": stage.name,
+            "message": f"Starting {stage.display_name}..."
+        })
+
+        result = await stage.execute()
+
+        await websocket.send_json({
+            "type": "stage_complete",
+            "stage": stage.name,
+            "result": result
+        })
+
+    await websocket.send_json({
+        "type": "episode_complete",
+        "episode": episode_result  # aggregate of per-stage results (sketch)
+    })
+```
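+A matching client sketch for this stream (assumes the third-party `websockets` package; message shapes are as above, and passing `episode_number` as a query parameter is an assumption):
+
+```python
+import asyncio
+import json
+
+import websockets
+
+async def watch(project_id: str, episode_number: int):
+    uri = (f"ws://localhost:8000/ws/projects/{project_id}/execute"
+           f"?episode_number={episode_number}")
+    async with websockets.connect(uri) as ws:
+        async for raw in ws:
+            msg = json.loads(raw)
+            if msg["type"] == "stage_start":
+                print(msg["message"])
+            elif msg["type"] == "stage_complete":
+                print(f"completed: {msg['stage']}")
+            elif msg["type"] == "episode_complete":
+                break
+
+asyncio.run(watch("demo-project", 1))
+```
+
+3. 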
**Auto-retry with Feedback** + +```python +class RetryManager: + """Manage automatic retry logic""" + + async def should_retry( + self, + episode: Episode, + review: ReviewResult + ) -> bool: + """Determine if episode should be retried""" + + config = episode.project.autoRetryConfig + + # Check retry count + if episode.retry_count >= config.maxRetries: + return False + + # Check score threshold + if review.overall_score >= config.qualityThreshold: + return False + + # Check if issues are fixable + fixable_issues = [ + issue for issue in review.issues + if issue.auto_fix_available or issue.severity != "high" + ] + + return len(fixable_issues) > 0 + + async def retry_episode( + self, + episode: Episode, + review: ReviewResult + ) -> Episode: + """Retry episode generation with review feedback""" + + # Build retry context + retry_context = { + "attempt": episode.retry_count + 1, + "previous_result": episode.content, + "review_feedback": self._build_feedback(review.issues), + "suggestions": [issue.suggestion for issue in review.issues] + } + + # Re-execute with feedback + new_episode = await agent.execute_episode( + episode.project, + episode.number, + retry_context=retry_context + ) + + new_episode.retry_count = episode.retry_count + 1 + return new_episode +``` + +#### API Endpoints: + +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/api/v1/projects/{id}/execute-batch` | POST | Execute batch | +| `/api/v1/projects/{id}/execute-auto` | POST | Full auto execution | +| `/api/v1/projects/{id}/execution-status` | GET | Get execution status | +| `/api/v1/projects/{id}/stop-execution` | POST | Stop execution | +| `/ws/projects/{id}/execute` | WebSocket | Real-time execution stream | + +### 4.2 Frontend - Enhanced Execution UI + +**Components to Build:** + +1. **Batch Execution Config** (`src/pages/BatchExecution.tsx`) + +```typescript +// Features: +// - Batch size selector +// - Episode range picker +// - Per-episode configuration: +// - Duration target +// - Style preset +// - Focus areas +// - Quality threshold setting +// - Auto-retry configuration +// - Preview batch plan +``` + +2. **Real-time Execution Monitor** (Enhanced) + +```typescript +// Features: +// - Live progress bar +// - Current stage display +// - Real-time content preview (streaming) +// - Stage-by-stage logs +// - Pause/Resume/Stop controls +// - Time estimation +// - Quality metrics live update +``` + +3. **Batch Review Dashboard** + +```typescript +// Features: +// - Batch completion summary +// - Episode cards with status +// - Quality score comparison +// - Issues overview +// - Batch approval decision: +// - Accept all +// - Review issues +// - Retry failed +// - Regenerate batch +// - Next batch configuration +``` + +4. 
**Quality Report Viewer** (`src/pages/QualityReport.tsx`) + +```typescript +// Features: +// - Overall quality metrics +// - Dimension score charts +// - Trend analysis across episodes +// - Issue statistics +// - Comparison to thresholds +// - Improvement suggestions +// - Export to PDF/Excel +``` + +--- + +## Phase 5: Skill Marketplace (Week 9-10) + +### 5.1 Backend - Marketplace + +**Priority**: MEDIUM +**Complexity**: Medium + +#### Data Models: + +```python +class MarketplaceSkill(BaseModel): + """Skill in marketplace""" + + # Base skill data + skill: Skill + + # Marketplace-specific + author_id: str + author_name: str + published_at: datetime + downloads: int = 0 + rating: float = 0.0 + rating_count: int = 0 + + # Categorization + category: str + tags: List[str] + + # Usage stats + usage_count: int = 0 + success_rate: float = 0.0 + + # Versioning + version: str + changelog: List[str] = [] + parent_skill_id: Optional[str] = None # If forked + +class SkillReview(BaseModel): + skill_id: str + user_id: str + rating: int # 1-5 + comment: str + created_at: datetime + usage_context: Optional[str] = None +``` + +#### Marketplace Manager: + +```python +# app/core/marketplace/marketplace_manager.py + +class MarketplaceManager: + """Manage Skill marketplace""" + + async def publish_skill( + self, + skill_id: str, + user_id: str, + marketplace_data: MarketplaceMetadata + ) -> MarketplaceSkill: + """Publish user skill to marketplace""" + + # Validate skill + skill = await skill_manager.load_skill(skill_id) + if not skill or skill.type != "user": + raise ValueError("Only user skills can be published") + + # Create marketplace listing + marketplace_skill = MarketplaceSkill( + skill=skill, + author_id=user_id, + **marketplace_data.dict() + ) + + # Save to marketplace storage + await self.marketplace_repo.create(marketplace_skill) + + return marketplace_skill + + async def fork_skill( + self, + original_skill_id: str, + user_id: str, + modifications: SkillUpdate + ) -> Skill: + """Create a fork of existing skill""" + + original = await self.get_marketplace_skill(original_skill_id) + + # Create new user skill + new_skill_id = f"{original_skill_id}-fork-{uuid.uuid4().hex[:8]}" + new_skill = await skill_manager.create_user_skill( + SkillCreate( + id=new_skill_id, + name=f"{original.skill.name} (Fork)", + content=original.skill.behavior_guide, + category=original.category, + tags=original.tags + ) + ) + + # Track fork relationship + new_skill.parent_skill_id = original_skill_id + + return new_skill + + async def rate_skill( + self, + skill_id: str, + user_id: str, + rating: int, + comment: str + ) -> SkillReview: + """Rate and review a skill""" + pass + + async def get_recommendations( + self, + project_type: str, + user_history: List[str] + ) -> List[MarketplaceSkill]: + """Get recommended skills based on context""" + pass +``` + +#### API Endpoints: + +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/api/v1/marketplace/skills` | GET | Browse marketplace | +| `/api/v1/marketplace/skills/{id}` | GET | Get skill details | +| `/api/v1/marketplace/skills/{id}/install` | POST | Install skill | +| `/api/v1/marketplace/skills/publish` | POST | Publish skill | +| `/api/v1/marketplace/skills/{id}/fork` | POST | Fork skill | +| `/api/v1/marketplace/skills/{id}/rate` | POST | Rate skill | +| `/api/v1/marketplace/skills/{id}/reviews` | GET | Get reviews | +| `/api/v1/marketplace/recommendations` | GET | Get recommendations | +| `/api/v1/marketplace/categories` | GET | Get categories | + 
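+Once these endpoints exist, a client could drive the browse → install flow like this (a sketch; the query parameters and the response shape, a list of MarketplaceSkill objects, are assumptions):
+
+```python
+import requests
+
+BASE = 'http://localhost:8000/api/v1/marketplace'
+
+# Browse a category, pick the highest-rated listing, install it
+listings = requests.get(f'{BASE}/skills', params={'category': 'dialogue'}).json()
+best = max(listings, key=lambda s: s.get('rating', 0))
+requests.post(f"{BASE}/skills/{best['skill']['id']}/install").raise_for_status()
+```
+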
+### 5.2 Frontend - Marketplace UI
+
+**Components to Build:**
+
+1. **Marketplace Home** (`src/pages/Marketplace.tsx`)
+
+```typescript
+// Features:
+// - Search bar with filters
+// - Category browsing
+// - Featured skills carousel
+// - Popular skills grid
+// - New arrivals
+// - User's custom skills section
+// - Skill cards with:
+//   - Name, description
+//   - Rating, downloads
+//   - Author, category
+//   - Install/Fork buttons
+```
+
+2. **Skill Detail Page** (`src/pages/MarketplaceSkillDetail.tsx`)
+
+```typescript
+// Features:
+// - Full skill description
+// - Behavior guide preview
+// - Configuration parameters
+// - Usage examples
+// - Reviews section
+// - Rating distribution
+// - Related skills
+// - Version history
+// - Install/Fork actions
+```
+
+3. **Skill Publisher** (`src/pages/PublishSkill.tsx`)
+
+```typescript
+// Features:
+// - Skill selection
+// - Marketplace metadata:
+//   - Display name
+//   - Description
+//   - Category, tags
+//   - Screenshots
+//   - Examples
+// - Version notes
+// - Pricing (if applicable)
+// - Preview listing
+// - Publish action
+```
+
+4. **My Skills Dashboard**
+
+```typescript
+// Features:
+// - Published skills list
+// - Stats: downloads, ratings, usage
+// - Fork management
+// - Review responses
+// - Version management
+// - Skill analytics
+```
+
+---
+
+## Phase 6: Advanced Features (Week 11-12)
+
+### 6.1 Global Context Editor
+
+**Backend**:
+- Enhanced CRUD for GlobalContext
+- Version history
+- Conflict detection
+- Template system
+
+**Frontend**:
+- Rich text editors for each section
+- Visual character relationship editor
+- Scene builder with previews
+- Import/export templates
+- Version comparison
+
+### 6.2 Skill Configuration Interface
+
+**Backend**:
+- Parameter schema validation
+- Configuration templates
+- Preset management
+
+**Frontend**:
+- Visual parameter editors:
+  - Sliders for numeric values
+  - Selects for options
+  - Toggles for booleans
+  - Rich text for guides
+- Real-time preview
+- Configuration save/load
+- Preset application
+
+### 6.3 Export System
+
+**Backend**:
+- Multi-format export:
+  - Markdown
+  - PDF
+  - Word (DOCX)
+  - Final Draft
+  - Custom templates
+
+**Frontend**:
+- Export configuration
+- Template selection
+- Format options
+- Preview before export
+- Batch export
+
+### 6.4 Analytics Dashboard
+
+**Backend**:
+- Usage metrics collection
+- Performance tracking
+- Quality analytics
+- Storage optimization
+
+**Frontend**:
+- Usage statistics
+- Creation speed charts
+- Quality trends
+- Storage usage
+- API rate limits
+
+---
+
+## Data Models Summary
+
+### Core Models Status
+
+| Model | Status | Fields | Completeness |
+|-------|--------|--------|--------------|
+| **Skill** | ✅ | 15 fields | 95% |
+| **SkillTool** | ✅ | 4 fields | 100% |
+| **SkillConfig** | ✅ | 3 fields | 90% |
+| **SeriesProject** | ⚠️ | 12 fields | 70% |
+| **GlobalContext** | ⚠️ | 5 fields | 60% |
+| **CharacterProfile** | ⚠️ | 7 fields | 70% |
+| **SceneSetting** | ⚠️ | 3 fields | 80% |
+| **Memory** | ❌ | 4 fields | 0% |
+| **Episode** | ⚠️ | 12 fields | 70% |
+| **EpisodeIssue** | ✅ | 4 fields | 100% |
+| **ReviewConfig** | ❌ | TBD | 0% |
+| **ReviewResult** | ❌ | TBD | 0% |
+| **MarketplaceSkill** | ❌ | TBD | 0% |
+
+---
+
+## API Endpoints Summary
+
+### Implemented (17 endpoints)
+
+```
+Skills:
+✅ GET /api/v1/skills/
+✅ GET /api/v1/skills/{id}
+✅ POST /api/v1/skills/
+✅ PUT /api/v1/skills/{id}
+✅ DELETE /api/v1/skills/{id}
+✅ POST /api/v1/skills/{id}/test
+✅ GET /api/v1/skills/categories/list
+
+Projects:
+✅ GET /api/v1/projects/
+✅ POST /api/v1/projects/
+✅ GET /api/v1/projects/{id}
+✅ PUT /api/v1/projects/{id}
+✅ DELETE /api/v1/projects/{id}
+✅ GET /api/v1/projects/{id}/episodes
+✅ GET /api/v1/projects/{id}/episodes/{ep_num}
+✅ POST /api/v1/projects/{id}/execute
+✅ POST /api/v1/projects/{id}/execute-batch
+✅ GET /api/v1/projects/{id}/memory
+```
+
+### To Implement (30+ endpoints)
+
+```
+Memory System:
+❌ GET /api/v1/projects/{id}/memory/timeline
+❌ POST /api/v1/projects/{id}/memory/threads
+❌ PUT /api/v1/projects/{id}/memory/threads/{thread_id}
+❌ POST /api/v1/projects/{id}/memory/resolve-thread
+❌ GET /api/v1/projects/{id}/memory/characters
+
+Review System:
+❌ GET /api/v1/projects/{id}/review-config
+❌ PUT /api/v1/projects/{id}/review-config
+❌ GET /api/v1/projects/{id}/review-presets
+❌ POST /api/v1/projects/{id}/episodes/{ep_num}/review
+❌ GET /api/v1/review/dimensions
+❌ POST /api/v1/review/custom-rules
+❌ DELETE /api/v1/review/custom-rules/{id}
+
+Execution:
+❌ POST /api/v1/projects/{id}/execute-auto
+❌ GET /api/v1/projects/{id}/execution-status
+❌ POST /api/v1/projects/{id}/stop-execution
+❌ WS /ws/projects/{id}/execute
+
+Marketplace:
+❌ GET /api/v1/marketplace/skills
+❌ GET /api/v1/marketplace/skills/{id}
+❌ POST /api/v1/marketplace/skills/{id}/install
+❌ POST /api/v1/marketplace/skills/publish
+❌ POST /api/v1/marketplace/skills/{id}/fork
+❌ POST /api/v1/marketplace/skills/{id}/rate
+❌ GET /api/v1/marketplace/skills/{id}/reviews
+❌ GET /api/v1/marketplace/recommendations
+❌ GET /api/v1/marketplace/categories
+
+Export:
+❌ POST /api/v1/projects/{id}/export
+❌ GET /api/v1/projects/{id}/export/{export_id}
+
+Analytics:
+❌ GET /api/v1/analytics/usage
+❌ GET /api/v1/analytics/performance
+❌ GET /api/v1/analytics/quality
+```
+
+---
+
+## Frontend Components Summary
+
+### Implemented (6 pages)
+
+- ✅ SkillManagement - Complete
+- ✅ AgentManagement - Read-only
+- ✅ ProjectList - Basic
+- ⚠️ ProjectCreate - Basic
+- ⚠️ ExecutionMonitor - Basic
+- ✅ HomePage - Basic
+
+### To Build (24 components)
+
+**Core Pages:**
+- ❌ GlobalContextEditor
+- ❌ MemorySystemViewer
+- ❌ ReviewConfig
+- ❌ ReviewResults
+- ❌ EpisodeDetail
+- ❌ BatchExecution
+- ❌ QualityReport
+- ❌ Marketplace
+- ❌ MarketplaceSkillDetail
+- ❌ PublishSkill
+- ❌ MySkills
+- ❌ ExportPage
+- ❌ AnalyticsDashboard
+- ❌ Settings
+
+**Shared Components:**
+- ❌ TimelineViewer
+- ❌ ThreadTracker
+- ❌ CharacterStateViewer
+- ❌ RelationshipGraph
+- ❌ DimensionConfig
+- ❌ CustomRuleBuilder
+- ❌ RealTimeMonitor
+- ❌ QualityGauge
+- ❌ SkillCard
+- ❌ EpisodeCard
+
+---
+
+## Implementation Priority Matrix
+
+### High Priority (Must Have)
+
+| Feature | Complexity | Impact | Timeline |
+|---------|-----------|--------|----------|
+| Memory System | Medium | High | Week 3-4 |
+| Review System | High | High | Week 5-6 |
+| Enhanced Execution | Medium | High | Week 7-8 |
+| Global Context Editor | Medium | High | Week 6 |
+| Episode Detail View | Low | High | Week 5 |
+
+### Medium Priority (Should Have)
+
+| Feature | Complexity | Impact | Timeline |
+|---------|-----------|--------|----------|
+| Skill Marketplace | Medium | Medium | Week 9-10 |
+| Batch Execution UI | Medium | Medium | Week 8 |
+| Quality Reports | Low | Medium | Week 8 |
+| Export System | Medium | Medium | Week 11 |
+| Analytics | Low | Medium | Week 12 |
+
+### Low Priority (Nice to Have)
+
+| Feature | Complexity | Impact | Timeline |
+|---------|-----------|--------|----------|
+| Skill Recommendations | High | Low | Week 10 |
+| Custom Themes | Low | Low
| Week 12 | +| Team Collaboration | High | Low | Post-MVP | +| API Platform | High | Low | Post-MVP | + +--- + +## Technical Debt & Refactoring + +### Current Issues + +1. **Memory System**: Not implemented - placeholder only +2. **Review System**: Basic consistency check only +3. **Data Persistence**: In-memory only, needs database +4. **Error Handling**: Basic, needs comprehensive coverage +5. **Testing**: No test coverage +6. **Documentation**: Incomplete +7. **Frontend State Management**: Basic, needs optimization + +### Refactoring Priorities + +1. **Week 3-4**: Add database persistence (PostgreSQL/MongoDB) +2. **Week 5-6**: Implement comprehensive error handling +3. **Week 7-8**: Add unit and integration tests +4. **Week 9-10**: Performance optimization +5. **Week 11-12**: Security audit and hardening + +--- + +## Development Guidelines + +### Backend Development + +1. **Follow FastAPI best practices** + - Use async/await throughout + - Implement proper dependency injection + - Use Pydantic for validation + - Add comprehensive error handling + +2. **Code Organization** + ``` + backend/ + ├── app/ + │ ├── api/v1/ # API routes + │ ├── core/ # Core business logic + │ │ ├── agents/ # Agent implementations + │ │ ├── skills/ # Skill management + │ │ ├── memory/ # Memory system + │ │ ├── review/ # Review system + │ │ └── execution/ # Execution engine + │ ├── models/ # Pydantic models + │ ├── db/ # Database repositories + │ └── utils/ # Utilities + └── tests/ # Test suite + ``` + +3. **API Design Principles** + - RESTful conventions + - Consistent response formats + - Proper HTTP status codes + - Comprehensive OpenAPI docs + +### Frontend Development + +1. **Follow React best practices** + - Functional components with hooks + - TypeScript for type safety + - Ant Design for UI components + - Proper state management + +2. **Code Organization** + ``` + frontend/ + ├── src/ + │ ├── pages/ # Page components + │ ├── components/ # Reusable components + │ ├── services/ # API services + │ ├── stores/ # State management + │ ├── types/ # TypeScript types + │ └── utils/ # Utilities + └── tests/ # Test suite + ``` + +3. **UI/UX Principles** + - Responsive design + - Loading states + - Error handling + - User feedback + - Accessibility + +--- + +## Testing Strategy + +### Backend Testing + +1. **Unit Tests** (Target: 80% coverage) + - Model validation + - Business logic + - Utility functions + +2. **Integration Tests** + - API endpoints + - Database operations + - External service calls + +3. **End-to-End Tests** + - Complete workflows + - Multi-step operations + - Error scenarios + +### Frontend Testing + +1. **Component Tests** + - Individual components + - User interactions + - State changes + +2. **Integration Tests** + - Page flows + - API integration + - State management + +3. 
**E2E Tests** + - User journeys + - Critical paths + - Cross-browser testing + +--- + +## Deployment Strategy + +### Development Environment + +- Backend: FastAPI with uvicorn +- Frontend: Vite dev server +- Database: SQLite (local), PostgreSQL (staging) +- Hot reload enabled + +### Staging Environment + +- Docker containerization +- PostgreSQL database +- Redis for caching +- Load testing + +### Production Environment + +- Kubernetes deployment +- Horizontal scaling +- Database replication +- CDN for static assets +- Monitoring and logging + +--- + +## Success Metrics + +### Technical Metrics + +- API Response Time: < 500ms (p95) +- Frontend Load Time: < 2s +- Test Coverage: > 80% +- Uptime: > 99.9% +- Error Rate: < 0.1% + +### User Metrics + +- Project Creation Success: > 95% +- Episode Generation Quality: > 85% +- User Satisfaction: > 4.5/5 +- Daily Active Users: Track growth +- Feature Adoption: Track usage + +--- + +## Risk Assessment + +### High Risk Items + +1. **LLM API Stability** + - Mitigation: Implement retry logic, fallback models + - Priority: High + +2. **Data Consistency** + - Mitigation: Comprehensive review system + - Priority: High + +3. **Performance at Scale** + - Mitigation: Caching, async processing + - Priority: Medium + +4. **User Experience Complexity** + - Mitigation: Intuitive UI, onboarding + - Priority: Medium + +### Medium Risk Items + +1. **Third-party Dependencies** + - Mitigation: Version pinning, regular updates + - Priority: Medium + +2. **Security Vulnerabilities** + - Mitigation: Regular audits, penetration testing + - Priority: High + +--- + +## Next Steps (Immediate Actions) + +### Week 1-2: Foundation +- [ ] Set up PostgreSQL database +- [ ] Implement data persistence layer +- [ ] Add comprehensive error handling +- [ ] Set up testing framework +- [ ] Create development documentation + +### Week 3-4: Memory System +- [ ] Implement MemoryManager +- [ ] Create memory API endpoints +- [ ] Build MemorySystemViewer UI +- [ ] Add memory extraction from episodes +- [ ] Test memory consistency + +### Week 5-6: Review System +- [ ] Implement ReviewManager +- [ ] Create review API endpoints +- [ ] Build ReviewConfig UI +- [ ] Build ReviewResults UI +- [ ] Add custom rule builder +- [ ] Test review accuracy + +--- + +## Conclusion + +This implementation plan provides a comprehensive roadmap for building the Creative Studio platform. The key focus areas are: + +1. **Memory System**: Ensure multi-episode consistency +2. **Review System**: Maintain quality standards +3. **Enhanced Execution**: Provide flexible creation modes +4. **Skill Marketplace**: Enable skill sharing +5. **User Experience**: Intuitive, powerful interface + +The current implementation has a solid foundation with Skills CRUD, basic Agent framework, and project management. The next phases focus on adding the missing critical features while maintaining code quality and user experience. + +**Estimated Total Timeline**: 12 weeks for full MVP +**Team Size**: 2-3 developers recommended +**Budget**: Consider API costs, infrastructure, and development resources + +--- + +**Document Version**: 1.0 +**Last Updated**: 2024-01-24 +**Next Review**: 2024-02-01 diff --git a/MVP_GUIDE.md b/MVP_GUIDE.md new file mode 100644 index 0000000..f3141d1 --- /dev/null +++ b/MVP_GUIDE.md @@ -0,0 +1,328 @@ +# Creative Studio MVP 使用指南 + +## 快速开始 + +### 1. 启动后端 + +```powershell +cd backend +.\start.bat +``` + +### 2. 启动前端 + +```powershell +cd frontend +npm install +npm run dev +``` + +### 3. 
配置 GLM API Key + +编辑 `backend\.env` 文件: + +```env +ZHIPUAI_API_KEY=your_api_key_here +``` + +获取 API Key: https://open.bigmodel.cn/usercenter/apikeys + +--- + +## 完整创作流程演示 + +### 步骤 1: 创建项目 + +1. 访问 http://localhost:5173 +2. 点击 "创建新项目" +3. 填写项目信息: + +``` +项目名称: 宫廷风云 +总集数: 30 + +世界观设定: 架空朝代天启朝,皇权与相权之争 + +风格指南: 古风正剧,对话半文半白 + +主要人物: +李云飞:边关将军,耿直刚正,说话简练直接 +苏婉儿:丞相之女,聪慧隐忍,说话含蓄委婉 + +整体大纲: +EP01-05:相识与相恋,两人被迫分离 +EP06-15:宫中斗争,婉儿周旋,云飞征战 +EP16-25:真相大白,联手对抗丞相 +EP26-30:最终对决,有情人终成眷属 +``` + +### 步骤 2: 开始创作 + +1. 进入项目详情页 +2. 点击 "开始创作 (EP1-EP3)" +3. 等待 AI 创作完成(每集约需 30-60 秒) + +### 步骤 3: 查看结果 + +1. 创作完成后,剧集列表会显示已完成 +2. 点击 "查看内容" 查看剧本 +3. 可以看到大纲、完整剧本、质量分数 + +--- + +## API 使用示例 + +### 使用 Swagger UI + +访问 http://localhost:8000/docs + +#### 1. 查看所有 Skills + +``` +GET /api/v1/skills +``` + +#### 2. 测试 Skill + +``` +POST /api/v1/skills/dialogue_writer_ancient/test +{ + "test_input": "请创作一段古风对话:李云飞向苏婉儿告别", + "temperature": 0.7 +} +``` + +#### 3. 创建项目 + +``` +POST /api/v1/projects +{ + "name": "宫廷风云", + "totalEpisodes": 30, + "globalContext": { + "worldSetting": "架空朝代天启朝...", + "characterProfiles": { + "char_1": { + "name": "李云飞", + "description": "边关将军", + "personality": "耿直刚正", + "speechStyle": "简练直接" + } + } + } +} +``` + +#### 4. 执行剧集创作 + +``` +POST /api/v1/projects/{project_id}/execute +{ + "episodeNumber": 1, + "title": "初识" +} +``` + +--- + +## Python 脚本示例 + +### 完整创作流程脚本 + +```python +import requests +import time + +BASE_URL = "http://localhost:8000/api/v1" + +# 1. 创建项目 +project_data = { + "name": "测试项目", + "totalEpisodes": 3, + "globalContext": { + "worldSetting": "架空朝代,皇权与相权之争", + "overallOutline": "EP1-3:相识与相恋" + } +} + +response = requests.post(f"{BASE_URL}/projects", json=project_data) +project = response.json() +project_id = project["id"] +print(f"✅ 项目创建成功: {project_id}") + +# 2. 执行 EP1 创作 +print("\n🎬 开始创作 EP1...") +start_time = time.time() + +response = requests.post( + f"{BASE_URL}/projects/{project_id}/execute", + json={"episodeNumber": 1, "title": "初识"} +) + +result = response.json() +if result["success"]: + episode = result["episode"] + print(f"✅ EP1 创作完成!") + print(f" 质量分数: {episode['qualityScore']}") + print(f" 创作耗时: {time.time() - start_time:.1f}秒") + print(f"\n📜 剧本预览:") + print(episode["content"][:500] + "...") +else: + print(f"❌ 创作失败: {result['message']}") + +# 3. 
获取所有剧集 +response = requests.get(f"{BASE_URL}/projects/{project_id}/episodes") +episodes = response.json() +print(f"\n📊 已完成 {len(episodes)} 集") +for ep in episodes: + print(f" EP{ep['number']}: {ep['status']} - 分数: {ep.get('qualityScore', 'N/A')}") +``` + +### 批量创作脚本 + +```python +import requests +import time + +BASE_URL = "http://localhost:8000/api/v1" +project_id = "your-project-id" + +# 批量创作 EP1-EP5 +for ep_num in range(1, 6): + print(f"\n🎬 创作 EP{ep_num}...") + + start = time.time() + response = requests.post( + f"{BASE_URL}/projects/{project_id}/execute", + json={"episodeNumber": ep_num} + ) + + result = response.json() + elapsed = time.time() - start + + if result["success"]: + episode = result["episode"] + print(f"✅ 完成 - 分数: {episode['qualityScore']} - 耗时: {elapsed:.1f}秒") + else: + print(f"❌ 失败: {result['message']}") +``` + +--- + +## cURL 示例 + +### 创建项目 + +```bash +curl -X POST http://localhost:8000/api/v1/projects \ + -H "Content-Type: application/json" \ + -d '{ + "name": "宫廷风云", + "totalEpisodes": 3, + "globalContext": { + "worldSetting": "架空朝代天启朝", + "overallOutline": "EP1-3:相识与相恋" + } + }' +``` + +### 执行创作 + +```bash +curl -X POST http://localhost:8000/api/v1/projects/{project_id}/execute \ + -H "Content-Type: application/json" \ + -d '{ + "episodeNumber": 1, + "title": "初识" + }' +``` + +--- + +## 预期输出示例 + +### 创作完成的剧本 + +``` +【场景】御花园,傍晚,落霞满天 +【人物】李云飞、苏婉儿 + +李云飞:(拱手)苏姑娘,此地不宜久留。 + +苏婉儿:(轻叹)将军所言极是,只是... + +李云飞:(环顾四周)只是什么? + +苏婉儿:(低头)只是婉儿不知何时才能再与将军相见。 + +李云飞:(沉默片刻)三年,三年后我必回来。 + +苏婉儿:(猛地抬头)三年?太久了... + +李云飞:(坚定)这是我能做到的极限。 + +【场景转换】 +宫门之外,李云飞翻身上马,最后回首望向宫墙深处... + +``` + +--- + +## 故障排查 + +### 问题 1: GLM API 调用失败 + +**症状**: 创作失败,提示 "GLM API 调用失败" + +**解决**: +1. 检查 `.env` 文件中的 `ZHIPUAI_API_KEY` 是否正确 +2. 确认 API Key 有效且有足够额度 +3. 检查网络连接 + +### 问题 2: 创作结果为空 + +**症状**: 创作完成但内容为空 + +**解决**: +1. 查看后端日志 +2. 尝试降低 `temperature` 参数 +3. 检查提示词是否过长 + +### 问题 3: 前端无法连接后端 + +**症状**: 前端报错 "Network Error" + +**解决**: +1. 确认后端服务正在运行 (http://localhost:8000) +2. 检查 CORS 配置 +3. 确认前端代理配置正确 + +--- + +## MVP 功能限制 + +当前 MVP 版本的功能限制: + +| 功能 | 状态 | 说明 | +|-----|------|------| +| 项目管理 | ✅ | 完整支持 | +| 单集执行 | ✅ | 完整支持 | +| 批量执行 | ✅ | 简化实现 | +| 记忆系统 | ⚠️ | 简化版(仅记录事件) | +| 审核系统 | ⚠️ | 简化版(LLM 评分) | +| Agent 管理 | ❌ | 未实现(使用固定 Agent) | +| Skill 配置 | ⚠️ | 仅支持内置 Skills | +| 用户创建 Skill | ✅ | 支持 | +| 数据持久化 | ❌ | 内存存储(重启丢失) | + +--- + +## 下一步 + +MVP 验证通过后,可以继续实现: + +1. **数据持久化** - 集成 MongoDB 存储项目和剧集 +2. **完整记忆系统** - 事件时间线、伏笔追踪 +3. **高级审核** - 多维度审核、问题定位 +4. **Agent 管理** - 可视化 Agent 工作流 +5. **实时执行监控** - WebSocket 推送创作进度 diff --git a/README.md b/README.md new file mode 100644 index 0000000..86bc7b6 --- /dev/null +++ b/README.md @@ -0,0 +1,439 @@ +# Creative Studio + +> 基于 **Agent + Skill 架构** 的 AI 剧集创作平台 +> +> 集成智谱 AI GLM-4.7 模型,提供完整的自动化剧集创作解决方案 + +## 项目概述 + +Creative Studio 是一个创新的 AI 内容创作平台,通过 **可配置的 Skill(技能)** 指导 AI Agent 进行剧集创作。 + +### 核心特性 + +| 特性 | 说明 | +|-----|------| +| **Agent + Skill 架构** | 固定的 Agent 工作流 + 可配置的 Skill 行为指导 | +| **参考文件支持** | Skill 可关联参考文件(MD/TXT/JSON/YAML),融入 LLM 上下文 | +| **全局上下文管理** | 世界观、人物小传、场景设定等全局配置 | +| **记忆系统** | 事件时间线、伏笔追踪、人物状态变化记录 | +| **多集一致性** | 自动维护多集内容的一致性 | +| **审核系统** | 多维度质量审核和自动修复建议 | +| **内容管理** | Markdown 渲染预览、编辑、导出(MD/TXT/PDF/DOCX) | + +### 技术栈 + +``` +后端: FastAPI + GLM-4.7 + MongoDB + Redis +前端: React 18 + TypeScript + Ant Design + Vite + react-markdown + react-syntax-highlighter +``` + +## 快速开始 + +### 前置要求 + +- Python 3.11+ +- Node.js 18+ +- MongoDB 7.0+ (可选,默认使用内存存储) +- Redis 7.0+ (可选,用于 Celery 任务队列) +- 智谱 AI API Key + +### 1. 
克隆项目 + +```bash +git clone <repository-url> +cd creative_studio +``` + +### 2. 配置后端 + +```bash +cd backend + +# (推荐) 创建虚拟环境 +python -m venv venv + +# 激活虚拟环境 +# Windows: +venv\Scripts\activate +# Linux/Mac: +source venv/bin/activate + +# 安装依赖 +pip install -r requirements.txt + +# 配置环境变量 +# Windows: +copy .env.example .env +# Linux/Mac: +cp .env.example .env + +# 编辑 .env 文件,必须配置以下内容: +# - ZAI_API_KEY: 你的智谱 AI API Key (必填) +# - SECRET_KEY: 随机生成的密钥 (必填) +# 其他配置可以使用默认值 + +# 启动后端服务 +python -m uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload +``` + +后端服务将运行在 `http://localhost:8000` + +API 文档: `http://localhost:8000/docs` + +健康检查: `http://localhost:8000/health` + +### 3. 配置前端 + +```bash +cd frontend + +# 安装依赖 +npm install + +# 启动开发服务器 +npm run dev +``` + +前端服务将运行在 `http://localhost:5173` + +### 常见问题排查 + +#### 后端启动失败 + +1. **ModuleNotFoundError**: 确保在虚拟环境中安装了所有依赖 + ```bash + pip install -r requirements.txt + ``` + +2. **编码错误 (Windows)**: 日志中的 emoji 显示警告不影响功能,如需修复请设置终端编码为 UTF-8 + +3. **环境变量错误**: 确保 `.env` 文件存在且包含必填项 (`ZAI_API_KEY`, `SECRET_KEY`) + +#### 前端启动失败 + +1. **端口占用**: 如果 5173 端口被占用,Vite 会自动选择其他端口 +2. **依赖安装失败**: 尝试删除 `node_modules` 后重新安装 + ```bash + rm -rf node_modules # Linux/Mac + rmdir /s node_modules # Windows + npm install + ``` + +#### 数据库连接失败 + +如果没有安装 MongoDB 或 Redis,应用仍可正常运行,将使用内存存储。生产环境建议配置数据库。 + +## 项目结构 + +``` +creative_studio/ +├── backend/ # FastAPI 后端 +│ ├── app/ +│ │ ├── main.py # 应用入口 +│ │ ├── config.py # 配置管理 +│ │ ├── api/v1/ # API 路由 +│ │ │ ├── skills.py # Skill 管理 API +│ │ │ ├── projects.py # 项目管理 API +│ │ │ ├── episodes.py # 剧集内容 API +│ │ │ ├── uploads.py # 文件上传 API +│ │ │ └── agents.py # Agent 管理 API +│ │ ├── core/ +│ │ │ ├── llm/ # GLM-4.7 客户端(支持参考文件) +│ │ │ ├── skills/ # Skill 管理器(支持 skill-creator 脚本) +│ │ │ ├── agents/ # Agent 执行引擎 +│ │ │ ├── memory/ # 记忆系统 +│ │ │ └── review/ # 审核系统 +│ │ ├── models/ # 数据模型 +│ │ └── utils/ # 工具函数 +│ ├── skills_storage/ # Skill 文件存储 +│ │ ├── builtin_skills/ # 内置 Skills(平台维护) +│ │ │ └── skill-creator/ # Skill 创建工具(用于创建新 Skills) +│ │ └── user_skills/ # 用户 Skills +│ │ ├── consistency_checker/ +│ │ └── dialogue_writer_ancient/ +│ └── requirements.txt +│ +├── frontend/ # React 前端 +│ ├── src/ +│ │ ├── pages/ # 页面组件 +│ │ │ ├── SkillManagement.tsx +│ │ │ ├── ProjectWorkspace.tsx +│ │ │ ├── ExecutionMonitor.tsx +│ │ │ └── MemorySystem.tsx +│ │ ├── components/ # 通用组件 +│ │ │ ├── MarkdownRenderer.tsx # Markdown 渲染组件 +│ │ │ ├── ContentEditor.tsx # 内容编辑器 +│ │ │ └── SkillCreateWizard.tsx # Skill 创建向导 +│ │ ├── stores/ # Zustand 状态管理 +│ │ └── services/ # API 服务 +│ │ ├── episodeContentService.ts +│ │ ├── uploadService.ts +│ │ └── skillService.ts +│ └── package.json +│ +├── docs/ # 详细文档 +│ ├── ARCHITECTURE.md # 技术架构详解 +│ └── API.md # API 接口文档 +│ +├── creative-studio-platform.md # 原始产品规格文档(参考) +└── README.md # 本文件 +``` + +## 核心概念 + +### Agent + +Agent 是**固定的工作流框架**,由平台维护,用户不可编辑。 + +**剧集创作 Agent 工作流**: +1. 结构分析 - 分析场景构成、关键事件 +2. 大纲生成 - 生成本集大纲 +3. 对话创作 - 使用 Skill 创作剧本(支持参考文件) +4. 一致性审核 - 检查一致性并更新记忆 +5. 记忆更新 - 提取事件、伏笔、人物状态 + +### Skill + +Skill 是**可配置的创作能力单元**,通过行为指导影响 LLM 的输出。 + +**Skill 特性**: +- **行为指导**:指导 LLM 的输出风格和质量 +- **参考文件**:可上传 MD/TXT/JSON/YAML 文件作为参考 +- **Agent-Skill 生命周期**:注册 → 路由 → 执行 → 反馈 +- **skill-creator 标准**:使用 YAML frontmatter 和标准化目录结构 + +**示例 Skill**: + +```markdown +--- +name: dialogue-writer-ancient +description: Expert ancient-style dialogue writer for period dramas. Use this skill when creating character dialogue that must match historical settings. +--- + +## 行为指导 + +你创作对话时必须遵循以下规则: + +1. 使用半文半白的表达,如"何故"、"罢了"、"莫要" +2. 
皇室说话威严少语,武将直率简短,文官婉转 +3. 愤怒时用动作表达,不要直接说"我很生气" +4. 重要对话一句不超过 15 字 +``` + +### 参考文件 + +Skill 可以关联参考文件,文件内容会被自动融入 LLM 上下文: + +- **支持的格式**:MD, TXT, JSON, YAML +- **文件大小限制**:100KB(默认) +- **融入方式**:格式化后注入到系统提示词中 + +### 全局上下文 + +项目级共享设定: + +- **世界观设定**: 背景世界描述 +- **人物小传**: 角色设定、性格、说话风格 +- **场景设定**: 常驻场景描述 +- **整体大纲**: 故事线、转折点 + +### 记忆系统 + +自动维护一致性: + +- **事件时间线**: 已发生的关键事件 +- **伏笔追踪**: 新增伏笔、待收线问题 +- **人物状态**: 各人物状态的历史变化 + +### 审核系统 + +多维度质量检查: + +- **人物一致性**: 角色行为是否与设定一致 +- **剧情逻辑**: 故事逻辑是否连贯 +- **对话质量**: 对话是否自然流畅 + +## API 使用示例 + +### 列出所有 Skills + +```bash +curl http://localhost:8000/api/v1/skills +``` + +### 获取 Skill 及其参考文件 + +```bash +curl http://localhost:8000/api/v1/skills/dialogue-writer-ancient/with-references +``` + +### 测试 Skill + +```bash +curl -X POST http://localhost:8000/api/v1/skills/dialogue-writer-ancient/test \ + -H "Content-Type: application/json" \ + -d '{ + "test_input": "请帮我创作一段古风对话", + "temperature": 0.7 + }' +``` + +### 创建项目 + +```bash +curl -X POST http://localhost:8000/api/v1/projects \ + -H "Content-Type: application/json" \ + -d '{ + "name": "我的古风剧集", + "totalEpisodes": 30, + "globalContext": { + "worldSetting": "架空朝代天启朝...", + "characterProfiles": {...} + } + }' +``` + +### 执行剧集创作 + +```bash +curl -X POST http://localhost:8000/api/v1/projects/{project_id}/execute \ + -H "Content-Type: application/json" \ + -d '{ + "episode_number": 1 + }' +``` + +## 主要功能 + +### Skill 管理 + +- 查看 Skill 列表(内置 + 用户自定义) +- 使用 skill-creator 向导创建 Skill +- 上传参考文件(MD/TXT/JSON/YAML) +- 测试 Skill 效果 +- 编辑 Skill 内容和配置 + +### 项目管理 + +- 创建和管理剧集项目 +- 配置全局上下文 +- 选择使用的 Skills +- 查看项目进度和统计 + +### 剧集创作 + +- 单集创作 +- 批量创作 +- 实时进度监控 +- 自动保存到数据库 + +### 内容管理 + +- **Markdown 渲染预览**:使用 react-markdown + react-syntax-highlighter +- **在线编辑**:直接编辑剧集内容 +- **质量审核**:通过/拒绝、添加审核意见 +- **导出功能**:支持 MD/TXT/PDF/DOCX 格式 + +### 记忆系统 + +- 事件时间线查看 +- 伏笔追踪和管理 +- 人物状态变化记录 + +## 开发路线图 + +### Phase 1: 基础设施 ✅ +- [x] FastAPI 框架搭建 +- [x] GLM-4.7 客户端封装 +- [x] Skill 管理系统 +- [x] 前端框架搭建 + +### Phase 2: 核心功能 ✅ +- [x] Agent 执行引擎 +- [x] 项目管理系统 +- [x] 记忆系统 +- [x] 前端核心页面 +- [x] Markdown 渲染预览 +- [x] 参考文件 LLM 融入 + +### Phase 3: 高级功能 ⚠️ +- [x] 内容 CRUD API +- [x] 导出功能(基础) +- [ ] 三种执行模式完善 +- [ ] Skill Marketplace +- [ ] PDF/DOCX 导出完善 + +### Phase 4: 优化完善 📝 +- [ ] UI/UX 优化 +- [ ] 测试覆盖 +- [ ] 部署方案 +- [ ] 文档完善 + +## 文档导航 + +- **[API 文档](docs/API.md)** - 完整的 REST API 接口文档 +- **[架构文档](docs/ARCHITECTURE.md)** - 技术架构详细说明 +- **[原始产品规格](creative-studio-platform.md)** - 原始产品规格文档(参考) + +## 常见问题 + +### 如何获取智谱 AI API Key? + +访问 [智谱AI开放平台](https://open.bigmodel.cn/usercenter/apikeys) 创建 API Key。 + +### 支持哪些模型? + +- `glm-4-flash` - 快速响应模型(推荐) +- `glm-4-plus` - 高质量模型 +- `glm-4-0520` - 稳定版本 + +### Skill 的参考文件是如何工作的? + +参考文件会被 SkillManager 加载,格式化后融入 GLM 客户端的系统提示词中,使 LLM 能够参考这些内容进行创作。 + +### 如何创建新的 Skill? + +**方法一:使用前端向导(推荐)** +1. 进入 Skill 管理页面 +2. 点击"创建新 Skill" +3. 按照向导步骤填写信息、上传参考文件 +4. 预览并保存 + +**方法二:使用 skill-creator 脚本** +```bash +# 初始化 Skill 模板 +cd backend/skills_storage/builtin_skills/skill-creator/scripts +python init_skill.py my-skill --path ../../../user_skills/ + +# 编辑 SKILL.md 文件 +# 添加参考文件到 references/ 目录 + +# 验证 Skill +python quick_validate.py ../../../user_skills/my-skill + +# 打包 Skill(可选) +python package_skill.py ../../../user_skills/my-skill +``` + +**方法三:手动创建** +1. 在 `backend/skills_storage/user_skills/` 下创建目录 +2. 创建 `SKILL.md` 文件,包含 YAML frontmatter: +```markdown +--- +name: my-skill +description: 简短描述此 Skill 的用途 +--- + +## 行为指导 +你的行为指导... +``` +3. 可选:添加 `references/` 目录存放参考文件 +4. 
重启后端服务自动加载 + +## 许可证 + +MIT License + +--- + +**Creative Studio** - 让 AI 创作更简单、更可控 diff --git a/backend/.env.example b/backend/.env.example new file mode 100644 index 0000000..34eb711 --- /dev/null +++ b/backend/.env.example @@ -0,0 +1,65 @@ +# Creative Studio Backend Environment Configuration +# 复制此文件为 .env 并填入实际值 + +# ============================================ +# 应用配置 +# ============================================ +APP_NAME=Creative Studio +APP_VERSION=1.0.0 +DEBUG=True +ENVIRONMENT=development + +# 服务器配置 +HOST=0.0.0.0 +PORT=8000 + +# ============================================ +# GLM API 配置 +# ============================================ +# 获取 API Key: https://open.bigmodel.cn/usercenter/apikeys +ZAI_API_KEY=your_api_key_here +ZAI_MODEL=glm-4.7 +# 可选模型: glm-4-flash, glm-4-plus, glm-4-0520 + +# ============================================ +# 数据库配置 +# ============================================ +# MongoDB +MONGODB_URL=mongodb://localhost:27017 +MONGODB_DATABASE=creative_studio + +# Redis +REDIS_URL=redis://localhost:6379/0 + +# ============================================ +# 安全配置 +# ============================================ +SECRET_KEY=your-super-secret-key-change-this-in-production +ALGORITHM=HS256 +ACCESS_TOKEN_EXPIRE_MINUTES=60 +REFRESH_TOKEN_EXPIRE_DAYS=7 + +# ============================================ +# 文件存储配置 +# ============================================ +# 本地存储路径 +STORAGE_PATH=./storage +# 最大文件大小 (MB) +MAX_FILE_SIZE=100 + +# ============================================ +# Celery 配置 +# ============================================ +CELERY_BROKER_URL=redis://localhost:6379/1 +CELERY_RESULT_BACKEND=redis://localhost:6379/2 + +# ============================================ +# CORS 配置 +# ============================================ +ALLOWED_ORIGINS=http://localhost:5173,http://localhost:3000 + +# ============================================ +# 日志配置 +# ============================================ +LOG_LEVEL=INFO +LOG_FILE=./logs/app.log diff --git a/backend/.gitignore b/backend/.gitignore new file mode 100644 index 0000000..c23c0a7 --- /dev/null +++ b/backend/.gitignore @@ -0,0 +1,111 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +*.manifest +*.spec + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# IDEs +.idea/ +.vscode/ +*.swp +*.swo +*~ + +# Project specific +logs/ +storage/ +*.db diff --git a/backend/README.md b/backend/README.md new file mode 100644 index 0000000..58a78fe --- /dev/null +++ b/backend/README.md @@ -0,0 +1,115 @@ +# Creative Studio Backend + +基于 FastAPI 的 AI 剧集创作平台后端服务。 + +## 
快速开始 + +详细的快速启动指南请参考项目根目录的 [README.md](../README.md)。 + +### 安装依赖 + +```bash +pip install -r requirements.txt +``` + +### 配置环境变量 + +```bash +cp .env.example .env +# 编辑 .env 文件,填入 ZAI_API_KEY +``` + +### 启动服务 + +```bash +uvicorn app.main:app --reload +``` + +### 访问 API 文档 + +- Swagger UI: http://localhost:8000/docs +- ReDoc: http://localhost:8000/redoc + +## 技术栈 + +- Python 3.11+ +- FastAPI +- MongoDB +- Redis +- zai-sdk (智谱 AI SDK) + +## 主要功能 + +- Skill 管理系统 +- 项目管理 +- 记忆系统 +- 审核系统 +- Agent 执行引擎 + +## 开发指南 + +完整的开发指南请参考 [DEVELOPMENT_GUIDE.md](../DEVELOPMENT_GUIDE.md)。 + +## 项目结构 + +``` +backend/ +├── app/ +│ ├── main.py # 应用入口 +│ ├── config.py # 配置管理 +│ ├── api/v1/ # API 路由 +│ ├── core/ # 核心业务逻辑 +│ │ ├── llm/ # GLM 客户端 +│ │ ├── skills/ # Skill 管理器 +│ │ ├── agents/ # Agent 执行引擎 +│ │ ├── memory/ # 记忆系统 +│ │ └── review/ # 审核系统 +│ ├── models/ # 数据模型 +│ └── utils/ # 工具函数 +│ +├── skills_storage/ # Skill 文件存储 +│ ├── builtin_skills/ # 内置 Skills +│ └── user_skills/ # 用户 Skills +│ +└── requirements.txt +``` + +## API 端点 + +详细的 API 文档请参考 [docs/API.md](../docs/API.md)。 + +### Skill 管理 + +| 端点 | 方法 | 描述 | +|-----|------|------| +| `/api/v1/skills` | GET | 列出所有 Skills | +| `/api/v1/skills/{id}` | GET | 获取 Skill 详情 | +| `/api/v1/skills` | POST | 创建新 Skill | +| `/api/v1/skills/{id}/test` | POST | 测试 Skill | +| `/api/v1/skills/{id}` | PUT | 更新 Skill | +| `/api/v1/skills/{id}` | DELETE | 删除 Skill | + +### 项目管理 + +| 端点 | 方法 | 描述 | +|-----|------|------| +| `/api/v1/projects` | GET | 列出项目 | +| `/api/v1/projects` | POST | 创建项目 | +| `/api/v1/projects/{id}` | GET | 获取项目详情 | +| `/api/v1/projects/{id}/execute` | POST | 执行剧集创作 | + +## 常见问题 + +### 如何获取智谱 AI API Key? + +访问 [智谱AI开放平台](https://open.bigmodel.cn/usercenter/apikeys) 创建 API Key。 + +### 支持哪些模型? + +- `glm-4-flash` - 快速响应模型(推荐) +- `glm-4-plus` - 高质量模型 +- `glm-4-0520` - 稳定版本 + +## 许可证 + +MIT License diff --git a/backend/app/__init__.py b/backend/app/__init__.py new file mode 100644 index 0000000..fae6129 --- /dev/null +++ b/backend/app/__init__.py @@ -0,0 +1,2 @@ +# Creative Studio Backend Application +__version__ = "1.0.0" diff --git a/backend/app/api/__init__.py b/backend/app/api/__init__.py new file mode 100644 index 0000000..9c7f58e --- /dev/null +++ b/backend/app/api/__init__.py @@ -0,0 +1 @@ +# API module diff --git a/backend/app/api/v1/__init__.py b/backend/app/api/v1/__init__.py new file mode 100644 index 0000000..7de4229 --- /dev/null +++ b/backend/app/api/v1/__init__.py @@ -0,0 +1 @@ +# API v1 module diff --git a/backend/app/api/v1/ai_assistant.py b/backend/app/api/v1/ai_assistant.py new file mode 100644 index 0000000..58ef528 --- /dev/null +++ b/backend/app/api/v1/ai_assistant.py @@ -0,0 +1,433 @@ +""" +AI 辅助生成 API + +提供 AI 辅助生成人物、大纲、解析剧本等功能 +支持 Skills 融入:将选定的 Skills 行为指导融入 LLM 调用 +""" +from fastapi import APIRouter, HTTPException +from typing import Dict, Any, List, Optional +from pydantic import BaseModel +from app.core.llm.glm_client import get_glm_client +from app.core.skills.skill_manager import get_skill_manager +from app.utils.logger import get_logger + +logger = get_logger(__name__) + +router = APIRouter(prefix="/ai-assistant", tags=["AI 辅助生成"]) + + +class SkillInfo(BaseModel): + """Skill 信息(由前端传递)""" + id: str + name: str + behavior: str # behavior_guide 内容 + + +class GenerateCharactersRequest(BaseModel): + """生成人物设定请求""" + idea: str # 用户的初步想法或故事框架 + projectName: Optional[str] = None + totalEpisodes: Optional[int] = None + skills: Optional[List[SkillInfo]] = None # 新增:Skills 列表 + + +class GenerateOutlineRequest(BaseModel): + """生成大纲请求""" + 
idea: str + totalEpisodes: int = 30 + genre: str = "古风" + projectName: Optional[str] = None + skills: Optional[List[SkillInfo]] = None # 新增:Skills 列表 + + +class ParseScriptRequest(BaseModel): + """解析剧本请求""" + content: str + extractCharacters: bool = True + extractOutline: bool = True + skills: Optional[List[SkillInfo]] = None # 新增:Skills 列表 + use_llm: bool = True # 新增:是否使用 LLM 分析(默认 True) + + +# ============================================================================ +# Skills 融入辅助函数 +# ============================================================================ + +async def build_enhanced_system_prompt( + base_role: str, + skills: Optional[List[SkillInfo]] = None, + skill_manager=None +) -> str: + """ + 构建融入 Skills 的增强 System Prompt + + Args: + base_role: 基础角色描述(如 "你是剧集创作专家") + skills: Skills 列表 + skill_manager: Skill Manager 实例(用于加载参考文件) + + Returns: + 增强后的 System Prompt + """ + prompt_parts = [base_role] + + if skills and len(skills) > 0: + prompt_parts.append("\n## 应用技能指导") + + # 添加每个 Skill 的行为指导 + for skill in skills: + prompt_parts.append(f"\n### {skill.name}") + prompt_parts.append(skill.behavior) + + # 尝试加载参考文件(如果有 skill_manager) + if skill_manager: + prompt_parts.append("\n## 参考资料") + + for skill in skills: + try: + references = await skill_manager.load_skill_references(skill.id) + if references: + prompt_parts.append(f"\n### 来自 {skill.name} 的参考文件:") + for filename, content in references.items(): + # 截取过长内容 + if len(content) > 2000: + content = content[:2000] + "\n...(内容过长,已截断)" + prompt_parts.append(f"\n#### {filename}\n{content}") + except Exception as e: + logger.warning(f"加载 Skill {skill.id} 参考文件失败: {str(e)}") + + return "\n".join(prompt_parts) + + +@router.post("/generate/characters") +async def generate_characters(request: GenerateCharactersRequest): + """ + AI 辅助生成人物设定 + + 根据用户的初步想法,使用 AI 生成完整的人物设定 + 支持融入 Skills 的行为指导 + """ + try: + glm_client = get_glm_client() + skill_manager = get_skill_manager() + + # 构建增强的 System Prompt(融入 Skills) + base_role = "你是专业的剧集创作专家,擅长创作丰富立体的人物角色。" + system_prompt = await build_enhanced_system_prompt( + base_role=base_role, + skills=request.skills, + skill_manager=skill_manager + ) + + # 构建用户提示 + extra_info = "" + if request.projectName: + extra_info += f"\n项目名称:{request.projectName}" + if request.totalEpisodes: + extra_info += f"\n总集数:{request.totalEpisodes}" + + prompt = f"""请根据以下想法生成 3-5 个主要人物设定: + +用户想法:{request.idea}{extra_info} + +要求: +1. 每个人物包含:姓名、身份、性格、说话风格、背景故事 +2. 人物之间要有关系冲突 +3. 每个人物 50-100 字 +4. 格式:姓名:身份 - 性格 - 说话风格 - 背景故事 +5. 严格遵守上面【应用技能指导】中的要求 + +请按以下格式输出: +【人物1】 +姓名:xxx +身份:xxx +性格:xxx +说话风格:xxx +背景故事:xxx +【人物2】 +... 
+""" + + logger.info(f"生成人物设定,使用 {len(request.skills) if request.skills else 0} 个 Skills") + + response = await glm_client.chat( + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": prompt} + ], + temperature=0.7 + ) + + content = response["choices"][0]["message"]["content"] + + return { + "success": True, + "characters": content, + "usage": response.get("usage") + } + + except Exception as e: + logger.error(f"生成人物失败: {str(e)}") + raise HTTPException(status_code=500, detail=f"生成失败: {str(e)}") + + +@router.post("/generate/outline") +async def generate_outline(request: GenerateOutlineRequest): + """ + AI 辅助生成大纲 + + 根据用户想法生成完整的剧集大纲 + 支持融入 Skills 的行为指导 + """ + try: + glm_client = get_glm_client() + skill_manager = get_skill_manager() + + # 构建增强的 System Prompt(融入 Skills) + base_role = "你是专业的剧集创作专家,擅长构建引人入胜的剧情结构和故事节奏。" + system_prompt = await build_enhanced_system_prompt( + base_role=base_role, + skills=request.skills, + skill_manager=skill_manager + ) + + # 构建用户提示 + prompt = f"""请根据以下想法生成完整的剧集大纲: + +用户想法:{request.idea} +总集数:{request.totalEpisodes} +类型:{request.genre} +{f'项目名称:{request.projectName}' if request.projectName else ''} + +要求: +1. 将故事分为 4-5 个阶段 +2. 每个阶段包含具体的集数范围 +3. 标注每个阶段的关键事件和转折点 +4. 字数 200-400 字 +5. 严格遵守上面【应用技能指导】中的要求 + +请按以下格式输出: +【阶段1】EPxx-EPxx:阶段名称 +内容概要... + +【阶段2】EPxx-EPxx:阶段名称 +... +""" + + logger.info(f"生成大纲,使用 {len(request.skills) if request.skills else 0} 个 Skills") + + response = await glm_client.chat( + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": prompt} + ], + temperature=0.7 + ) + + content = response["choices"][0]["message"]["content"] + + return { + "success": True, + "outline": content, + "usage": response.get("usage") + } + + except Exception as e: + logger.error(f"生成大纲失败: {str(e)}") + raise HTTPException(status_code=500, detail=f"生成失败: {str(e)}") + + +@router.post("/parse/script") +async def parse_script(request: ParseScriptRequest): + """ + 解析上传的剧本文件 + + 提取人物、大纲等关键信息 + 支持两种模式: + 1. LLM 智能分析模式(use_llm=True,默认):使用 LLM 深入分析,支持 Skills 融入 + 2. 快速正则模式(use_llm=False):使用正则表达式快速提取 + """ + try: + # 如果用户选择使用 LLM 分析 + if request.use_llm: + return await _parse_script_with_llm(request) + + # 否则使用快速正则模式 + return await _parse_script_with_regex(request) + + except Exception as e: + logger.error(f"解析剧本失败: {str(e)}") + import traceback + traceback.print_exc() + raise HTTPException(status_code=500, detail=f"解析失败: {str(e)}") + + +async def _parse_script_with_llm(request: ParseScriptRequest) -> Dict[str, Any]: + """ + 使用 LLM 智能分析剧本,支持 Skills 融入 + """ + glm_client = get_glm_client() + skill_manager = get_skill_manager() + + # 构建增强的 System Prompt(融入 Skills) + base_role = """你是专业的剧本分析专家,擅长从剧本中提取人物关系、剧情结构、对话风格等关键信息。 +你能识别人物的出场频率、人物关系、情感走向、对话特点等深层次信息。""" + system_prompt = await build_enhanced_system_prompt( + base_role=base_role, + skills=request.skills, + skill_manager=skill_manager + ) + + # 截取剧本内容(避免过长) + content = request.content + max_length = 8000 # 约 3000-4000 字 + if len(content) > max_length: + content = content[:max_length] + "\n\n...(剧本过长,已截断前部分进行分析)" + + # 构建分析提示 + analysis_requirements = [] + if request.extractCharacters: + analysis_requirements.append(""" +1. 人物分析: + - 识别所有出场人物 + - 统计每个人的出场次数/对话次数 + - 分析人物关系(上下级、敌对、盟友等) + - 提取人物性格特点和说话风格 + - 按重要性排序输出""") + + if request.extractOutline: + analysis_requirements.append(""" +2. 剧情大纲: + - 识别主要剧情阶段 + - 提取关键转折点 + - 分析故事结构(起承转合) + - 总结核心冲突""") + + prompt = f"""请分析以下剧本内容,提取关键信息: + +{content} + +要求: +{''.join(analysis_requirements)} +3. 
严格遵守上面【应用技能指导】中的分析要求
+
+请以结构化的格式输出,便于后续处理。
+"""
+
+    logger.info(f"使用 LLM 分析剧本,使用 {len(request.skills) if request.skills else 0} 个 Skills")
+
+    response = await glm_client.chat(
+        messages=[
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": prompt}
+        ],
+        temperature=0.3  # 使用较低温度确保分析准确性
+    )
+
+    analysis_result = response["choices"][0]["message"]["content"]
+
+    return {
+        "success": True,
+        "method": "llm",
+        "analysis": analysis_result,  # LLM 的完整分析结果
+        "characters": [],  # 可选:从分析结果中解析
+        "outline": "",  # 可选:从分析结果中解析
+        "summary": f"LLM 智能分析完成,使用 {len(request.skills) if request.skills else 0} 个 Skills",
+        "usage": response.get("usage")
+    }
+
+
+async def _parse_script_with_regex(request: ParseScriptRequest) -> Dict[str, Any]:
+    """
+    使用正则表达式快速提取剧本信息(不使用 LLM,不消耗 token)
+    """
+    result: Dict[str, Any] = {
+        "success": True,
+        "method": "regex",
+        "characters": [],
+        "outline": "",
+        "scenes": [],
+        "summary": ""
+    }
+
+    content = request.content
+
+    # 提取人物(简单实现)
+    if request.extractCharacters:
+        import re
+        # 修复正则表达式,使用更健壮的模式
+        # 支持多种格式:【人物名】、【人物名】:等(字符类必须以 ] 闭合)
+        dialogue_pattern = r'【([^】\n]+)】[::]?\s*'
+        matches = re.findall(dialogue_pattern, content)
+
+        # 统计每个人物出现的次数
+        character_counts = {}
+        for match in matches:
+            name = match.strip()
+            if name:
+                character_counts[name] = character_counts.get(name, 0) + 1
+
+        # 转换为结果格式
+        result["characters"] = [
+            {"name": name, "lines": count}
+            for name, count in sorted(character_counts.items(), key=lambda x: x[1], reverse=True)
+        ]
+
+        result["summary"] = f"共识别 {len(result['characters'])} 个人物(正则模式)"
+
+    # 提取大纲(简单实现)
+    if request.extractOutline:
+        # 按场景或集数分割
+        import re
+        scene_patterns = [
+            r'第[一二三四五六七八九十百千0-9]+[集场幕]',
+            r'【场景[^】\n]*】',  # 匹配【场景】、【场景转换】等场景标记
+            r'Scene\s*\d+',
+        ]
+
+        scenes = []
+        for pattern in scene_patterns:
+            found = re.split(pattern, content)
+            scenes.extend(found[1:])  # 跳过第一个元素
+
+        if scenes:
+            result["scenes"] = [s.strip()[:100] for s in scenes if s.strip()]
+            if not result["summary"]:
+                result["summary"] = f"共识别 {len(result['scenes'])} 个场景(正则模式)"
+
+    result["contentLength"] = len(content)
+
+    return result
+
+
+@router.get("/available-skills")
+async def get_available_skills():
+    """
+    获取可用的 Skills 列表,按分类展示
+    """
+    try:
+        skill_manager = get_skill_manager()
+        skills = await skill_manager.list_skills()
+
+        # 按分类组织
+        by_category: Dict[str, List[Dict]] = {}
+
+        for skill in skills:
+            if skill.category not in by_category:
+                by_category[skill.category] = []
+
+            by_category[skill.category].append({
+                "id": skill.id,
+                "name": skill.name,
+                "type": skill.type,
+                "description": skill.behavior_guide[:100] + "..."
if len(skill.behavior_guide) > 100 else skill.behavior_guide + }) + + return { + "categories": list(by_category.keys()), + "skills": by_category + } + + except Exception as e: + logger.error(f"获取 Skills 失败: {str(e)}") + raise HTTPException(status_code=500, detail=f"获取失败: {str(e)}") diff --git a/backend/app/api/v1/episodes.py b/backend/app/api/v1/episodes.py new file mode 100644 index 0000000..a1dd8d6 --- /dev/null +++ b/backend/app/api/v1/episodes.py @@ -0,0 +1,283 @@ +""" +剧集内容管理 API 路由 + +提供剧集内容的 CRUD、审核和导出功能 +""" +from fastapi import APIRouter, Depends, HTTPException, status, Query +from typing import List, Optional +from pydantic import BaseModel +from datetime import datetime + +from app.models.episode_content import ( + EpisodeContent, + EpisodeContentCreate, + EpisodeContentUpdate, + EpisodeContentReview, + ExportFormat, + ExportRequest +) +from app.utils.logger import get_logger + +logger = get_logger(__name__) + +router = APIRouter(prefix="/episodes", tags=["剧集内容管理"]) + + +# 简单的内存存储(实际应该使用 MongoDB) +# TODO: 替换为真实的数据库存储 +_episode_contents: dict = {} +_content_id_counter = 0 + + +def _generate_content_id(project_id: str, episode_number: int) -> str: + """生成内容 ID""" + global _content_id_counter + _content_id_counter += 1 + return f"ep_{project_id}_{episode_number:03d}_{_content_id_counter:03d}" + + +@router.get("/{project_id}", response_model=List[EpisodeContent]) +async def list_episode_contents( + project_id: str, + status: Optional[str] = None, + episode_number: Optional[int] = None +): + """ + 列出项目的所有剧集内容 + + - **project_id**: 项目 ID + - **status**: 筛选状态 + - **episode_number**: 筛选集数 + """ + contents = [] + + for content_id, content in _episode_contents.items(): + if content.get('project_id') != project_id: + continue + + if status and content.get('status') != status: + continue + + if episode_number is not None and content.get('episode_number') != episode_number: + continue + + contents.append(EpisodeContent(**content)) + + # 按集数排序 + contents.sort(key=lambda x: x.episode_number) + + return contents + + +@router.get("/{project_id}/{episode_number}", response_model=EpisodeContent) +async def get_episode_content( + project_id: str, + episode_number: int +): + """ + 获取指定集数的内容 + """ + for content in _episode_contents.values(): + if content.get('project_id') == project_id and content.get('episode_number') == episode_number: + return EpisodeContent(**content) + + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"内容不存在: 项目 {project_id}, 第 {episode_number} 集" + ) + + +@router.post("/", response_model=EpisodeContent, status_code=status.HTTP_201_CREATED) +async def create_episode_content(data: EpisodeContentCreate): + """ + 创建新的剧集内容 + + 通常由 Agent 自动调用,保存生成的内容 + """ + global _content_id_counter + + # 生成 ID + content_id = _generate_content_id(data.project_id, data.episode_number) + + # 创建内容对象 + content = { + "id": content_id, + "project_id": data.project_id, + "episode_number": data.episode_number, + "title": data.title, + "content": data.content, + "status": "draft", + "quality_score": None, + "review_issues": [], + "created_at": datetime.now().isoformat(), + "updated_at": datetime.now().isoformat(), + "created_by": "system", + "generation_params": data.generation_params, + "version": 1, + "parent_version": None + } + + _episode_contents[content_id] = content + + logger.info(f"创建剧集内容: {content_id}") + + return EpisodeContent(**content) + + +@router.put("/{content_id}", response_model=EpisodeContent) +async def update_episode_content( + content_id: str, + data: 
EpisodeContentUpdate +): + """ + 更新剧集内容 + + 支持用户编辑生成后的内容 + """ + if content_id not in _episode_contents: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"内容不存在: {content_id}" + ) + + content = _episode_contents[content_id].copy() + + # 更新字段 + if data.title is not None: + content['title'] = data.title + + if data.content is not None: + content['content'] = data.content + # 增加版本号 + content['version'] = content.get('version', 1) + 1 + + if data.status is not None: + content['status'] = data.status + + content['updated_at'] = datetime.now().isoformat() + + _episode_contents[content_id] = content + + logger.info(f"更新剧集内容: {content_id}") + + return EpisodeContent(**content) + + +@router.post("/{content_id}/review", response_model=EpisodeContent) +async def review_episode_content( + content_id: str, + review: EpisodeContentReview +): + """ + 审核剧集内容 + + 通过或拒绝生成的内容 + """ + if content_id not in _episode_contents: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"内容不存在: {content_id}" + ) + + content = _episode_contents[content_id].copy() + + # 更新状态 + content['status'] = 'approved' if review.approved else 'rejected' + content['updated_at'] = datetime.now().isoformat() + + if review.comment: + content['review_comment'] = review.comment + + _episode_contents[content_id] = content + + logger.info(f"审核剧集内容: {content_id}, 结果: {content['status']}") + + return EpisodeContent(**content) + + +@router.post("/{project_id}/export") +async def export_episode_contents( + project_id: str, + request: ExportRequest +): + """ + 导出剧集内容 + + 支持多种格式:Markdown, TXT, PDF, DOCX + """ + # 获取内容 + contents = [] + for content in _episode_contents.values(): + if content.get('project_id') != project_id: + continue + + if request.episode_numbers and content.get('episode_number') not in request.episode_numbers: + continue + + contents.append(content) + + if not contents: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"没有找到可导出的内容" + ) + + # 按格式生成导出内容 + if request.format == ExportFormat.MARKDOWN or request.format == ExportFormat.TXT: + # 文本格式(Markdown/TXT) + output_lines = [] + output_lines.append(f"# 剧集内容导出\n") + output_lines.append(f"导出时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n") + output_lines.append(f"总集数: {len(contents)}\n") + output_lines.append("---\n\n") + + for content in sorted(contents, key=lambda x: x.get('episode_number', 0)): + output_lines.append(f"## 第 {content.get('episode_number')} 集") + if content.get('title'): + output_lines.append(f"### {content['title']}") + output_lines.append("") + output_lines.append(content['content']) + output_lines.append("\n\n---\n\n") + + return { + "success": True, + "format": request.format, + "content": "\n".join(output_lines), + "filename": f"episodes_{project_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md" + } + + elif request.format == ExportFormat.PDF: + # PDF 格式(需要额外库支持,这里返回 Markdown 建议前端转换) + return { + "success": True, + "format": request.format, + "message": "PDF 导出功能需要前端渲染后转换", + "markdown_content": "\n\n".join([c['content'] for c in contents]) + } + + elif request.format == ExportFormat.DOCX: + # DOCX 格式(需要 python-docx 库) + return { + "success": True, + "format": request.format, + "message": "DOCX 导出功能需要后端安装 python-docx 库", + "markdown_content": "\n\n".join([c['content'] for c in contents]) + } + + +@router.delete("/{content_id}") +async def delete_episode_content(content_id: str): + """ + 删除剧集内容 + """ + if content_id not in _episode_contents: + raise HTTPException( + 
status_code=status.HTTP_404_NOT_FOUND, + detail=f"内容不存在: {content_id}" + ) + + del _episode_contents[content_id] + + logger.info(f"删除剧集内容: {content_id}") + + return {"success": True, "message": "删除成功"} diff --git a/backend/app/api/v1/memory.py b/backend/app/api/v1/memory.py new file mode 100644 index 0000000..ee8bc2c --- /dev/null +++ b/backend/app/api/v1/memory.py @@ -0,0 +1,729 @@ +""" +记忆系统 API 路由 + +Memory System API - 提供记忆系统的管理接口 +""" +from fastapi import APIRouter, Depends, HTTPException, status +from typing import List, Dict +from datetime import datetime +import uuid + +from app.models.memory import ( + EnhancedMemory, + TimelineEvent, + PendingThread, + ThreadCreateRequest, + ThreadUpdateRequest, + ThreadResolveRequest, + MemoryUpdateRequest, + MemoryExtractionResult, + CharacterStateChange, + ResolutionSuggestion, + ThreadStatus, + ImportanceLevel +) +from app.models.project import SeriesProject, Episode +from app.core.memory.memory_manager import get_memory_manager, MemoryManager +from app.db.repositories import project_repo, episode_repo +from app.utils.logger import get_logger + +logger = get_logger(__name__) + +router = APIRouter(prefix="/projects/{project_id}/memory", tags=["记忆系统"]) + + +# ============================================ +# 辅助函数 +# ============================================ + +async def verify_project(project_id: str) -> SeriesProject: + """验证项目存在性""" + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + return project + + +# ============================================ +# 记忆系统主端点 +# ============================================ + +@router.get("/", response_model=EnhancedMemory) +async def get_project_memory(project_id: str): + """ + 获取项目完整记忆系统 + + 返回项目的所有记忆数据,包括: + - 事件时间线 + - 待收线问题 + - 角色状态历史 + - 伏笔追踪 + - 角色关系 + """ + project = await verify_project(project_id) + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + return enhanced_memory + + +@router.post("/update", response_model=MemoryExtractionResult) +async def update_memory_from_episode( + project_id: str, + request: MemoryUpdateRequest +): + """ + 从剧集更新记忆系统 + + 自动执行以下操作: + - 提取关键事件 + - 检测伏笔和待收线 + - 更新角色状态 + - 检查一致性 + + Args: + request: 记忆更新请求 + """ + project = await verify_project(project_id) + + # 获取剧集 + episodes = await episode_repo.list_by_project(project_id) + episode = None + for ep in episodes: + if ep.number == request.episode_number: + episode = ep + break + + if not episode: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"剧集不存在: EP{request.episode_number}" + ) + + if not episode.content: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="剧集内容为空,无法更新记忆" + ) + + try: + memory_manager = get_memory_manager() + result = await memory_manager.update_memory_from_episode(project, episode) + + # 保存更新后的记忆 + await project_repo.update(project_id, { + "memory": project.memory.dict() + }) + + return result + + except Exception as e: + logger.error(f"更新记忆失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"更新记忆失败: {str(e)}" + ) + + +# ============================================ +# 事件时间线 +# ============================================ + +@router.get("/timeline", response_model=List[TimelineEvent]) +async def get_event_timeline( + project_id: str, + skip: int = 0, + limit: int = 100, + episode: int = None, + importance: ImportanceLevel = 
None, + character: str = None +): + """ + 获取事件时间线 + + 支持过滤: + - episode: 按集数过滤 + - importance: 按重要程度过滤 + - character: 按角色过滤 + """ + project = await verify_project(project_id) + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + events = enhanced_memory.eventTimeline + + # 应用过滤 + if episode is not None: + events = [e for e in events if e.episode == episode] + + if importance is not None: + events = [e for e in events if e.importance == importance] + + if character: + events = [e for e in events if character in e.characters_involved] + + # 按集数排序 + events = sorted(events, key=lambda x: x.episode) + + # 分页 + return events[skip:skip + limit] + + +@router.get("/timeline/episodes", response_model=List[dict]) +async def get_timeline_by_episodes(project_id: str): + """ + 按集数分组获取时间线 + + 返回格式:[{episode: 1, events: [...]}, ...] + """ + project = await verify_project(project_id) + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + # 按集数分组 + episodes_dict = {} + for event in enhanced_memory.eventTimeline: + ep = event.episode + if ep not in episodes_dict: + episodes_dict[ep] = [] + episodes_dict[ep].append(event) + + # 转换为列表并排序 + result = [ + {"episode": ep, "events": events} + for ep, events in sorted(episodes_dict.items()) + ] + + return result + + +# ============================================ +# 待收线管理 +# ============================================ + +@router.get("/threads", response_model=List[PendingThread]) +async def get_pending_threads( + project_id: str, + status: ThreadStatus = None, + importance: ImportanceLevel = None +): + """ + 获取待收线列表 + + 支持过滤: + - status: 按状态过滤 + - importance: 按重要程度过滤 + """ + project = await verify_project(project_id) + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + threads = enhanced_memory.pendingThreads + + # 应用过滤 + if status is not None: + threads = [t for t in threads if t.status == status] + + if importance is not None: + threads = [t for t in threads if t.importance == importance] + + # 按引入集数排序 + threads = sorted(threads, key=lambda x: x.introduced_at) + + return threads + + +@router.post("/threads", response_model=PendingThread, status_code=status.HTTP_201_CREATED) +async def create_thread( + project_id: str, + request: ThreadCreateRequest +): + """ + 手动添加待收线问题 + + 允许用户手动添加待收线,用于补充 LLM 未检测到的重要伏笔 + """ + project = await verify_project(project_id) + + # 获取当前最大集数 + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + current_episode = enhanced_memory.last_episode_processed + + thread = PendingThread( + id=str(uuid.uuid4()), + description=request.description, + introduced_at=current_episode, + importance=request.importance, + reminder_episode=request.reminder_episode, + characters_involved=request.characters_involved, + notes=request.notes, + created_at=datetime.now(), + updated_at=datetime.now() + ) + + # 添加到记忆 + enhanced_memory.pendingThreads.append(thread) + project.memory = memory_manager._convert_to_memory(enhanced_memory) + + # 保存 + await project_repo.update(project_id, { + "memory": project.memory.dict() + }) + + logger.info(f"创建待收线: {thread.id} - {thread.description}") + return thread + + +@router.put("/threads/{thread_id}", response_model=PendingThread) +async def update_thread( + project_id: str, + thread_id: str, + request: ThreadUpdateRequest +): + """ + 更新待收线信息 + + 
允许修改待收线的描述、状态、重要程度等 + """ + project = await verify_project(project_id) + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + # 查找待收线 + thread = None + for t in enhanced_memory.pendingThreads: + if t.id == thread_id: + thread = t + break + + if not thread: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"待收线不存在: {thread_id}" + ) + + # 更新字段 + if request.description is not None: + thread.description = request.description + + if request.importance is not None: + thread.importance = request.importance + + if request.status is not None: + thread.status = request.status + + if request.reminder_episode is not None: + thread.reminder_episode = request.reminder_episode + + if request.characters_involved is not None: + thread.characters_involved = request.characters_involved + + if request.notes is not None: + thread.notes = request.notes + + thread.updated_at = datetime.now() + + # 保存 + project.memory = memory_manager._convert_to_memory(enhanced_memory) + await project_repo.update(project_id, { + "memory": project.memory.dict() + }) + + logger.info(f"更新待收线: {thread_id}") + return thread + + +@router.post("/resolve-thread", response_model=dict) +async def resolve_thread( + project_id: str, + request: ThreadResolveRequest +): + """ + 标记待收线为已解决 + + 记录待收线的收线信息 + """ + project = await verify_project(project_id) + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + # 查找待收线 + thread = None + for t in enhanced_memory.pendingThreads: + if t.id == request.thread_id: + thread = t + break + + if not thread: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"待收线不存在: {request.thread_id}" + ) + + # 更新状态 + thread.resolved = True + thread.resolved_at = request.resolved_at + thread.status = ThreadStatus.RESOLVED + thread.updated_at = datetime.now() + + # 添加收线摘要到 notes + if request.resolution_summary: + thread.notes += f"\n[收线摘要 EP{request.resolved_at}]: {request.resolution_summary}" + + # 保存 + project.memory = memory_manager._convert_to_memory(enhanced_memory) + await project_repo.update(project_id, { + "memory": project.memory.dict() + }) + + logger.info(f"收线完成: {request.thread_id} @ EP{request.resolved_at}") + + return { + "success": True, + "message": f"待收线已收线", + "thread_id": request.thread_id, + "resolved_at": request.resolved_at + } + + +@router.delete("/threads/{thread_id}", status_code=status.HTTP_204_NO_CONTENT) +async def delete_thread( + project_id: str, + thread_id: str +): + """ + 删除待收线 + + 移除不需要的待收线记录 + """ + project = await verify_project(project_id) + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + # 查找并删除 + original_count = len(enhanced_memory.pendingThreads) + enhanced_memory.pendingThreads = [ + t for t in enhanced_memory.pendingThreads + if t.id != thread_id + ] + + if len(enhanced_memory.pendingThreads) == original_count: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"待收线不存在: {thread_id}" + ) + + # 保存 + project.memory = memory_manager._convert_to_memory(enhanced_memory) + await project_repo.update(project_id, { + "memory": project.memory.dict() + }) + + logger.info(f"删除待收线: {thread_id}") + return None + + +@router.get("/threads/{thread_id}/suggestion", response_model=ResolutionSuggestion) +async def get_thread_resolution_suggestion( + project_id: str, + thread_id: str +): + """ + 获取待收线的收线建议 + + 使用 LLM 生成如何收线的建议 + 
""" + project = await verify_project(project_id) + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + # 查找待收线 + thread = None + for t in enhanced_memory.pendingThreads: + if t.id == thread_id: + thread = t + break + + if not thread: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"待收线不存在: {thread_id}" + ) + + try: + suggestion = await memory_manager.suggest_thread_resolution( + thread=thread, + current_episode=enhanced_memory.last_episode_processed, + memory=enhanced_memory + ) + + return suggestion + + except Exception as e: + logger.error(f"生成收线建议失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"生成收线建议失败: {str(e)}" + ) + + +# ============================================ +# 角色状态 +# ============================================ + +@router.get("/characters", response_model=Dict[str, List[CharacterStateChange]]) +async def get_character_states( + project_id: str, + character: str = None +): + """ + 获取角色状态历史 + + Args: + character: 可选,筛选特定角色 + """ + project = await verify_project(project_id) + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + if character: + if character not in enhanced_memory.characterStates: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"角色不存在: {character}" + ) + return {character: enhanced_memory.characterStates[character]} + + return enhanced_memory.characterStates + + +@router.get("/characters/{character_name}", response_model=List[CharacterStateChange]) +async def get_character_state_history( + project_id: str, + character_name: str +): + """ + 获取特定角色的状态变化历史 + + 返回该角色在各集的状态变化记录 + """ + project = await verify_project(project_id) + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + if character_name not in enhanced_memory.characterStates: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"角色不存在: {character_name}" + ) + + return enhanced_memory.characterStates[character_name] + + +# ============================================ +# 伏笔追踪 +# ============================================ + +@router.get("/foreshadowing") +async def get_foreshadowing( + project_id: str, + payoff_status: bool = None +): + """ + 获取伏笔列表 + + Args: + payoff_status: 可选,筛选是否已呼应 + """ + project = await verify_project(project_id) + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + foreshadowing = enhanced_memory.foreshadowing + + if payoff_status is not None: + foreshadowing = [f for f in foreshadowing if f.is_payed_off == payoff_status] + + return foreshadowing + + +# ============================================ +# 一致性检查 +# ============================================ + +@router.post("/check-consistency") +async def run_consistency_check( + project_id: str, + episode_number: int +): + """ + 对指定剧集进行一致性检查 + + 检查剧集与已有记忆的一致性 + """ + project = await verify_project(project_id) + + # 获取剧集 + episodes = await episode_repo.list_by_project(project_id) + episode = None + for ep in episodes: + if ep.number == episode_number: + episode = ep + break + + if not episode: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"剧集不存在: EP{episode_number}" + ) + + if not episode.content: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="剧集内容为空" + ) + + try: + memory_manager = 
get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + issues = await memory_manager.check_consistency( + episode.content, + episode_number, + enhanced_memory + ) + + # 添加到记忆 + enhanced_memory.consistencyIssues.extend(issues) + project.memory = memory_manager._convert_to_memory(enhanced_memory) + + # 保存 + await project_repo.update(project_id, { + "memory": project.memory.dict() + }) + + return { + "episode_number": episode_number, + "issues_found": len(issues), + "issues": issues + } + + except Exception as e: + logger.error(f"一致性检查失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"一致性检查失败: {str(e)}" + ) + + +@router.get("/consistency-issues") +async def get_consistency_issues( + project_id: str, + severity: ImportanceLevel = None +): + """ + 获取一致性问题列表 + + Args: + severity: 可选,按严重程度筛选 + """ + project = await verify_project(project_id) + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + issues = enhanced_memory.consistencyIssues + + if severity is not None: + issues = [i for i in issues if i.severity == severity] + + return issues + + +# ============================================ +# 角色关系 +# ============================================ + +@router.get("/relationships") +async def get_relationships(project_id: str): + """ + 获取角色关系网络 + + 返回角色之间的关系映射 + """ + project = await verify_project(project_id) + return project.memory.relationships + + +@router.put("/relationships") +async def update_relationships( + project_id: str, + relationships: Dict[str, Dict[str, str]] +): + """ + 更新角色关系网络 + + 允许手动更新角色之间的关系 + """ + project = await verify_project(project_id) + + project.memory.relationships = relationships + + await project_repo.update(project_id, { + "memory": project.memory.dict() + }) + + logger.info(f"更新角色关系网络") + return project.memory.relationships + + +# ============================================ +# 统计信息 +# ============================================ + +@router.get("/stats") +async def get_memory_stats(project_id: str): + """ + 获取记忆系统统计信息 + + 返回: + - 总事件数 + - 待收线数量 + - 已收线数量 + - 角色数量 + - 伏笔数量 + - 一致性问题数量 + """ + project = await verify_project(project_id) + memory_manager = get_memory_manager() + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + pending_count = len([ + t for t in enhanced_memory.pendingThreads + if not t.resolved + ]) + + resolved_count = len([ + t for t in enhanced_memory.pendingThreads + if t.resolved + ]) + + foreshadowing_pending = len([ + f for f in enhanced_memory.foreshadowing + if not f.is_payed_off + ]) + + return { + "total_events": len(enhanced_memory.eventTimeline), + "pending_threads": pending_count, + "resolved_threads": resolved_count, + "total_threads": len(enhanced_memory.pendingThreads), + "characters_tracked": len(enhanced_memory.characterStates), + "foreshadowing_total": len(enhanced_memory.foreshadowing), + "foreshadowing_pending": foreshadowing_pending, + "consistency_issues": len(enhanced_memory.consistencyIssues), + "last_updated": enhanced_memory.last_updated, + "last_episode_processed": enhanced_memory.last_episode_processed + } diff --git a/backend/app/api/v1/projects.py b/backend/app/api/v1/projects.py new file mode 100644 index 0000000..8505e10 --- /dev/null +++ b/backend/app/api/v1/projects.py @@ -0,0 +1,776 @@ +""" +项目管理 API 路由 + +提供项目的 CRUD 操作和剧集执行功能 +""" +from fastapi import APIRouter, Depends, HTTPException, status, BackgroundTasks +from 
typing import List, Optional +from pydantic import BaseModel, Field + +from app.models.project import ( + SeriesProject, + SeriesProjectCreate, + Episode, + EpisodeExecuteRequest, + EpisodeExecuteResponse +) +from app.models.review import ReviewConfig +from app.models.skill_config import ( + ProjectSkillConfigUpdate, + SkillConfigResponse, + EpisodeSkillConfigUpdate +) +from app.core.agents.series_creation_agent import get_series_agent +from app.core.execution.batch_executor import get_batch_executor +from app.core.execution.retry_manager import get_retry_manager, RetryConfig +from app.db.repositories import project_repo, episode_repo +from app.utils.logger import get_logger + +logger = get_logger(__name__) + +router = APIRouter(prefix="/projects", tags=["项目管理"]) + + +# ============================================ +# 项目管理 +# ============================================ + +@router.post("/", response_model=SeriesProject, status_code=status.HTTP_201_CREATED) +async def create_project(project_data: SeriesProjectCreate): + """创建新项目""" + try: + project = await project_repo.create(project_data) + return project + except Exception as e: + logger.error(f"创建项目失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"创建项目失败: {str(e)}" + ) + + +@router.get("/", response_model=List[SeriesProject]) +async def list_projects(skip: int = 0, limit: int = 100): + """列出所有项目""" + projects = await project_repo.list(skip=skip, limit=limit) + return projects + + +@router.get("/{project_id}", response_model=SeriesProject) +async def get_project(project_id: str): + """获取项目详情""" + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + return project + + +@router.put("/{project_id}", response_model=SeriesProject) +async def update_project(project_id: str, project_data: dict): + """更新项目""" + project = await project_repo.update(project_id, project_data) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + return project + + +@router.delete("/{project_id}", status_code=status.HTTP_204_NO_CONTENT) +async def delete_project(project_id: str): + """删除项目""" + success = await project_repo.delete(project_id) + if not success: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + return None + + +# ============================================ +# 剧集管理 +# ============================================ + +@router.get("/{project_id}/episodes", response_model=List[Episode]) +async def list_episodes(project_id: str): + """列出项目的所有剧集""" + # 先验证项目存在 + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + episodes = await episode_repo.list_by_project(project_id) + return episodes + + +@router.get("/{project_id}/episodes/{episode_number}", response_model=Episode) +async def get_episode(project_id: str, episode_number: int): + """获取指定集数""" + episodes = await episode_repo.list_by_project(project_id) + for ep in episodes: + if ep.number == episode_number: + return ep + + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"剧集不存在: EP{episode_number}" + ) + + +# ============================================ +# 剧集执行(核心功能) +# ============================================ + +@router.post("/{project_id}/execute", response_model=EpisodeExecuteResponse) 
+async def execute_episode( + project_id: str, + request: EpisodeExecuteRequest +): + """ + 执行单集创作 + + 这是核心功能端点,调用 Agent 执行完整的创作流程 + """ + # 获取项目 + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + try: + # 获取 Agent + agent = get_series_agent() + + # 执行创作 + logger.info(f"开始执行创作: 项目 {project_id}, EP{request.episodeNumber}") + episode = await agent.execute_episode( + project=project, + episode_number=request.episodeNumber, + title=request.title + ) + + # 保存剧集 + episode.projectId = project_id + await episode_repo.create(episode) + + # 更新项目记忆 + await project_repo.update(project_id, { + "memory": project.memory.dict() + }) + + return EpisodeExecuteResponse( + episode=episode, + success=True, + message=f"EP{request.episodeNumber} 创作完成" + ) + + except Exception as e: + logger.error(f"执行创作失败: {str(e)}") + return EpisodeExecuteResponse( + episode=Episode( + projectId=project_id, + number=request.episodeNumber, + status="needs-review" + ), + success=False, + message=f"创作失败: {str(e)}" + ) + + +# ============================================ +# 请求/响应模型 +# ============================================ + +class BatchExecuteRequest(BaseModel): + """批量执行请求""" + start_episode: int = Field(..., ge=1, description="起始集数") + end_episode: int = Field(..., ge=1, description="结束集数") + enable_review: bool = Field(True, description="是否启用质量检查") + enable_retry: bool = Field(True, description="是否启用自动重试") + max_retries: int = Field(2, ge=0, le=5, description="最大重试次数") + quality_threshold: float = Field(75.0, ge=0, le=100, description="质量阈值") + + +class AutoExecuteRequest(BaseModel): + """自动执行请求""" + start_episode: int = Field(1, ge=1, description="起始集数") + episode_count: Optional[int] = Field(None, ge=1, description="执行集数(不指定则执行到项目总集数)") + enable_review: bool = Field(True, description="是否启用质量检查") + enable_retry: bool = Field(True, description="是否启用自动重试") + max_retries: int = Field(2, ge=0, le=5, description="最大重试次数") + quality_threshold: float = Field(75.0, ge=0, le=100, description="质量阈值") + stop_on_failure: bool = Field(False, description="失败时是否停止") + + +class StopExecutionRequest(BaseModel): + """停止执行请求""" + batch_id: str = Field(..., description="批次ID") + + +# ============================================ +# 增强的批量执行 +# ============================================ + +@router.post("/{project_id}/execute-batch", response_model=dict) +async def execute_batch_enhanced( + project_id: str, + request: BatchExecuteRequest, + background_tasks: BackgroundTasks +): + """ + 增强的批量执行 + + 新功能: + - 集成质量检查 + - 自动重试失败剧集 + - 实时进度推送(通过 WebSocket) + - 生成详细执行摘要 + + 返回批次ID,可通过 WebSocket 或状态端点追踪进度 + """ + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + if request.end_episode < request.start_episode: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="结束集数必须大于或等于起始集数" + ) + + # 获取批量执行器 + batch_executor = get_batch_executor() + + # 定义进度回调(发送 WebSocket 消息) + async def on_progress(progress_data): + from app.api.v1.websocket import broadcast_batch_progress + await broadcast_batch_progress( + batch_id=progress_data["batch_id"], + current_episode=progress_data["current_episode"], + total_episodes=progress_data["total_episodes"], + completed=progress_data["completed_episodes"], + failed=progress_data["failed_episodes"], + data=progress_data.get("current_episode_result", {}) + ) + + # 定义剧集完成回调 + 
    async def on_episode_complete(episode_result):
+        from app.api.v1.websocket import broadcast_episode_complete
+        await broadcast_episode_complete(
+            project_id=project_id,
+            episode_number=episode_result["episode_number"],
+            success=episode_result["success"],
+            quality_score=episode_result.get("quality_score", 0),
+            data=episode_result
+        )
+
+    # 定义错误回调
+    async def on_error(error_data):
+        from app.api.v1.websocket import broadcast_error
+        await broadcast_error(
+            project_id=project_id,
+            episode_number=error_data.get("episode_number"),
+            error=error_data.get("error", "未知错误"),
+            error_type="batch_execution_error"
+        )
+
+    # 构建审核配置
+    review_config = None
+    if request.enable_review:
+        from app.models.review import ReviewConfig, DimensionConfig, DimensionType
+        review_config = ReviewConfig(
+            enabled_review_skills=["consistency_checker"],
+            overall_strictness=0.7,
+            pass_threshold=request.quality_threshold
+        )
+        # 添加默认维度
+        for dim_type in [DimensionType.consistency, DimensionType.quality, DimensionType.dialogue]:
+            review_config.dimension_settings[dim_type] = DimensionConfig(
+                enabled=True,
+                strictness=0.7,
+                weight=1.0
+            )
+
+    # 在后台执行批量创作
+    async def run_batch():
+        try:
+            summary = await batch_executor.execute_batch(
+                project=project,
+                start_episode=request.start_episode,
+                end_episode=request.end_episode,
+                review_config=review_config,
+                enable_retry=request.enable_retry,
+                max_retries=request.max_retries,
+                on_progress=on_progress,
+                on_episode_complete=on_episode_complete,
+                on_error=on_error
+            )
+
+            # 广播完成消息
+            from app.api.v1.websocket import broadcast_batch_complete
+            await broadcast_batch_complete(summary["batch_id"], summary)
+
+        except Exception as e:
+            logger.error(f"批量执行后台任务失败: {str(e)}")
+            from app.api.v1.websocket import broadcast_error
+            await broadcast_error(
+                project_id=project_id,
+                episode_number=None,
+                error=f"批量执行失败: {str(e)}",
+                error_type="batch_error"
+            )
+
+    # 添加后台任务
+    background_tasks.add_task(run_batch)
+
+    # 立即返回占位批次ID
+    # 注意:批次在后台启动,执行器内部会生成自己的 batch_id,
+    # 这里返回的占位 ID 不一定与进度消息中的 batch_id 一致,
+    # 客户端应以 WebSocket 进度消息中携带的 batch_id 为准
+    import uuid
+    batch_id = str(uuid.uuid4())
+
+    return {
+        "batch_id": batch_id,
+        "project_id": project_id,
+        "start_episode": request.start_episode,
+        "end_episode": request.end_episode,
+        "status": "started",
+        "message": "批量执行已启动,请通过 WebSocket 或状态端点追踪进度",
+        "websocket_url": f"/ws/batches/{batch_id}/execute"
+    }
+
+
+@router.post("/{project_id}/execute-auto", response_model=dict)
+async def execute_auto(
+    project_id: str,
+    request: AutoExecuteRequest,
+    background_tasks: BackgroundTasks
+):
+    """
+    自动执行模式
+
+    自动执行指定数量的剧集,具有以下特性:
+    - 智能质量检查和重试
+    - 失败时可选停止或继续
+    - 实时进度通知
+    - 完整的执行摘要
+
+    这是推荐的执行方式,适合大部分使用场景
+    """
+    project = await project_repo.get(project_id)
+    if not project:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
+            detail=f"项目不存在: {project_id}"
+        )
+
+    # 确定结束集数:未指定 episode_count 时执行到项目总集数(与字段说明一致)
+    if request.episode_count is None:
+        end_episode = project.totalEpisodes
+    else:
+        end_episode = request.start_episode + request.episode_count - 1
+    if end_episode > project.totalEpisodes:
+        end_episode = project.totalEpisodes
+
+    # 获取执行器
+    batch_executor = get_batch_executor()
+
+    # 定义回调
+    async def on_progress(progress_data):
+        from app.api.v1.websocket import broadcast_batch_progress
+        await broadcast_batch_progress(
+            batch_id=progress_data["batch_id"],
+            current_episode=progress_data["current_episode"],
+            total_episodes=progress_data["total_episodes"],
+            completed=progress_data["completed_episodes"],
+            failed=progress_data["failed_episodes"],
+            data=progress_data.get("current_episode_result", {})
+        )
+
+    async def on_episode_complete(episode_result):
+        from app.api.v1.websocket import broadcast_episode_complete
+        await broadcast_episode_complete(
+            project_id=project_id,
+            episode_number=episode_result["episode_number"],
+            success=episode_result["success"],
+            quality_score=episode_result.get("quality_score", 0),
+            data=episode_result
+        )
+
+    async def on_error(error_data):
+        from app.api.v1.websocket import broadcast_error
+        await broadcast_error(
+            project_id=project_id,
+            episode_number=error_data.get("episode_number"),
+            error=error_data.get("error", "未知错误"),
+            error_type="auto_execution_error"
+        )
+
+        # 如果设置为失败时停止,则停止批次执行
+        if request.stop_on_failure:
+            batch_id = error_data.get("batch_id")
+            if batch_id:
+                await batch_executor.stop_batch(batch_id)
+
+    # 构建审核配置
+    review_config = None
+    if request.enable_review:
+        from app.models.review import ReviewConfig, DimensionConfig, DimensionType
+        review_config = ReviewConfig(
+            enabled_review_skills=["consistency_checker"],
+            overall_strictness=0.7,
+            pass_threshold=request.quality_threshold
+        )
+        for dim_type in [DimensionType.consistency, DimensionType.quality, DimensionType.dialogue]:
+            review_config.dimension_settings[dim_type] = DimensionConfig(
+                enabled=True,
+                strictness=0.7,
+                weight=1.0
+            )
+
+    # 后台执行
+    async def run_auto():
+        try:
+            summary = await batch_executor.execute_batch(
+                project=project,
+                start_episode=request.start_episode,
+                end_episode=end_episode,
+                review_config=review_config,
+                enable_retry=request.enable_retry,
+                max_retries=request.max_retries,
+                on_progress=on_progress,
+                on_episode_complete=on_episode_complete,
+                on_error=on_error
+            )
+
+            from app.api.v1.websocket import broadcast_batch_complete
+            await broadcast_batch_complete(summary["batch_id"], summary)
+
+        except Exception as e:
+            logger.error(f"自动执行失败: {str(e)}")
+            from app.api.v1.websocket import broadcast_error
+            await broadcast_error(
+                project_id=project_id,
+                episode_number=None,
+                error=f"自动执行失败: {str(e)}",
+                error_type="auto_error"
+            )
+
+    background_tasks.add_task(run_auto)
+
+    # 同样返回占位批次ID(与 execute-batch 的说明相同)
+    import uuid
+    batch_id = str(uuid.uuid4())
+
+    return {
+        "batch_id": batch_id,
+        "project_id": project_id,
+        "start_episode": request.start_episode,
+        "end_episode": end_episode,
+        "total_episodes": end_episode - request.start_episode + 1,
+        "status": "started",
+        "stop_on_failure": request.stop_on_failure,
+        "message": "自动执行已启动",
+        "websocket_url": f"/ws/batches/{batch_id}/execute"
+    }
+
+
+@router.get("/{project_id}/execution-status")
+async def get_execution_status(
+    project_id: str,
+    batch_id: Optional[str] = None
+):
+    """
+    获取执行状态
+
+    Args:
+        project_id: 项目ID
+        batch_id: 批次ID(可选,不提供则返回所有活跃批次)
+
+    Returns:
+        执行状态信息
+    """
+    project = await project_repo.get(project_id)
+    if not project:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
+            detail=f"项目不存在: {project_id}"
+        )
+
+    batch_executor = get_batch_executor()
+
+    if batch_id:
+        # 获取指定批次状态(局部变量不要命名为 status,以免遮蔽 fastapi 的 status 模块)
+        batch_status = batch_executor.get_batch_status(batch_id)
+        if not batch_status:
+            raise HTTPException(
+                status_code=status.HTTP_404_NOT_FOUND,
+                detail=f"批次不存在: {batch_id}"
+            )
+        return batch_status
+    else:
+        # 返回所有活跃批次(简化实现)
+        # 在实际应用中,可能需要维护项目到批次的映射
+        return {
+            "project_id": project_id,
+            "active_batches": [],
+            "message": "请提供具体的批次ID"
+        }
+
+
+@router.post("/{project_id}/stop-execution")
+async def stop_execution(project_id: str, request: StopExecutionRequest):
+    """
+    停止执行
+
+    停止正在运行的批量执行任务
+
+    Args:
+        project_id: 项目ID
+        request: 停止请求(包含批次ID)
+
+    Returns:
+        停止结果
+    """
+    project = await project_repo.get(project_id)
+    if not project:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
detail=f"项目不存在: {project_id}" + ) + + batch_executor = get_batch_executor() + success = await batch_executor.stop_batch(request.batch_id) + + if success: + return { + "batch_id": request.batch_id, + "project_id": project_id, + "status": "stopping", + "message": "已发送停止信号" + } + else: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"批次不存在或已完成: {request.batch_id}" + ) + + +# ============================================ +# 旧的批量执行端点(保留兼容性) +# ============================================ + +@router.post("/{project_id}/execute-batch-legacy") +async def execute_batch_legacy( + project_id: str, + start_episode: int = 1, + end_episode: int = 3 +): + """ + 旧的批量执行端点(保留用于向后兼容) + + 创作指定范围的剧集(用于分批次模式) + """ + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + agent = get_series_agent() + results = [] + + for ep_num in range(start_episode, end_episode + 1): + try: + episode = await agent.execute_episode(project, ep_num) + episode.projectId = project_id + await episode_repo.create(episode) + results.append({ + "episode": ep_num, + "success": True, + "qualityScore": episode.qualityScore + }) + except Exception as e: + results.append({ + "episode": ep_num, + "success": False, + "error": str(e) + }) + + return { + "projectId": project_id, + "results": results, + "total": len(results), + "success": sum(1 for r in results if r["success"]) + } + + +# ============================================ +# 记忆系统 +# ============================================ + +@router.get("/{project_id}/memory") +async def get_project_memory(project_id: str): + """获取项目记忆系统""" + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + return project.memory + + +# ============================================ +# Skills 配置管理 +# ============================================ + +@router.get("/{project_id}/skill-config", response_model=SkillConfigResponse) +async def get_project_skill_config(project_id: str): + """获取项目的 Skills 配置""" + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + return { + "defaultTaskSkills": project.defaultTaskSkills, + "episodeSkillOverrides": project.episodeSkillOverrides + } + + +@router.put("/{project_id}/skill-config") +async def update_project_skill_config( + project_id: str, + config: ProjectSkillConfigUpdate +): + """更新项目的 Skills 配置""" + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + try: + # 转换配置为字典格式(用于保存) + update_data = { + "defaultTaskSkills": [t.dict() for t in config.defaultTaskSkills], + "episodeSkillOverrides": { + k: v.dict() for k, v in config.episodeSkillOverrides.items() + } + } + + await project_repo.update(project_id, update_data) + + return { + "success": True, + "message": "Skills 配置已更新", + "defaultTaskSkills": config.defaultTaskSkills, + "episodeSkillOverrides": config.episodeSkillOverrides + } + + except Exception as e: + logger.error(f"更新 Skills 配置失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"更新失败: {str(e)}" + ) + + +@router.put("/{project_id}/episodes/{episode_number}/skill-config") +async def update_episode_skill_config( + project_id: str, + episode_number: 
int, + config: EpisodeSkillConfigUpdate +): + """更新单集的 Skills 覆盖配置""" + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + try: + # 获取现有覆盖配置 + overrides = dict(project.episodeSkillOverrides) + + # 更新或添加单集配置 + overrides[episode_number] = { + "episode_number": config.episode_number, + "task_configs": [t.dict() for t in config.task_configs], + "use_project_default": config.use_project_default + } + + await project_repo.update(project_id, { + "episodeSkillOverrides": overrides + }) + + return { + "success": True, + "message": f"EP{episode_number} 的 Skills 配置已更新", + "config": config + } + + except Exception as e: + logger.error(f"更新单集 Skills 配置失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"更新失败: {str(e)}" + ) + + +@router.delete("/{project_id}/episodes/{episode_number}/skill-config") +async def delete_episode_skill_config( + project_id: str, + episode_number: int +): + """删除单集的 Skills 覆盖配置(恢复使用项目默认)""" + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + try: + overrides = dict(project.episodeSkillOverrides) + + if episode_number not in overrides: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"EP{episode_number} 没有自定义配置" + ) + + # 删除单集配置 + del overrides[episode_number] + + await project_repo.update(project_id, { + "episodeSkillOverrides": overrides + }) + + return { + "success": True, + "message": f"EP{episode_number} 的配置已删除,将使用项目默认配置" + } + + except HTTPException: + raise + except Exception as e: + logger.error(f"删除单集 Skills 配置失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"删除失败: {str(e)}" + ) diff --git a/backend/app/api/v1/review.py b/backend/app/api/v1/review.py new file mode 100644 index 0000000..8839aee --- /dev/null +++ b/backend/app/api/v1/review.py @@ -0,0 +1,669 @@ +""" +Review System API Routes + +审核系统 API 路由 + +提供审核配置、执行、维度和自定义规则的管理接口 +""" +from fastapi import APIRouter, Depends, HTTPException, status +from typing import List, Optional +import uuid +from datetime import datetime + +from app.models.review import ( + ReviewConfig, + ReviewConfigUpdate, + ReviewResult, + ReviewRequest, + ReviewResponse, + DimensionInfo, + DimensionConfig, + DimensionType, + CustomRule, + CustomRuleCreate, + CustomRuleUpdate, + ReviewPreset +) +from app.models.project import SeriesProject, Episode +from app.core.review.review_manager import ReviewManager, get_review_manager +from app.db.repositories import project_repo, episode_repo +from app.utils.logger import get_logger + + +logger = get_logger(__name__) + +router = APIRouter(prefix="/review", tags=["审核系统"]) + + +# ============================================ +# 项目审核配置管理 +# ============================================ + +@router.get("/projects/{project_id}/review-config", response_model=ReviewConfig) +async def get_review_config(project_id: str): + """ + 获取项目的审核配置 + + Args: + project_id: 项目ID + + Returns: + ReviewConfig: 审核配置 + """ + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + # 从项目配置中获取审核配置 + # 如果还没有配置,返回默认配置 + review_config = getattr(project, 'reviewConfig', None) + if not review_config: + review_config = ReviewConfig() + + return review_config + + 
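For orientation, a minimal client-side sketch of how this GET endpoint pairs with the PUT endpoint defined next: GET falls back to a default ReviewConfig when the project has none stored, and PUT applies a partial update. The base URL, the /api/v1 mount point, and the "demo-project" ID are illustrative assumptions, not values taken from this codebase.

# Hedged usage sketch for the review-config endpoints (assumed to be
# mounted under /api/v1 on localhost:8000; adjust to the real deployment).
import asyncio
import httpx

BASE = "http://localhost:8000/api/v1/review"

async def raise_pass_threshold(project_id: str, threshold: float) -> dict:
    async with httpx.AsyncClient() as client:
        # GET returns a default ReviewConfig when none has been saved yet
        current = (await client.get(
            f"{BASE}/projects/{project_id}/review-config")).json()
        print("current pass_threshold:", current.get("pass_threshold"))

        # PUT is a partial update: omitted fields keep their stored values,
        # mirroring the endpoint's dict(exclude_unset=True) handling
        resp = await client.put(
            f"{BASE}/projects/{project_id}/review-config",
            json={"pass_threshold": threshold},
        )
        resp.raise_for_status()
        return resp.json()

if __name__ == "__main__":
    print(asyncio.run(raise_pass_threshold("demo-project", 80.0)))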
+@router.put("/projects/{project_id}/review-config", response_model=ReviewConfig) +async def update_review_config( + project_id: str, + config_update: ReviewConfigUpdate +): + """ + 更新项目的审核配置 + + Args: + project_id: 项目ID + config_update: 配置更新数据 + + Returns: + ReviewConfig: 更新后的配置 + """ + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + try: + # 获取现有配置 + current_config = getattr(project, 'reviewConfig', ReviewConfig()) + + # 更新配置 + update_data = config_update.dict(exclude_unset=True) + + for field, value in update_data.items(): + setattr(current_config, field, value) + + # 保存到项目 + await project_repo.update(project_id, { + "reviewConfig": current_config.dict() + }) + + logger.info(f"更新项目审核配置: {project_id}") + + return current_config + + except Exception as e: + logger.error(f"更新审核配置失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"更新配置失败: {str(e)}" + ) + + +@router.get("/projects/{project_id}/review-presets", response_model=List[ReviewPreset]) +async def get_review_presets(project_id: str = None): + """ + 获取审核预设列表 + + Args: + project_id: 可选的项目ID,用于返回项目特定的预设 + + Returns: + List[ReviewPreset]: 预设列表 + """ + # 内置预设 + presets = [ + ReviewPreset( + id="strict", + name="严格审核", + description="适用于高质量要求,检查所有维度", + category="quality", + config=ReviewConfig( + enabled_review_skills=["consistency_checker"], + overall_strictness=0.9, + dimension_settings={ + DimensionType.consistency: DimensionConfig(enabled=True, strictness=0.9, weight=1.2), + DimensionType.quality: DimensionConfig(enabled=True, strictness=0.8, weight=1.0), + DimensionType.pacing: DimensionConfig(enabled=True, strictness=0.7, weight=0.8), + DimensionType.dialogue: DimensionConfig(enabled=True, strictness=0.8, weight=1.0), + DimensionType.character: DimensionConfig(enabled=True, strictness=0.9, weight=1.1), + DimensionType.plot: DimensionConfig(enabled=True, strictness=0.9, weight=1.2) + }, + pass_threshold=85.0 + ) + ), + ReviewPreset( + id="balanced", + name="平衡审核", + description="适用于一般创作需求,平衡质量和效率", + category="general", + config=ReviewConfig( + enabled_review_skills=["consistency_checker"], + overall_strictness=0.7, + dimension_settings={ + DimensionType.consistency: DimensionConfig(enabled=True, strictness=0.7, weight=1.2), + DimensionType.quality: DimensionConfig(enabled=True, strictness=0.6, weight=1.0), + DimensionType.pacing: DimensionConfig(enabled=False, strictness=0.5, weight=0.8), + DimensionType.dialogue: DimensionConfig(enabled=True, strictness=0.6, weight=1.0), + DimensionType.character: DimensionConfig(enabled=True, strictness=0.7, weight=1.1), + DimensionType.plot: DimensionConfig(enabled=True, strictness=0.7, weight=1.2) + }, + pass_threshold=75.0 + ) + ), + ReviewPreset( + id="quick", + name="快速审核", + description="只检查核心维度,适用于快速迭代", + category="efficiency", + config=ReviewConfig( + enabled_review_skills=["consistency_checker"], + overall_strictness=0.5, + dimension_settings={ + DimensionType.consistency: DimensionConfig(enabled=True, strictness=0.7, weight=1.5), + DimensionType.quality: DimensionConfig(enabled=False, strictness=0.5, weight=1.0), + DimensionType.pacing: DimensionConfig(enabled=False, strictness=0.5, weight=0.8), + DimensionType.dialogue: DimensionConfig(enabled=False, strictness=0.5, weight=1.0), + DimensionType.character: DimensionConfig(enabled=False, strictness=0.5, weight=1.1), + DimensionType.plot: DimensionConfig(enabled=True, 
strictness=0.6, weight=1.5)
+                },
+                pass_threshold=70.0
+            )
+        ),
+        ReviewPreset(
+            id="dialogue_focused",
+            name="对话专项",
+            description="专注于对话质量和人物口吻",
+            category="specialized",
+            config=ReviewConfig(
+                enabled_review_skills=[],
+                overall_strictness=0.8,
+                dimension_settings={
+                    DimensionType.consistency: DimensionConfig(enabled=True, strictness=0.6, weight=0.8),
+                    DimensionType.dialogue: DimensionConfig(enabled=True, strictness=0.9, weight=1.5),
+                    DimensionType.character: DimensionConfig(enabled=True, strictness=0.8, weight=1.2)
+                },
+                pass_threshold=75.0
+            )
+        )
+    ]
+
+    return presets
+
+
+# ============================================
+# 审核执行
+# ============================================
+
+@router.post("/projects/{project_id}/episodes/{episode_number}/review", response_model=ReviewResponse)
+async def review_episode(
+    project_id: str,
+    episode_number: int,
+    request: ReviewRequest,
+    review_mgr: ReviewManager = Depends(get_review_manager)
+):
+    """
+    执行剧集审核
+
+    Args:
+        project_id: 项目ID
+        episode_number: 集数
+        request: 审核请求
+        review_mgr: 审核管理器(依赖注入)
+
+    Returns:
+        ReviewResponse: 审核结果
+    """
+    # 获取项目
+    project = await project_repo.get(project_id)
+    if not project:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
+            detail=f"项目不存在: {project_id}"
+        )
+
+    # 获取剧集
+    episodes = await episode_repo.list_by_project(project_id)
+    episode = None
+    for ep in episodes:
+        if ep.number == episode_number:
+            episode = ep
+            break
+
+    if not episode:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
+            detail=f"剧集不存在: EP{episode_number}"
+        )
+
+    # 检查剧集是否有内容
+    if not episode.content:
+        return ReviewResponse(
+            success=False,
+            message=f"剧集 EP{episode_number} 没有内容,无法审核",
+            episode_id=episode.id
+        )
+
+    try:
+        # 获取审核配置
+        config = getattr(project, 'reviewConfig', ReviewConfig())
+
+        # 执行审核
+        logger.info(f"开始审核: 项目 {project_id}, EP{episode_number}")
+
+        result = await review_mgr.review_episode(
+            project=project,
+            episode=episode,
+            config=config,
+            dimensions=request.dimensions
+        )
+
+        # 保存审核结果到剧集(按剧集 ID 更新,而不是项目 ID)
+        await episode_repo.update(episode.id, {
+            "reviewResult": result.dict(),
+            "status": "completed" if result.passed else "needs-review"
+        })
+
+        logger.info(f"审核完成: EP{episode_number}, 分数={result.overall_score:.1f}")
+
+        return ReviewResponse(
+            success=True,
+            review_result=result,
+            message=f"审核完成,总分: {result.overall_score:.1f}",
+            episode_id=episode.id
+        )
+
+    except Exception as e:
+        logger.error(f"审核失败: {str(e)}")
+        return ReviewResponse(
+            success=False,
+            message=f"审核失败: {str(e)}",
+            episode_id=episode.id
+        )
+
+
+@router.get("/projects/{project_id}/episodes/{episode_number}/review-result", response_model=Optional[ReviewResult])
+async def get_review_result(project_id: str, episode_number: int):
+    """
+    获取剧集的审核结果
+
+    Args:
+        project_id: 项目ID
+        episode_number: 集数
+
+    Returns:
+        ReviewResult: 审核结果,如果未审核则返回null
+    """
+    episodes = await episode_repo.list_by_project(project_id)
+    episode = None
+    for ep in episodes:
+        if ep.number == episode_number:
+            episode = ep
+            break
+
+    if not episode:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
+            detail=f"剧集不存在: EP{episode_number}"
+        )
+
+    # 返回审核结果(如果有)
+    return getattr(episode, 'reviewResult', None)
+
+
+# ============================================
+# 维度管理
+# ============================================
+
+@router.get("/dimensions", response_model=List[DimensionInfo])
+async def list_dimensions():
+    """
+    列出所有可用的审核维度
+
+    Returns:
+        List[DimensionInfo]: 维度信息列表
+    """
+    from app.core.review.review_manager import ReviewManager
+ + temp_manager = ReviewManager() + + dimensions = [] + for dim_type, dim_info in temp_manager.DIMENSIONS.items(): + dimension_info = DimensionInfo( + id=dim_type, + name=dim_info["name"], + description=dim_info["description"], + default_strictness=dim_info["default_strictness"], + supported_skill_ids=dim_info["skill_ids"] + ) + dimensions.append(dimension_info) + + return dimensions + + +@router.get("/dimensions/{dimension_id}", response_model=DimensionInfo) +async def get_dimension_info(dimension_id: DimensionType): + """ + 获取单个维度的详细信息 + + Args: + dimension_id: 维度ID + + Returns: + DimensionInfo: 维度信息 + """ + from app.core.review.review_manager import ReviewManager + + temp_manager = ReviewManager() + dim_info = temp_manager.DIMENSIONS.get(dimension_id) + + if not dim_info: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"维度不存在: {dimension_id}" + ) + + return DimensionInfo( + id=dimension_id, + name=dim_info["name"], + description=dim_info["description"], + default_strictness=dim_info["default_strictness"], + supported_skill_ids=dim_info["skill_ids"] + ) + + +# ============================================ +# 自定义规则管理 +# ============================================ + +@router.get("/custom-rules", response_model=List[CustomRule]) +async def list_custom_rules( + project_id: Optional[str] = None, + dimension: Optional[DimensionType] = None +): + """ + 列出自定义规则 + + Args: + project_id: 可选的项目ID,只返回该项目的规则 + dimension: 可选的维度筛选 + + Returns: + List[CustomRule]: 自定义规则列表 + """ + if project_id: + # 从项目获取规则 + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + config = getattr(project, 'reviewConfig', ReviewConfig()) + rules = config.custom_rules + else: + # 返回空列表(暂时不支持全局规则) + rules = [] + + # 按维度筛选 + if dimension: + rules = [r for r in rules if r.dimension == dimension] + + return rules + + +@router.post("/custom-rules", response_model=CustomRule, status_code=status.HTTP_201_CREATED) +async def create_custom_rule( + rule_data: CustomRuleCreate, + project_id: str +): + """ + 创建自定义规则 + + Args: + rule_data: 规则创建数据 + project_id: 项目ID + + Returns: + CustomRule: 创建的规则 + """ + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + try: + # 创建规则 + new_rule = CustomRule( + id=str(uuid.uuid4()), + name=rule_data.name, + description=rule_data.description, + trigger_condition=rule_data.trigger_condition, + dimension=rule_data.dimension, + severity=rule_data.severity, + enabled=True + ) + + # 添加到项目配置 + config = getattr(project, 'reviewConfig', ReviewConfig()) + config.custom_rules.append(new_rule) + + # 保存 + await project_repo.update(project_id, { + "reviewConfig": config.dict() + }) + + logger.info(f"创建自定义规则: {new_rule.id}") + + return new_rule + + except Exception as e: + logger.error(f"创建自定义规则失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"创建规则失败: {str(e)}" + ) + + +@router.put("/custom-rules/{rule_id}", response_model=CustomRule) +async def update_custom_rule( + rule_id: str, + rule_update: CustomRuleUpdate, + project_id: str +): + """ + 更新自定义规则 + + Args: + rule_id: 规则ID + rule_update: 规则更新数据 + project_id: 项目ID + + Returns: + CustomRule: 更新后的规则 + """ + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + 
) + + config = getattr(project, 'reviewConfig', ReviewConfig()) + + # 查找规则 + rule = None + for r in config.custom_rules: + if r.id == rule_id: + rule = r + break + + if not rule: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"规则不存在: {rule_id}" + ) + + try: + # 更新规则 + update_data = rule_update.dict(exclude_unset=True) + + for field, value in update_data.items(): + setattr(rule, field, value) + + rule.updated_at = datetime.now() + + # 保存 + await project_repo.update(project_id, { + "reviewConfig": config.dict() + }) + + logger.info(f"更新自定义规则: {rule_id}") + + return rule + + except Exception as e: + logger.error(f"更新自定义规则失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"更新规则失败: {str(e)}" + ) + + +@router.delete("/custom-rules/{rule_id}", status_code=status.HTTP_204_NO_CONTENT) +async def delete_custom_rule(rule_id: str, project_id: str): + """ + 删除自定义规则 + + Args: + rule_id: 规则ID + project_id: 项目ID + + Returns: + None + """ + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + config = getattr(project, 'reviewConfig', ReviewConfig()) + + # 查找并删除规则 + original_count = len(config.custom_rules) + config.custom_rules = [r for r in config.custom_rules if r.id != rule_id] + + if len(config.custom_rules) == original_count: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"规则不存在: {rule_id}" + ) + + try: + # 保存 + await project_repo.update(project_id, { + "reviewConfig": config.dict() + }) + + logger.info(f"删除自定义规则: {rule_id}") + + return None + + except Exception as e: + logger.error(f"删除自定义规则失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"删除规则失败: {str(e)}" + ) + + +@router.post("/custom-rules/{rule_id}/test") +async def test_custom_rule( + rule_id: str, + episode_id: str, + project_id: str, + review_mgr: ReviewManager = Depends(get_review_manager) +): + """ + 测试自定义规则 + + Args: + rule_id: 规则ID + episode_id: 剧集ID + project_id: 项目ID + review_mgr: 审核管理器 + + Returns: + 测试结果 + """ + project = await project_repo.get(project_id) + if not project: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"项目不存在: {project_id}" + ) + + episode = await episode_repo.get(episode_id) + if not episode: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"剧集不存在: {episode_id}" + ) + + config = getattr(project, 'reviewConfig', ReviewConfig()) + + # 查找规则 + rule = None + for r in config.custom_rules: + if r.id == rule_id: + rule = r + break + + if not rule: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"规则不存在: {rule_id}" + ) + + try: + # 测试规则 + issues = await review_mgr._check_single_rule( + episode=episode, + rule=rule, + config=config + ) + + return { + "rule_id": rule_id, + "rule_name": rule.name, + "episode_id": episode_id, + "violations": len(issues), + "issues": [issue.dict() for issue in issues] + } + + except Exception as e: + logger.error(f"测试规则失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"测试失败: {str(e)}" + ) diff --git a/backend/app/api/v1/skills.py b/backend/app/api/v1/skills.py new file mode 100644 index 0000000..5f26a8a --- /dev/null +++ b/backend/app/api/v1/skills.py @@ -0,0 +1,831 @@ +""" +Skill 管理 API 路由 + +提供: +1. Skill 的 CRUD 操作 +2. Agent-Skill 生命周期端点 +3. AI 辅助 Skill 创建(完整流程) +4. Skill 选择和路由(LLM 与 Skills 结合) +5. 
Agent 工作流配置 +""" +from fastapi import APIRouter, Depends, HTTPException, status +from typing import List, Optional, Dict, Any +from pydantic import BaseModel + +from app.models.skill import ( + Skill, + SkillCreate, + SkillUpdate, + SkillConfigUpdate, + SkillTestRequest, + SkillTestResponse, + SkillGenerateRequest, + SkillGenerateResponse, +) +from app.models.skill_integration import ( + SkillMetadata, + SkillTriggerType, + SkillStructure, + SkillGenerationRequest, + SkillGenerationResponse, + SkillAnalysis, + SkillPlanning, + SkillRefinementRequest, + SkillRefinementResponse, + SkillSelectionCriteria, + SkillSelectionResult, + SkillExecutionContext, + SkillExecutionResponse, + AgentWorkflowConfig, + AgentWorkflowStep, +) +from app.core.skills.skill_manager import get_skill_manager, SkillManager +from app.core.llm.glm_client import get_glm_client, GLMClient +from app.utils.logger import get_logger + +logger = get_logger(__name__) + +router = APIRouter(prefix="/skills", tags=["Skill管理"]) + + +# ============================================================================ +# 基础 CRUD 端点 +# ============================================================================ + + +@router.get("/", response_model=List[Skill]) +async def list_skills( + skill_type: Optional[str] = None, + category: Optional[str] = None, + skill_manager: SkillManager = Depends(get_skill_manager) +): + """ + 列出所有 Skills + + - **skill_type**: 筛选类型 (builtin/user) + - **category**: 筛选分类 + """ + skills = await skill_manager.list_skills(skill_type=skill_type, category=category) + return skills + + +@router.get("/{skill_id}", response_model=Skill) +async def get_skill( + skill_id: str, + skill_manager: SkillManager = Depends(get_skill_manager) +): + """获取 Skill 详情""" + skill = await skill_manager.load_skill(skill_id) + if not skill: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Skill 不存在: {skill_id}" + ) + return skill + + +@router.post("/", response_model=Skill, status_code=status.HTTP_201_CREATED) +async def create_skill( + skill_data: SkillCreate, + skill_manager: SkillManager = Depends(get_skill_manager) +): + """创建新的用户 Skill""" + try: + skill = await skill_manager.create_user_skill(skill_data) + return skill + except Exception as e: + logger.error(f"创建 Skill 失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"创建 Skill 失败: {str(e)}" + ) + + +@router.put("/{skill_id}", response_model=Skill) +async def update_skill( + skill_id: str, + skill_data: SkillUpdate, + skill_manager: SkillManager = Depends(get_skill_manager) +): + """更新 Skill 内容""" + skill = await skill_manager.update_user_skill(skill_id, skill_data) + if not skill: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Skill 不存在或不允许更新: {skill_id}" + ) + return skill + + +@router.put("/{skill_id}/config", response_model=Skill) +async def update_skill_config( + skill_id: str, + config_update: SkillConfigUpdate, + skill_manager: SkillManager = Depends(get_skill_manager) +): + """更新 Skill 配置""" + skill = await skill_manager.load_skill(skill_id) + if not skill: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Skill 不存在: {skill_id}" + ) + + # 更新配置 + if config_update.preset is not None: + skill.config.preset = config_update.preset + if config_update.parameters is not None: + skill.config.parameters.update(config_update.parameters) + if config_update.weights is not None: + skill.config.weights = config_update.weights + + return skill + + 
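Worth noting about the config endpoint above: the three fields are merged with different semantics. preset and weights are replaced wholesale, while parameters is merged key-by-key, so callers can tweak a single knob without resending the whole dict. A standalone sketch of just that merge behavior follows; the dataclass is a simplified stand-in for the real SkillConfig model in app.models.skill, not its actual definition.

# Standalone sketch of the merge semantics used by PUT /skills/{id}/config.
from dataclasses import dataclass, field
from typing import Any, Dict, Optional

@dataclass
class SkillConfigSketch:
    preset: str = "default"
    parameters: Dict[str, Any] = field(default_factory=dict)
    weights: Dict[str, float] = field(default_factory=dict)

def apply_config_update(config: SkillConfigSketch,
                        preset: Optional[str] = None,
                        parameters: Optional[Dict[str, Any]] = None,
                        weights: Optional[Dict[str, float]] = None) -> SkillConfigSketch:
    if preset is not None:
        config.preset = preset                 # replaced wholesale
    if parameters is not None:
        config.parameters.update(parameters)   # merged key-by-key
    if weights is not None:
        config.weights = weights               # replaced wholesale
    return config

cfg = SkillConfigSketch(parameters={"tone": "formal", "length": "short"})
apply_config_update(cfg, parameters={"length": "long"})
assert cfg.parameters == {"tone": "formal", "length": "long"}
assert cfg.preset == "default"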
+@router.post("/{skill_id}/test", response_model=SkillTestResponse) +async def test_skill( + skill_id: str, + test_request: SkillTestRequest, + skill_manager: SkillManager = Depends(get_skill_manager), + glm_client: GLMClient = Depends(get_glm_client) +): + """ + 测试 Skill + + 使用 Skill 的行为指导来处理测试输入,返回 LLM 的响应 + """ + skill = await skill_manager.load_skill(skill_id) + if not skill: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Skill 不存在: {skill_id}" + ) + + try: + # 使用 GLM 客户端的 chat_with_skill 方法 + response = await glm_client.chat_with_skill( + skill_behavior=skill.behavior_guide, + user_input=test_request.test_input, + context=test_request.context or {}, + temperature=test_request.temperature + ) + + return SkillTestResponse( + skill_id=skill_id, + skill_name=skill.name, + response=response + ) + + except Exception as e: + logger.error(f"测试 Skill 失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"测试 Skill 失败: {str(e)}" + ) + + +@router.delete("/{skill_id}", status_code=status.HTTP_204_NO_CONTENT) +async def delete_skill( + skill_id: str, + skill_manager: SkillManager = Depends(get_skill_manager) +): + """删除用户 Skill""" + success = await skill_manager.delete_user_skill(skill_id) + if not success: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Skill 不存在或不允许删除: {skill_id}" + ) + return None + + +@router.post("/{skill_id}/reload", response_model=Skill) +async def reload_skill( + skill_id: str, + skill_manager: SkillManager = Depends(get_skill_manager) +): + """重新加载 Skill(清除缓存)""" + skill = await skill_manager.reload_skill(skill_id) + if not skill: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Skill 不存在: {skill_id}" + ) + return skill + + +@router.get("/categories/list", response_model=List[str]) +async def list_categories( + skill_manager: SkillManager = Depends(get_skill_manager) +): + """列出所有 Skill 分类""" + skills = await skill_manager.list_skills() + categories = set(skill.category for skill in skills) + return sorted(list(categories)) + + +@router.post("/generate", response_model=SkillGenerateResponse) +async def generate_skill_with_ai( + request: SkillGenerateRequest, + skill_manager: SkillManager = Depends(get_skill_manager), + glm_client: GLMClient = Depends(get_glm_client) +): + """ + 使用 AI 生成 Skill(类似 Claude Code 的 skill-creator) + + 流程: + 1. 加载 skill-creator 的行为指导 + 2. 将用户需求和 skill-creator 标准一起发送给 GLM-4.7 + 3. AI 生成符合标准的 Skill 内容 + + Args: + request: 包含用户描述、分类、标签的请求 + + Returns: + 生成的 Skill 内容和建议信息 + """ + try: + # 1. 加载 skill-creator 的行为指导 + skill_creator = await skill_manager.load_skill("skill-creator") + if not skill_creator: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="skill-creator 未找到,请确保内置 Skills 正确安装" + ) + + # 2. 构建提示词 + user_requirements = f"""用户想要创建的 Skill 描述: +{request.description} +""" + + if request.category: + user_requirements += f"\n指定分类:{request.category}" + + if request.tags: + user_requirements += f"\n指定标签:{', '.join(request.tags)}" + + system_prompt = f"""你是一个专业的 Skill 创建专家。 + +以下是 skill-creator 的行为指导(关于如何创建有效 Skill 的指南): +{'━' * 60} +{skill_creator.behavior_guide} +{'━' * 60} + +你的任务是根据用户的需求,创建一个符合上述标准的 Skill。 + +**重要要求**: +1. SKILL.md 必须以 YAML frontmatter 开始,包含 name 和 description 字段 +2. description 应该清晰说明此 Skill 的用途和使用场景 +3. 行为指导部分应该简洁、具体,避免冗余的解释 +4. 使用 markdown 格式 +5. 
返回完整的 SKILL.md 内容 + +请以 JSON 格式返回结果,包含以下字段: +- suggested_id: 建议 Skill ID(kebab-case,如 dialogue-writer-ancient) +- suggested_name: 建议 Skill 名称(简短中文) +- skill_content: 完整的 SKILL.md 内容 +- category: 分类(如"编剧"、"审核"、"通用"等) +- suggested_tags: 建议标签数组 +- explanation: 对生成的 Skill 的简要说明(中文) +""" + + # 3. 调用 GLM-4.7 生成 + response = await glm_client.chat( + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_requirements} + ], + temperature=request.temperature + ) + + # 4. 解析响应 + import json + import re + + response_text = response["choices"][0]["message"]["content"] + + # 尝试提取 JSON + json_match = re.search(r'\{[\s\S]*\}', response_text) + if json_match: + result = json.loads(json_match.group()) + else: + # 如果没有找到 JSON,创建一个默认响应 + result = { + "suggested_id": "custom-skill", + "suggested_name": "自定义 Skill", + "skill_content": response_text, + "category": request.category or "通用", + "suggested_tags": request.tags or ["自定义"], + "explanation": "AI 生成的 Skill 内容" + } + + return SkillGenerateResponse(**result) + + except HTTPException: + raise + except Exception as e: + logger.error(f"AI 生成 Skill 失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"AI 生成 Skill 失败: {str(e)}" + ) + + +@router.get("/{skill_id}/with-references", response_model=dict) +async def get_skill_with_references( + skill_id: str, + skill_manager: SkillManager = Depends(get_skill_manager), + include_references: bool = True +): + """ + 获取 Skill 及其参考文件 + + 这是实现参考文件融入 LLM 的关键端点: + - 返回 Skill 对象 + - 返回所有参考文件的内容 + + Args: + skill_id: Skill ID + include_references: 是否包含参考文件内容 + """ + try: + skill = await skill_manager.load_skill(skill_id) + if not skill: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Skill 不存在: {skill_id}" + ) + + result = {"skill": skill.model_dump()} + + # 加载参考文件(如果需要) + if include_references: + references = await skill_manager.load_skill_references(skill.id) + result["references"] = references + + return result + + except HTTPException: + raise + except Exception as e: + logger.error(f"获取 Skill 失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"获取 Skill 失败: {str(e)}" + ) + + +@router.post("/analyze", response_model=SkillAnalysis) +async def analyze_skill_requirements( + request: SkillGenerationRequest, + skill_manager: SkillManager = Depends(get_skill_manager), + glm_client: GLMClient = Depends(get_glm_client) +): + """ + 分析 Skill 创建需求(完整流程 Step 1: Understanding) + + Args: + request: 包含用户意图、描述、用例的请求 + + Returns: + SkillAnalysis: 需求分析结果 + """ + try: + # Step 1: 加载 skill-creator + skill_creator = await skill_manager.load_skill("skill-creator") + if not skill_creator: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="skill-creator 未找到,请确保内置 Skills 正确安装" + ) + + # Step 2: 需求分析 + user_intent = request.user_intent or "创建新的 Skill" + system_prompt = f"""你是一个专业的 Skill 创建分析师。 + +以下是 skill-creator 的行为指导(关于如何创建有效 Skill 的指南): +{'━' * 60} +{skill_creator.behavior_guide} +{'━' * 60} + +你的任务是分析用户的需求:{user_intent} + +请从以下几个方面分析: +1. **核心功能**:这个 Skill 主要用于什么? +2. **目标用户**:谁会使用这个 Skill? +3. **技术要求**:前端、后端、数据分析等 +4. 
**特殊要求**:多语言支持、高精度输出、实时处理
+
+分析用户需求,并提出 Skill 创作建议。
+请以 JSON 格式返回分析结果,包含以下字段:
+- intent_analysis: 用户的意图分析
+- suggested_category: 建议的分类(如 "dialogue", "analysis", "writing")
+- suggested_features: 建议的核心功能列表
+- complexity_assessment: 复杂度评估(simple/medium/complex)
+- recommended_approach: 推荐的实现方式
+"""
+
+        # json 在本函数中既用于构造请求又用于解析响应,必须在首次使用前导入
+        import json
+
+        # 构建分析请求
+        analysis_request = {
+            "user_intent": user_intent,
+            "user_description": request.user_intent,
+            "use_cases": request.use_cases or []
+        }
+
+        # 调用 LLM 进行需求分析
+        analysis_response = await glm_client.chat(
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": f"分析以下需求:{json.dumps(analysis_request, ensure_ascii=False)}"}
+            ],
+            temperature=0.5
+        )
+
+        # 解析分析结果
+        try:
+            analysis_result = json.loads(analysis_response["choices"][0]["message"]["content"])
+
+            # 提取结构化数据
+            intent_analysis = analysis_result.get("intent_analysis", {})
+            suggested_category = analysis_result.get("suggested_category", "general")
+            suggested_features = analysis_result.get("suggested_features", [])
+            complexity_assessment = analysis_result.get("complexity_assessment", "medium")
+            recommended_approach = analysis_result.get("recommended_approach", "standard")
+
+        except json.JSONDecodeError:
+            # 如果无法解析,使用默认分析
+            intent_analysis = {"primary": "创建新的 Skill", "details": "用户想要创建自定义 Skill"}
+            suggested_category = "general"
+            suggested_features = ["standard_dialogue_creation", "basic_content_editing"]
+            complexity_assessment = "medium"
+            recommended_approach = "standard"
+
+        return SkillAnalysis(
+            intent_analysis=intent_analysis,
+            suggested_category=suggested_category,
+            suggested_features=suggested_features,
+            complexity_assessment=complexity_assessment,
+            recommended_approach=recommended_approach
+        )
+
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"需求分析失败: {str(e)}")
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"需求分析失败: {str(e)}"
+        )
+
+
+# ============================================================================
+# Skill 选择和路由(LLM 自动选择合适的 Skills)
+# ============================================================================
+
+@router.post("/select-skills", response_model=SkillSelectionResult)
+async def select_skills_for_context(
+    criteria: SkillSelectionCriteria,
+    skill_manager: SkillManager = Depends(get_skill_manager),
+    glm_client: GLMClient = Depends(get_glm_client)
+):
+    """
+    LLM 根据描述自动选择最合适的 Skills
+
+    流程:
+    1. 分析用户输入
+    2. 匹配所有 Skills 的 metadata
+    3. 评分和排序
+    4. 返回选择结果
+
+    Args:
+        criteria: 选择条件(用户意图、上下文、约束)
+    """
+    try:
+        import json
+
+        # 加载所有可用 Skills
+        all_skills = await skill_manager.list_skills(skill_type="user")
+
+        # 提取 Skills 的 metadata
+        skills_metadata = []
+        for skill in all_skills:
+            metadata = {
+                "id": skill.id,
+                "name": skill.name,
+                "description": skill.description,
+                "category": skill.category,
+                "tags": skill.tags,
+                "keywords": skill.description.split(),
+                "capabilities": skill.behavior_guide[:200] if skill.behavior_guide else ""
+            }
+            skills_metadata.append(metadata)
+
+        # 构建提示词(先准备好可选片段,避免引用未定义的变量)
+        context_info = ""
+        if criteria.context:
+            context_info = f"\n上下文信息:{json.dumps(criteria.context, ensure_ascii=False)}\n"
+        exclude_info = ""
+        if criteria.exclude_skills:
+            exclude_info = f"\n排除的 Skills:{', '.join(criteria.exclude_skills)}\n"
+
+        system_prompt = f"""你是一个智能 Skill 选择助手。
+
+以下是可以使用的 Skills 及其元数据:
+
+{'━' * 60}
+"""
+        for i, skill_meta in enumerate(skills_metadata, 1):
+            system_prompt += f"\n{i}. {skill_meta['name']}: {skill_meta['description']}\n"
+            if skill_meta['capabilities']:
+                system_prompt += f"\n   能力:{skill_meta['capabilities']}\n"
+            system_prompt += "\n"
+
+        system_prompt += f"""你的任务是根据用户的需求和偏好,从以上 Skills 中选择最合适的。
+
+用户需求:
+- {criteria.user_intent}
+{exclude_info}
+{context_info}
+{f"要求分类:{criteria.required_category}" if criteria.required_category else ""}
+{f"要求标签:{criteria.required_tags}" if criteria.required_tags else ""}
+
+选择标准:
+1. 相关性(description/keywords 匹配度)
+2. 功能覆盖度(capabilities 是否满足需求)
+3. 难度适配(complexity_assessment 是否匹配)
+
+请返回 JSON 数组,包含选中的 Skill ID 列表和选择理由。
+每个元素应包含:
+- skill_id: Skill ID
+- score: 匹配分数(0-100)
+- reason: 选择理由
+"""
+
+        def _extract_skill_ids(raw_text: str) -> list:
+            # LLM 可能返回 ["id", ...] 或 [{"skill_id": ..., "score": ..., "reason": ...}, ...]
+            try:
+                parsed = json.loads(raw_text)
+            except (json.JSONDecodeError, TypeError):
+                return []
+            ids = []
+            for item in parsed if isinstance(parsed, list) else []:
+                if isinstance(item, str):
+                    ids.append(item)
+                elif isinstance(item, dict) and "skill_id" in item:
+                    ids.append(item["skill_id"])
+            return ids
+
+        # 调用 LLM 进行选择
+        selection_response = await glm_client.chat(
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": f"请分析以下 Skills,并返回最合适的 3-5 个:{json.dumps(skills_metadata, ensure_ascii=False)}"}
+            ],
+            temperature=0.3
+        )
+
+        # 解析选择结果
+        selected_ids = _extract_skill_ids(selection_response["choices"][0]["message"]["content"])
+
+        # 如果是复杂场景,用更严格的结构化提示词重试一次
+        if len(skills_metadata) > 5 or criteria.user_intent == "复杂需求分析":
+            # 使用结构化输出确保可解析性
+            structured_prompt = """
+请以以下 JSON 格式返回选择,必须是可以解析的 JSON 数组:
+[
+  {"skill_id": "...", "score": 85, "reason": "..."},
+  ...
+]
+确保输出是有效的 JSON 格式。
+"""
+            selection_response = await glm_client.chat(
+                messages=[
+                    {"role": "system", "content": structured_prompt},
+                    {"role": "user", "content": f"用户需求:{criteria.user_intent or '标准 Skill 创建'}\n\nAvailable Skills: {json.dumps(skills_metadata, ensure_ascii=False)}"}
+                ],
+                temperature=0.2
+            )
+
+            selected_ids = _extract_skill_ids(selection_response["choices"][0]["message"]["content"])
+
+        return SkillSelectionResult(
+            selected_skills=[s for s in all_skills if s.id in selected_ids],
+            selection_reason="基于需求分析选择了最相关的 Skills",
+            confidence_scores={}
+        )
+
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Skill 选择失败: {str(e)}")
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"Skill 选择失败: {str(e)}"
+        )
+
+
+# ============================================================================
+# Agent 工作流配置 API
+# ============================================================================
+
+@router.post("/agent-workflow/configure", response_model=AgentWorkflowConfig)
+async def configure_agent_workflow(
+    config: AgentWorkflowConfig,
+    skill_manager: SkillManager = Depends(get_skill_manager)
+):
+    """
+    配置 Agent 工作流的 Skill 使用方式
+
+    这是实现 Agent 与 Skill 深度结合的关键配置:
+    - 定义哪些工作流步骤使用哪些 Skills
+    - 设置默认温度
+    - 启用/禁用 Skill 替代
+
+    Args:
+        config: 包含工作流步骤配置
+    """
+    try:
+        # 保存配置
+        await skill_manager.save_workflow_config(config)
+
+        # 返回保存后的配置,与 response_model=AgentWorkflowConfig 保持一致
+        return config
+
+    except Exception as e:
+        logger.error(f"配置失败: {str(e)}")
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"配置失败: {str(e)}"
+        )
+
+
+# ============================================================================
+# Agent-Skill 生命周期端点 - 使用 skill-creator 脚本
+# ============================================================================
+
+class
SkillInitRequest(BaseModel):
+    """初始化 Skill 模板请求"""
+    skill_name: str  # Skill 名称(hyphen-case)
+    output_path: Optional[str] = None  # 输出路径
+
+
+class SkillValidateScriptRequest(BaseModel):
+    """脚本验证 Skill 请求"""
+    skill_id: str  # 要验证的 Skill ID
+
+
+class SkillPackageRequest(BaseModel):
+    """打包 Skill 请求"""
+    skill_id: str  # 要打包的 Skill ID
+    output_dir: Optional[str] = None  # 输出目录
+
+
+@router.post("/init", response_model=dict)
+async def init_skill_template(
+    request: SkillInitRequest,
+    skill_manager: SkillManager = Depends(get_skill_manager)
+):
+    """
+    初始化 Skill 模板(使用 skill-creator 的 init_skill.py 脚本)
+
+    这是 Agent-Skill 生命周期的正确实现:
+    - 调用 skill-creator 的 init_skill.py 脚本
+    - 创建标准的 Skill 目录结构(SKILL.md、scripts/、references/、assets/)
+    """
+    try:
+        result = await skill_manager.init_skill_template(
+            skill_name=request.skill_name,
+            output_path=request.output_path
+        )
+
+        if result["success"]:
+            logger.info(f"成功初始化 Skill 模板: {request.skill_name}")
+            return {
+                "success": True,
+                "message": f"Skill 模板 '{request.skill_name}' 初始化成功",
+                "output": result["output"]
+            }
+        else:
+            logger.error(f"初始化 Skill 失败: {result.get('error')}")
+            raise HTTPException(
+                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+                detail=result.get("error", "初始化失败")
+            )
+
+    except HTTPException:
+        # 透传上面主动抛出的 HTTPException,避免被通用分支二次包装
+        raise
+    except Exception as e:
+        logger.error(f"初始化 Skill 异常: {str(e)}")
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"初始化 Skill 异常: {str(e)}"
+        )
+
+
+@router.post("/validate-script", response_model=dict)
+async def validate_skill_script(
+    request: SkillValidateScriptRequest,
+    skill_manager: SkillManager = Depends(get_skill_manager)
+):
+    """
+    验证 Skill 结构(使用 skill-creator 的 quick_validate.py 脚本)
+
+    这是 Agent-Skill 生命周期的正确实现:
+    - 调用 skill-creator 的 quick_validate.py 脚本
+    - 检查 YAML frontmatter、命名规范等
+    """
+    try:
+        result = await skill_manager.validate_skill_structure(request.skill_id)
+
+        if result["success"]:
+            logger.info(f"Skill 验证通过: {request.skill_id}")
+            return {
+                "success": True,
+                "valid": True,
+                "message": result["output"]
+            }
+        else:
+            logger.warning(f"Skill 验证失败: {request.skill_id}")
+            return {
+                "success": True,
+                "valid": False,
+                "error": result.get("error")
+            }
+
+    except Exception as e:
+        logger.error(f"验证 Skill 异常: {str(e)}")
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"验证 Skill 异常: {str(e)}"
+        )
+
+
+@router.post("/package", response_model=dict)
+async def package_skill(
+    request: SkillPackageRequest,
+    skill_manager: SkillManager = Depends(get_skill_manager)
+):
+    """
+    打包 Skill(使用 skill-creator 的 package_skill.py 脚本)
+
+    这是 Agent-Skill 生命周期的正确实现:
+    - 调用 skill-creator 的 package_skill.py 脚本
+    - 将 Skill 打包成 .skill 文件用于分发
+    """
+    try:
+        result = await skill_manager.package_skill(
+            skill_id=request.skill_id,
+            output_dir=request.output_dir
+        )
+
+        if result["success"]:
+            logger.info(f"成功打包 Skill: {request.skill_id}")
+            return {
+                "success": True,
+                "message": f"Skill '{request.skill_id}' 打包成功",
+                "output": result["output"]
+            }
+        else:
+            logger.error(f"打包 Skill 失败: {request.skill_id}")
+            raise HTTPException(
+                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+                detail=result.get("error", "打包失败")
+            )
+
+    except HTTPException:
+        # 同上:避免把主动抛出的 500 再包一层 "打包 Skill 异常"
+        raise
+    except Exception as e:
+        logger.error(f"打包 Skill 异常: {str(e)}")
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"打包 Skill 异常: {str(e)}"
+        )
+
+
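+# 下面的 /tools 端点直接转发 SkillManager.get_available_tools_for_llm() 的结果。
+# 返回形态示意(假设性示例,具体字段以 SkillManager 的实现为准),
+# 即标准的 JSON Schema 工具描述,可注入 System Prompt 或作为 API 的 tools 参数:
+#
+#     [
+#         {
+#             "name": "init_skill",
+#             "description": "初始化 Skill 模板",
+#             "parameters": {
+#                 "type": "object",
+#                 "properties": {"skill_name": {"type": "string"}},
+#                 "required": ["skill_name"]
+#             }
+#         }
+#     ]
+
+@router.get("/tools", response_model=List[dict])
+async def list_available_tools(
+    skill_manager: SkillManager = Depends(get_skill_manager)
+):
+    """
+    列出所有已注册的工具(Agent-Skill 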
生命周期:注册阶段) + + 返回所有已注册工具的 JSON Schema,用于: + - 注入到 LLM System Prompt + - 或作为 API 的 tools 参数 + """ + return skill_manager.get_available_tools_for_llm() + + +@router.post("/execute-tool", response_model=dict) +async def execute_tool( + tool_name: str, + parameters: dict, + skill_manager: SkillManager = Depends(get_skill_manager) +): + """ + 执行工具(Agent-Skill 生命周期:路由 → 执行 → 反馈) + + 实现: + - 路由:根据 tool_name 找到对应的 handler + - 执行:调用 handler 执行实际操作 + - 反馈:返回结果给调用者(通常是 LLM) + + 这是 Agent-Skill 生命周期的核心实现 + """ + result = await skill_manager.route_and_execute_tool(tool_name, parameters) + return result diff --git a/backend/app/api/v1/uploads.py b/backend/app/api/v1/uploads.py new file mode 100644 index 0000000..190b5f2 --- /dev/null +++ b/backend/app/api/v1/uploads.py @@ -0,0 +1,218 @@ +""" +文件上传 API 路由 + +处理 Skill 参考文件的上传、管理和删除 +""" +from fastapi import APIRouter, Depends, HTTPException, status, UploadFile, File +from typing import List, Optional +from pydantic import BaseModel +import shutil +from pathlib import Path + +from app.core.skills.skill_manager import get_skill_manager, SkillManager +from app.utils.logger import get_logger + +logger = get_logger(__name__) + +router = APIRouter(prefix="/uploads", tags=["文件上传"]) + + +class FileListResponse(BaseModel): + """文件列表响应""" + skill_id: str + files: List[dict] + + +@router.post("/skills/{skill_id}/references") +async def upload_reference_file( + skill_id: str, + file: UploadFile = File(...), + skill_manager: SkillManager = Depends(get_skill_manager) +): + """ + 上传 Skill 参考文件 + + 上传的文件将保存到 skill 的 references/ 目录 + 支持的文件类型:.md, .txt, .json, .yaml, .yml + """ + # 验证 Skill 存在且为 user 类型 + skill = await skill_manager.load_skill(skill_id) + if not skill: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Skill 不存在: {skill_id}" + ) + + if skill.type == 'builtin': + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="不允许上传文件到 builtin Skill" + ) + + # 验证文件类型 + allowed_extensions = {'.md', '.txt', '.json', '.yaml', '.yml', '.pdf', '.doc', '.docx'} + file_ext = Path(file.filename).suffix.lower() + if file_ext not in allowed_extensions: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"不支持的文件类型: {file_ext}。支持的类型: {', '.join(allowed_extensions)}" + ) + + # 确保 references 目录存在 + skill_path = skill_manager._find_skill_path(skill_id) + references_dir = skill_path.parent / "references" + references_dir.mkdir(exist_ok=True) + + # 保存文件 + file_path = references_dir / file.filename + try: + with file_path.open("wb") as buffer: + shutil.copyfileobj(file.file, buffer) + + logger.info(f"文件上传成功: {skill_id}/references/{file.filename}") + + return { + "success": True, + "message": "文件上传成功", + "file": { + "name": file.filename, + "path": str(file_path.relative_to(skill_manager.skills_dir)), + "size": file_path.stat().st_size + } + } + + except Exception as e: + logger.error(f"文件上传失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"文件上传失败: {str(e)}" + ) + + +@router.get("/skills/{skill_id}/references", response_model=FileListResponse) +async def list_reference_files( + skill_id: str, + skill_manager: SkillManager = Depends(get_skill_manager) +): + """ + 列出 Skill 的所有参考文件 + """ + skill = await skill_manager.load_skill(skill_id) + if not skill: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Skill 不存在: {skill_id}" + ) + + skill_path = skill_manager._find_skill_path(skill_id) + references_dir = skill_path.parent / "references" + + files = [] 
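+    # 响应形态示意(按下方构造的字段给出,skill 与文件名为虚构示例):
+    # {"skill_id": "my-skill", "files": [{"name": "style-guide.md", "size": 1024, "modified": 1706170000.0}]}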
+ if references_dir.exists(): + for file_path in references_dir.iterdir(): + if file_path.is_file(): + files.append({ + "name": file_path.name, + "size": file_path.stat().st_size, + "modified": file_path.stat().st_mtime + }) + + return FileListResponse(skill_id=skill_id, files=files) + + +@router.delete("/skills/{skill_id}/references/{filename}") +async def delete_reference_file( + skill_id: str, + filename: str, + skill_manager: SkillManager = Depends(get_skill_manager) +): + """ + 删除 Skill 的参考文件 + """ + skill = await skill_manager.load_skill(skill_id) + if not skill: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Skill 不存在: {skill_id}" + ) + + if skill.type == 'builtin': + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="不允许删除 builtin Skill 的文件" + ) + + skill_path = skill_manager._find_skill_path(skill_id) + references_dir = skill_path.parent / "references" + file_path = references_dir / filename + + if not file_path.exists(): + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"文件不存在: {filename}" + ) + + try: + file_path.unlink() + logger.info(f"文件删除成功: {skill_id}/references/{filename}") + + return {"success": True, "message": "文件删除成功"} + + except Exception as e: + logger.error(f"文件删除失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"文件删除失败: {str(e)}" + ) + + +@router.get("/skills/{skill_id}/references/{filename}") +async def get_reference_file( + skill_id: str, + filename: str, + skill_manager: SkillManager = Depends(get_skill_manager) +): + """ + 读取 Skill 参考文件内容(用于预览) + """ + skill = await skill_manager.load_skill(skill_id) + if not skill: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Skill 不存在: {skill_id}" + ) + + skill_path = skill_manager._find_skill_path(skill_id) + references_dir = skill_path.parent / "references" + file_path = references_dir / filename + + if not file_path.exists(): + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"文件不存在: {filename}" + ) + + try: + content = file_path.read_text(encoding='utf-8') + return { + "success": True, + "filename": filename, + "content": content + } + + except UnicodeDecodeError: + # 二进制文件(如 PDF)无法读取为文本 + return { + "success": True, + "filename": filename, + "content": None, + "binary": True, + "message": "这是二进制文件,无法预览内容" + } + + except Exception as e: + logger.error(f"文件读取失败: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"文件读取失败: {str(e)}" + ) diff --git a/backend/app/api/v1/websocket.py b/backend/app/api/v1/websocket.py new file mode 100644 index 0000000..4fc7683 --- /dev/null +++ b/backend/app/api/v1/websocket.py @@ -0,0 +1,461 @@ +""" +WebSocket Streaming API + +提供实时执行进度更新的 WebSocket 端点 +""" +from fastapi import APIRouter, WebSocket, WebSocketDisconnect, Depends, Query +from typing import Dict, Set, Optional, Any +import json +import asyncio +from datetime import datetime + +from app.utils.logger import get_logger + +logger = get_logger(__name__) + +router = APIRouter() + + +# ============================================ +# WebSocket 连接管理 +# ============================================ + +class ConnectionManager: + """WebSocket 连接管理器""" + + def __init__(self): + # 项目ID -> WebSocket连接集合 + self.project_connections: Dict[str, Set[WebSocket]] = {} + # 批次ID -> WebSocket连接集合 + self.batch_connections: Dict[str, Set[WebSocket]] = {} + + async def connect_to_project(self, websocket: WebSocket, project_id: str): + 
"""连接到项目执行流""" + await websocket.accept() + if project_id not in self.project_connections: + self.project_connections[project_id] = set() + self.project_connections[project_id].add(websocket) + logger.info(f"WebSocket 已连接到项目: {project_id}") + + async def connect_to_batch(self, websocket: WebSocket, batch_id: str): + """连接到批次执行流""" + await websocket.accept() + if batch_id not in self.batch_connections: + self.batch_connections[batch_id] = set() + self.batch_connections[batch_id].add(websocket) + logger.info(f"WebSocket 已连接到批次: {batch_id}") + + def disconnect(self, websocket: WebSocket): + """断开连接""" + # 从所有项目连接中移除 + for project_id, connections in self.project_connections.items(): + if websocket in connections: + connections.remove(websocket) + logger.info(f"WebSocket 已从项目断开: {project_id}") + if not connections: + del self.project_connections[project_id] + + # 从所有批次连接中移除 + for batch_id, connections in self.batch_connections.items(): + if websocket in connections: + connections.remove(websocket) + logger.info(f"WebSocket 已从批次断开: {batch_id}") + if not connections: + del self.batch_connections[batch_id] + + async def send_to_project( + self, + project_id: str, + message: Dict[str, Any], + exclude: Optional[WebSocket] = None + ): + """向项目的所有连接发送消息""" + if project_id in self.project_connections: + disconnected = set() + for connection in self.project_connections[project_id]: + if connection != exclude: + try: + await connection.send_json(message) + except Exception as e: + logger.error(f"发送消息失败: {str(e)}") + disconnected.add(connection) + + # 清理断开的连接 + for connection in disconnected: + self.disconnect(connection) + + async def send_to_batch( + self, + batch_id: str, + message: Dict[str, Any], + exclude: Optional[WebSocket] = None + ): + """向批次的所有连接发送消息""" + if batch_id in self.batch_connections: + disconnected = set() + for connection in self.batch_connections[batch_id]: + if connection != exclude: + try: + await connection.send_json(message) + except Exception as e: + logger.error(f"发送消息失败: {str(e)}") + disconnected.add(connection) + + # 清理断开的连接 + for connection in disconnected: + self.disconnect(connection) + + def get_project_connections_count(self, project_id: str) -> int: + """获取项目的连接数""" + return len(self.project_connections.get(project_id, set())) + + def get_batch_connections_count(self, batch_id: str) -> int: + """获取批次的连接数""" + return len(self.batch_connections.get(batch_id, set())) + + +# 全局连接管理器 +manager = ConnectionManager() + + +# ============================================ +# WebSocket 端点 +# ============================================ + +@router.websocket("/ws/projects/{project_id}/execute") +async def websocket_project_execution( + websocket: WebSocket, + project_id: str +): + """ + 项目执行 WebSocket 端点 + + 实时接收项目执行进度更新,包括: + - 执行开始/完成事件 + - 各阶段进度(结构分析、大纲生成、对话创作等) + - 质量检查结果 + - 错误信息 + + 消息格式: + { + "type": "stage_start|stage_progress|stage_complete|error|complete", + "data": {...}, + "timestamp": "ISO 8601" + } + """ + await manager.connect_to_project(websocket, project_id) + + try: + # 发送连接确认 + await websocket.send_json({ + "type": "connected", + "data": { + "project_id": project_id, + "message": "已连接到项目执行流", + "timestamp": datetime.now().isoformat() + } + }) + + # 保持连接并接收客户端消息 + while True: + try: + # 接收客户端消息(可用于心跳、控制命令等) + data = await websocket.receive_text() + message = json.loads(data) + + # 处理客户端消息 + await _handle_client_message(websocket, project_id, message) + + except WebSocketDisconnect: + logger.info(f"WebSocket 客户端主动断开: {project_id}") + break + except 
json.JSONDecodeError: + await websocket.send_json({ + "type": "error", + "data": { + "message": "无效的 JSON 格式" + } + }) + except Exception as e: + logger.error(f"处理 WebSocket 消息错误: {str(e)}") + await websocket.send_json({ + "type": "error", + "data": { + "message": f"处理消息错误: {str(e)}" + } + }) + + except Exception as e: + logger.error(f"WebSocket 连接错误: {str(e)}") + + finally: + manager.disconnect(websocket) + + +@router.websocket("/ws/batches/{batch_id}/execute") +async def websocket_batch_execution( + websocket: WebSocket, + batch_id: str +): + """ + 批量执行 WebSocket 端点 + + 实时接收批量执行进度更新,包括: + - 批次开始/完成事件 + - 各剧集执行进度 + - 整体进度百分比 + - 质量统计信息 + - 错误信息 + + 消息格式: + { + "type": "batch_start|episode_start|episode_complete|progress|batch_complete|error", + "data": {...}, + "timestamp": "ISO 8601" + } + """ + await manager.connect_to_batch(websocket, batch_id) + + try: + # 发送连接确认 + await websocket.send_json({ + "type": "connected", + "data": { + "batch_id": batch_id, + "message": "已连接到批量执行流", + "timestamp": datetime.now().isoformat() + } + }) + + # 保持连接 + while True: + try: + data = await websocket.receive_text() + message = json.loads(data) + await _handle_client_message(websocket, batch_id, message) + + except WebSocketDisconnect: + logger.info(f"WebSocket 客户端主动断开: {batch_id}") + break + except json.JSONDecodeError: + await websocket.send_json({ + "type": "error", + "data": {"message": "无效的 JSON 格式"} + }) + except Exception as e: + logger.error(f"处理 WebSocket 消息错误: {str(e)}") + + finally: + manager.disconnect(websocket) + + +# ============================================ +# 消息处理 +# ============================================ + +async def _handle_client_message( + websocket: WebSocket, + id: str, + message: Dict[str, Any] +): + """ + 处理客户端发送的消息 + + Args: + websocket: WebSocket 连接 + id: 项目ID或批次ID + message: 客户端消息 + """ + message_type = message.get("type") + + if message_type == "ping": + # 心跳响应 + await websocket.send_json({ + "type": "pong", + "data": { + "timestamp": datetime.now().isoformat() + } + }) + + elif message_type == "get_status": + # 请求状态 + from app.core.execution.batch_executor import get_batch_executor + executor = get_batch_executor() + status = executor.get_batch_status(id) + + await websocket.send_json({ + "type": "status", + "data": status or {"message": "未找到执行状态"} + }) + + else: + await websocket.send_json({ + "type": "error", + "data": { + "message": f"未知消息类型: {message_type}" + } + }) + + +# ============================================ +# 辅助函数 - 用于从其他模块发送消息 +# ============================================ + +async def broadcast_stage_update( + project_id: str, + episode_number: int, + stage: str, + data: Dict[str, Any] +): + """ + 广播阶段更新消息 + + Args: + project_id: 项目ID + episode_number: 集数 + stage: 阶段名称 + data: 阶段数据 + """ + message = { + "type": "stage_update", + "data": { + "project_id": project_id, + "episode_number": episode_number, + "stage": stage, + **data + }, + "timestamp": datetime.now().isoformat() + } + + await manager.send_to_project(project_id, message) + + +async def broadcast_episode_complete( + project_id: str, + episode_number: int, + success: bool, + quality_score: float, + data: Dict[str, Any] +): + """ + 广播剧集完成消息 + + Args: + project_id: 项目ID + episode_number: 集数 + success: 是否成功 + quality_score: 质量分数 + data: 额外数据 + """ + message = { + "type": "episode_complete", + "data": { + "project_id": project_id, + "episode_number": episode_number, + "success": success, + "quality_score": quality_score, + **data + }, + "timestamp": datetime.now().isoformat() + } + + await 
manager.send_to_project(project_id, message) + + +async def broadcast_batch_progress( + batch_id: str, + current_episode: int, + total_episodes: int, + completed: int, + failed: int, + data: Dict[str, Any] +): + """ + 广播批量执行进度 + + Args: + batch_id: 批次ID + current_episode: 当前集数 + total_episodes: 总集数 + completed: 已完成数 + failed: 失败数 + data: 额外数据 + """ + message = { + "type": "batch_progress", + "data": { + "batch_id": batch_id, + "current_episode": current_episode, + "total_episodes": total_episodes, + "completed_episodes": completed, + "failed_episodes": failed, + "progress_percentage": (completed / total_episodes * 100) if total_episodes > 0 else 0, + **data + }, + "timestamp": datetime.now().isoformat() + } + + await manager.send_to_batch(batch_id, message) + + +async def broadcast_error( + project_id: str, + episode_number: Optional[int], + error: str, + error_type: str = "execution_error" +): + """ + 广播错误消息 + + Args: + project_id: 项目ID + episode_number: 集数(可选) + error: 错误信息 + error_type: 错误类型 + """ + message = { + "type": "error", + "data": { + "project_id": project_id, + "episode_number": episode_number, + "error": error, + "error_type": error_type + }, + "timestamp": datetime.now().isoformat() + } + + await manager.send_to_project(project_id, message) + + +async def broadcast_batch_complete( + batch_id: str, + summary: Dict[str, Any] +): + """ + 广播批量执行完成 + + Args: + batch_id: 批次ID + summary: 执行摘要 + """ + message = { + "type": "batch_complete", + "data": { + "batch_id": batch_id, + **summary + }, + "timestamp": datetime.now().isoformat() + } + + await manager.send_to_batch(batch_id, message) + + +# 导出连接管理器和辅助函数 +__all__ = [ + "manager", + "broadcast_stage_update", + "broadcast_episode_complete", + "broadcast_batch_progress", + "broadcast_error", + "broadcast_batch_complete" +] diff --git a/backend/app/config.py b/backend/app/config.py new file mode 100644 index 0000000..5f97717 --- /dev/null +++ b/backend/app/config.py @@ -0,0 +1,100 @@ +""" +Creative Studio 配置管理 +支持环境变量和配置文件 +""" +from pydantic_settings import BaseSettings +from functools import lru_cache +from typing import List +import os + + +class Settings(BaseSettings): + """应用配置""" + + # ============================================ + # 应用配置 + # ============================================ + app_name: str = "Creative Studio" + app_version: str = "1.0.0" + debug: bool = True + environment: str = "development" + + # 服务器配置 + host: str = "0.0.0.0" + port: int = 8000 + + # ============================================ + # GLM API 配置 + # ============================================ + zai_api_key: str + zai_model: str = "glm-4.7" + + # ============================================ + # 数据库配置 + # ============================================ + mongodb_url: str = "mongodb://localhost:27017" + mongodb_database: str = "creative_studio" + + redis_url: str = "redis://localhost:6379/0" + + # ============================================ + # 安全配置 + # ============================================ + secret_key: str + algorithm: str = "HS256" + access_token_expire_minutes: int = 60 + refresh_token_expire_days: int = 7 + + # ============================================ + # 文件存储配置 + # ============================================ + storage_path: str = "./storage" + max_file_size: int = 100 # MB + + # ============================================ + # Celery 配置 + # ============================================ + celery_broker_url: str = "redis://localhost:6379/1" + celery_result_backend: str = "redis://localhost:6379/2" + + # 
============================================ + # CORS 配置 + # ============================================ + allowed_origins: str = "http://localhost:5173,http://localhost:3000" + + # ============================================ + # 日志配置 + # ============================================ + log_level: str = "INFO" + log_file: str = "./logs/app.log" + + # ============================================ + # Skill 存储配置 + # ============================================ + skills_dir: str = "./skills_storage" + + @property + def cors_origins(self) -> List[str]: + """解析 CORS 允许的源""" + return [origin.strip() for origin in self.allowed_origins.split(",")] + + class Config: + env_file = ".env" + env_file_encoding = "utf-8" + case_sensitive = False + + +@lru_cache() +def get_settings() -> Settings: + """ + 获取配置单例 + + 使用: + from app.config import settings + settings = get_settings() + """ + return Settings() + + +# 导出配置实例 +settings = get_settings() diff --git a/backend/app/core/__init__.py b/backend/app/core/__init__.py new file mode 100644 index 0000000..3e83c63 --- /dev/null +++ b/backend/app/core/__init__.py @@ -0,0 +1 @@ +# Core module diff --git a/backend/app/core/agents/__init__.py b/backend/app/core/agents/__init__.py new file mode 100644 index 0000000..26fc649 --- /dev/null +++ b/backend/app/core/agents/__init__.py @@ -0,0 +1,6 @@ +""" +Agents 模块 +""" +from app.core.agents.series_creation_agent import SeriesCreationAgent, get_series_agent + +__all__ = ["SeriesCreationAgent", "get_series_agent"] diff --git a/backend/app/core/agents/series_creation_agent.py b/backend/app/core/agents/series_creation_agent.py new file mode 100644 index 0000000..2d72111 --- /dev/null +++ b/backend/app/core/agents/series_creation_agent.py @@ -0,0 +1,381 @@ +""" +剧集创作 Agent + +固定工作流: +1. 加载全局上下文和历史记忆 +2. 结构分析 +3. 大纲生成 +4. 对话创作 +5. 一致性审核 +6. 
更新记忆系统 +""" +from typing import Dict, Any, Optional +from datetime import datetime +from app.models.project import SeriesProject, Episode, EpisodeIssue, TaskType +from app.core.skills.skill_manager import get_skill_manager +from app.core.skills.skill_resolver import get_skill_resolver +from app.core.llm.glm_client import get_glm_client +from app.utils.logger import get_logger +import json + +logger = get_logger(__name__) + + +class SeriesCreationAgent: + """ + 剧集创作 Agent + + 负责执行完整的剧集创作工作流 + """ + + def __init__(self): + self.skill_manager = get_skill_manager() + self.skill_resolver = get_skill_resolver() + self.glm_client = get_glm_client() + logger.info("剧集创作 Agent 初始化完成") + + async def execute_episode( + self, + project: SeriesProject, + episode_number: int, + title: str = "" + ) -> Episode: + """ + 执行单集创作 + + Args: + project: 剧集项目 + episode_number: 集数 + title: 集标题(可选) + + Returns: + 创作的 Episode 对象 + """ + logger.info(f"开始创作 EP{episode_number}") + + # 创建 Episode 对象 + episode = Episode( + projectId=project.id, + number=episode_number, + title=title or f"第{episode_number}集", + status="writing" + ) + + try: + # ============================================ + # 阶段 1: 结构分析 + # ============================================ + logger.info(f"EP{episode_number} - 阶段 1: 结构分析") + structure = await self._analyze_structure(project, episode_number) + episode.structure = structure + + # ============================================ + # 阶段 2: 大纲生成 + # ============================================ + logger.info(f"EP{episode_number} - 阶段 2: 大纲生成") + outline = await self._generate_outline(project, episode_number, structure) + episode.outline = outline + + # ============================================ + # 阶段 3: 对话创作(核心) + # ============================================ + logger.info(f"EP{episode_number} - 阶段 3: 对话创作") + content = await self._write_dialogue(project, episode_number, outline) + episode.content = content + + # ============================================ + # 阶段 4: 一致性审核 + # ============================================ + logger.info(f"EP{episode_number} - 阶段 4: 一致性审核") + review = await self._review_consistency(project, episode_number, content) + episode.qualityScore = review.get("score", 0) + episode.issues = review.get("issues", []) + + # ============================================ + # 阶段 5: 更新记忆系统 + # ============================================ + logger.info(f"EP{episode_number} - 阶段 5: 更新记忆系统") + await self._update_memory(project, episode_number, content) + + # 完成 + episode.status = "completed" + episode.completedAt = datetime.now() + logger.info(f"EP{episode_number} 创作完成,质量分数: {episode.qualityScore}") + + except Exception as e: + logger.error(f"EP{episode_number} 创作失败: {str(e)}") + episode.status = "needs-review" + episode.issues = [ + EpisodeIssue( + type="execution_error", + description=str(e), + severity="high" + ) + ] + + return episode + + async def _analyze_structure( + self, + project: SeriesProject, + episode_number: int + ) -> Dict[str, Any]: + """分析本集结构""" + # 使用默认的结构分析逻辑 + structure = { + "episodeNumber": episode_number, + "scenes": [], + "keyEvents": [] + } + return structure + + async def _generate_outline( + self, + project: SeriesProject, + episode_number: int, + structure: Dict[str, Any] + ) -> str: + """生成剧集大纲""" + # 构建提示词 + context = self._build_context(project, episode_number) + + prompt = f"""请为《{project.name}》创作第 {episode_number} 集的大纲。 + +{context} + +要求: +1. 大纲应包含 3-5 个场景 +2. 每个场景简要描述情节 +3. 
字数 200-300 字 +""" + + try: + # 使用 SkillResolver 解析 Skills + resolved_skills = await self.skill_resolver.resolve_skills_for_task( + project=project, + task_type=TaskType.OUTLINE, + episode_number=episode_number + ) + + if resolved_skills: + skill_data = resolved_skills[0] + skill = skill_data["skill"] + parameters = skill_data["parameters"] + references = skill_data["references"] + + logger.info(f"EP{episode_number} 大纲生成使用 Skill: {skill.id}") + + return await self.glm_client.chat_with_skill( + skill_behavior=skill.behavior_guide, + user_input=prompt, + context=context, + temperature=parameters.get("temperature", 0.7), + references=references if references else None + ) + + # 回退到基本调用 + response = await self.glm_client.chat( + messages=[ + {"role": "system", "content": "你是剧集创作专家。"}, + {"role": "user", "content": prompt} + ] + ) + return response["choices"][0]["message"]["content"] + + except Exception as e: + logger.error(f"大纲生成失败: {str(e)}") + return f"第{episode_number}集大纲:本集讲述..." + + async def _write_dialogue( + self, + project: SeriesProject, + episode_number: int, + outline: str + ) -> str: + """创作对话内容(核心阶段)""" + context = self._build_context(project, episode_number) + + prompt = f"""请根据以下大纲创作第 {episode_number} 集的完整剧本: + +大纲: +{outline} + +{context} + +要求: +1. 使用【场景】标记场景开始 +2. 使用【人物】标记出场人物 +3. 包含对话和动作描写 +4. 保持人物性格和说话风格一致 +""" + + try: + # 使用 SkillResolver 解析 Skills + resolved_skills = await self.skill_resolver.resolve_skills_for_task( + project=project, + task_type=TaskType.DIALOGUE, + episode_number=episode_number + ) + + if not resolved_skills: + # 回退方案 + logger.warning(f"EP{episode_number} 未找到对话创作 Skill,使用默认方案") + response = await self.glm_client.chat( + messages=[ + {"role": "system", "content": "你是专业的剧集编剧。"}, + {"role": "user", "content": prompt} + ], + temperature=0.7 + ) + return response["choices"][0]["message"]["content"] + + # 使用第一个(最高优先级)Skill + skill_data = resolved_skills[0] + skill = skill_data["skill"] + parameters = skill_data["parameters"] + references = skill_data["references"] + + logger.info( + f"EP{episode_number} 对话创作使用 Skill: {skill.id}, " + f"参数: {parameters}, 参考文件数: {len(references)}" + ) + + # 使用 Skill 行为指导 + 参考文件 + return await self.glm_client.chat_with_skill( + skill_behavior=skill.behavior_guide, + user_input=prompt, + context=context, + temperature=parameters.get("temperature", 0.7), + references=references if references else None + ) + + except Exception as e: + logger.error(f"对话创作失败: {str(e)}") + return f"第{episode_number}集内容创作失败: {str(e)}" + + async def _review_consistency( + self, + project: SeriesProject, + episode_number: int, + content: str + ) -> Dict[str, Any]: + """审核一致性""" + try: + from app.core.memory.memory_manager import get_memory_manager + from app.models.memory import EnhancedMemory + + memory_manager = get_memory_manager() + + # 转换为 EnhancedMemory + enhanced_memory = memory_manager._convert_to_enhanced_memory(project.memory) + + # 使用 MemoryManager 检查一致性 + issues = await memory_manager.check_consistency( + episode_content=content, + episode_number=episode_number, + memory=enhanced_memory + ) + + # 计算质量分数 + high_issues = [i for i in issues if i.severity.value == "high"] + medium_issues = [i for i in issues if i.severity.value == "medium"] + + score = 100 - (len(high_issues) * 15) - (len(medium_issues) * 5) + score = max(0, min(100, score)) + + # 将问题转换为 EpisodeIssue 格式 + episode_issues = [ + EpisodeIssue( + type=issue.type, + description=issue.description, + severity=issue.severity.value, + suggestion=issue.suggestion + ) + for issue in issues 
+ ] + + logger.info( + f"一致性审核完成: 分数={score}, " + f"问题={len(issues)} (高={len(high_issues)}, 中={len(medium_issues)})" + ) + + return { + "score": score, + "issues": episode_issues + } + + except Exception as e: + logger.error(f"一致性审核失败: {str(e)}") + return {"score": 80, "issues": []} + + async def _update_memory( + self, + project: SeriesProject, + episode_number: int, + content: str + ): + """更新记忆系统""" + from app.core.memory.memory_manager import get_memory_manager + + # 创建 Episode 对象 + episode = Episode( + projectId=project.id, + number=episode_number, + title=f"第{episode_number}集", + status="completed", + content=content, + completedAt=datetime.now() + ) + + # 调用 MemoryManager 更新记忆 + memory_manager = get_memory_manager() + result = await memory_manager.update_memory_from_episode(project, episode) + + logger.info( + f"记忆更新完成: 事件={result.events_extracted}, " + f"伏笔={result.foreshadowing_detected}, " + f"待收线={result.threads_suggested}, " + f"角色状态={result.character_states_updated}, " + f"一致性问题={result.consistency_issues_found}" + ) + + return result + + def _build_context( + self, + project: SeriesProject, + episode_number: int + ) -> str: + """构建上下文字符串""" + context_parts = [] + + # 世界观 + if project.globalContext.worldSetting: + context_parts.append(f"世界观:{project.globalContext.worldSetting}") + + # 人物设定 + if project.globalContext.characterProfiles: + context_parts.append("\n人物设定:") + for char_id, char in project.globalContext.characterProfiles.items(): + context_parts.append(f"- {char.name}:{char.personality},说话风格:{char.speechStyle}") + + # 历史记忆(最近3集) + if project.memory.eventTimeline: + context_parts.append("\n历史剧情:") + recent_events = project.memory.eventTimeline[-3:] + for event in recent_events: + context_parts.append(f"- {event.get('event', '')}") + + return "\n".join(context_parts) + + +# 全局单例 +_agent_instance: Optional[SeriesCreationAgent] = None + + +def get_series_agent() -> SeriesCreationAgent: + """获取剧集创作 Agent 单例""" + global _agent_instance + if _agent_instance is None: + _agent_instance = SeriesCreationAgent() + return _agent_instance diff --git a/backend/app/core/execution/__init__.py b/backend/app/core/execution/__init__.py new file mode 100644 index 0000000..5a048b9 --- /dev/null +++ b/backend/app/core/execution/__init__.py @@ -0,0 +1,14 @@ +""" +Enhanced Execution Package + +提供批量执行、自动重试、实时进度追踪等增强执行功能 +""" +from app.core.execution.batch_executor import BatchExecutor, get_batch_executor +from app.core.execution.retry_manager import RetryManager, get_retry_manager + +__all__ = [ + "BatchExecutor", + "get_batch_executor", + "RetryManager", + "get_retry_manager" +] diff --git a/backend/app/core/execution/batch_executor.py b/backend/app/core/execution/batch_executor.py new file mode 100644 index 0000000..1fd64e6 --- /dev/null +++ b/backend/app/core/execution/batch_executor.py @@ -0,0 +1,549 @@ +""" +Batch Executor + +批量执行器 - 负责批量执行剧集创作,支持质量检查和自动重试 +""" +from typing import List, Dict, Any, Optional, Callable, Awaitable +from datetime import datetime +import asyncio +import uuid + +from app.models.project import SeriesProject, Episode, EpisodeIssue +from app.models.review import ReviewConfig, ReviewResult +from app.core.agents.series_creation_agent import get_series_agent +from app.core.review.review_manager import get_review_manager, ReviewManager +from app.core.memory.memory_manager import get_memory_manager, MemoryManager +from app.db.repositories import episode_repo, project_repo +from app.utils.logger import get_logger + +logger = get_logger(__name__) + + +class 
BatchExecutionStatus: + """批量执行状态""" + + def __init__( + self, + batch_id: str, + project_id: str, + start_episode: int, + end_episode: int + ): + self.batch_id = batch_id + self.project_id = project_id + self.start_episode = start_episode + self.end_episode = end_episode + self.total_episodes = end_episode - start_episode + 1 + + # 执行状态 + self.status = "running" # running, completed, stopped, failed + self.current_episode = start_episode + self.completed_episodes = 0 + self.failed_episodes = 0 + + # 结果追踪 + self.episode_results: Dict[int, Dict[str, Any]] = {} + self.start_time = datetime.now() + self.end_time: Optional[datetime] = None + + # 回调函数 + self.on_progress: Optional[Callable] = None + self.on_episode_complete: Optional[Callable] = None + self.on_error: Optional[Callable] = None + + +class BatchExecutor: + """ + 批量执行器 + + 核心功能: + 1. 批量执行剧集创作 + 2. 集成质量检查 + 3. 自动重试失败剧集 + 4. 生成执行摘要 + 5. 实时进度通知 + """ + + def __init__( + self, + review_manager: Optional[ReviewManager] = None, + memory_manager: Optional[MemoryManager] = None + ): + """ + 初始化批量执行器 + + Args: + review_manager: 审核管理器(可选) + memory_manager: 记忆管理器(可选) + """ + self.series_agent = get_series_agent() + self.review_manager = review_manager or get_review_manager() + self.memory_manager = memory_manager or get_memory_manager() + + # 执行状态追踪 + self.active_batches: Dict[str, BatchExecutionStatus] = {} + self._stop_flags: Dict[str, bool] = {} + + logger.info("批量执行器初始化完成") + + async def execute_batch( + self, + project: SeriesProject, + start_episode: int, + end_episode: int, + review_config: Optional[ReviewConfig] = None, + enable_retry: bool = True, + max_retries: int = 2, + on_progress: Optional[Callable[[Dict[str, Any]], Awaitable[None]]] = None, + on_episode_complete: Optional[Callable[[Dict[str, Any]], Awaitable[None]]] = None, + on_error: Optional[Callable[[Dict[str, Any]], Awaitable[None]]] = None + ) -> Dict[str, Any]: + """ + 执行批量剧集创作 + + Args: + project: 剧集项目 + start_episode: 起始集数 + end_episode: 结束集数 + review_config: 审核配置(可选,不提供则使用项目配置) + enable_retry: 是否启用自动重试 + max_retries: 最大重试次数 + on_progress: 进度回调函数 + on_episode_complete: 剧集完成回调 + on_error: 错误回调 + + Returns: + 执行结果摘要 + """ + batch_id = str(uuid.uuid4()) + logger.info(f"开始批量执行: {batch_id}, EP{start_episode}-{end_episode}") + + # 创建执行状态 + execution_status = BatchExecutionStatus( + batch_id=batch_id, + project_id=project.id, + start_episode=start_episode, + end_episode=end_episode + ) + execution_status.on_progress = on_progress + execution_status.on_episode_complete = on_episode_complete + execution_status.on_error = on_error + + self.active_batches[batch_id] = execution_status + self._stop_flags[batch_id] = False + + try: + # 遍历执行每一集 + for episode_num in range(start_episode, end_episode + 1): + # 检查是否停止 + if self._stop_flags.get(batch_id, False): + logger.info(f"批量执行被用户停止: {batch_id}") + execution_status.status = "stopped" + break + + execution_status.current_episode = episode_num + + try: + # 执行单集创作 + episode_result = await self._execute_single_episode( + project=project, + episode_number=episode_num, + review_config=review_config, + enable_retry=enable_retry, + max_retries=max_retries, + batch_id=batch_id + ) + + # 记录结果 + execution_status.episode_results[episode_num] = episode_result + execution_status.completed_episodes += 1 + + # 保存剧集 + await episode_repo.create(episode_result["episode"]) + + # 通知进度 + if on_progress: + await self._send_progress_update(execution_status, episode_result) + + # 通知剧集完成 + if on_episode_complete: + await on_episode_complete(episode_result) + + 
logger.info( + f"EP{episode_num} 完成: " + f"质量分数={episode_result.get('quality_score', 0)}, " + f"通过={episode_result.get('passed', False)}" + ) + + except Exception as e: + logger.error(f"EP{episode_num} 执行失败: {str(e)}") + execution_status.failed_episodes += 1 + + error_result = { + "episode_number": episode_num, + "success": False, + "error": str(e), + "batch_id": batch_id + } + + execution_status.episode_results[episode_num] = error_result + + if on_error: + await on_error(error_result) + + # 更新项目记忆 + await project_repo.update(project.id, { + "memory": project.memory.dict(), + "updatedAt": datetime.now() + }) + + # 完成批量执行 + execution_status.status = "completed" + execution_status.end_time = datetime.now() + + # 生成摘要 + summary = await self._generate_summary(execution_status, project) + + logger.info( + f"批量执行完成: {batch_id}, " + f"成功={summary['successful']}, " + f"失败={summary['failed']}, " + f"耗时={summary['duration_seconds']:.1f}秒" + ) + + return summary + + except Exception as e: + logger.error(f"批量执行异常: {str(e)}") + execution_status.status = "failed" + execution_status.end_time = datetime.now() + + return { + "batch_id": batch_id, + "success": False, + "error": str(e), + "project_id": project.id, + "start_episode": start_episode, + "end_episode": end_episode + } + + finally: + # 清理 + if batch_id in self._stop_flags: + del self._stop_flags[batch_id] + + async def _execute_single_episode( + self, + project: SeriesProject, + episode_number: int, + review_config: Optional[ReviewConfig], + enable_retry: bool, + max_retries: int, + batch_id: str + ) -> Dict[str, Any]: + """ + 执行单集创作(包含质量检查和重试) + + Args: + project: 项目 + episode_number: 集数 + review_config: 审核配置 + enable_retry: 是否启用重试 + max_retries: 最大重试次数 + batch_id: 批次ID + + Returns: + 执行结果 + """ + retry_count = 0 + last_result = None + feedback_history: List[str] = [] + + while retry_count <= max_retries: + try: + # 1. 执行创作 + logger.info(f"创作 EP{episode_number} (尝试 {retry_count + 1}/{max_retries + 1})") + + # 如果有反馈历史,添加到上下文 + if feedback_history: + # 使用反馈作为额外上下文(在真实实现中会传递给 Agent) + logger.info(f"使用 {len(feedback_history)} 条反馈进行重试") + + episode = await self.series_agent.execute_episode( + project=project, + episode_number=episode_number, + title=f"第{episode_number}集" + ) + + # 2. 更新记忆系统 + if episode.content: + await self.memory_manager.update_memory_from_episode(project, episode) + + # 3. 执行质量检查 + review_passed = True + review_result = None + quality_score = episode.qualityScore or 0 + + if review_config: + # 从项目获取审核配置 + from app.models.review import ReviewConfig as RC + config = review_config or RC() + + review_result = await self.review_manager.review_episode( + project=project, + episode=episode, + config=config + ) + + review_passed = review_result.passed + quality_score = review_result.overall_score + episode.qualityScore = quality_score + episode.issues = [ + EpisodeIssue( + type=issue.type.value, + description=issue.description, + severity=issue.severity.value, + suggestion=issue.suggestion + ) + for issue in review_result.issues + ] + + # 4. 
判断是否需要重试 + if review_passed or not enable_retry or retry_count >= max_retries: + # 成功或达到最大重试次数 + episode.status = "completed" if review_passed else "needs-review" + episode.completedAt = datetime.now() + episode.retryCount = retry_count + + return { + "episode_number": episode_number, + "success": review_passed or retry_count >= max_retries, + "episode": episode.dict(), + "quality_score": quality_score, + "passed": review_passed, + "retry_count": retry_count, + "review_result": review_result.dict() if review_result else None, + "batch_id": batch_id + } + + # 5. 准备重试 + logger.warning( + f"EP{episode_number} 未通过质量检查 (分数: {quality_score:.1f}), " + f"准备重试..." + ) + + # 构建反馈 + feedback = await self._build_feedback_from_review(review_result) + feedback_history.append(feedback) + + retry_count += 1 + + except Exception as e: + logger.error(f"EP{episode_number} 创作失败 (尝试 {retry_count + 1}): {str(e)}") + + if retry_count >= max_retries: + # 达到最大重试次数,返回失败结果 + return { + "episode_number": episode_number, + "success": False, + "error": str(e), + "retry_count": retry_count, + "batch_id": batch_id, + "episode": None + } + + retry_count += 1 + + # 不应该到达这里 + return { + "episode_number": episode_number, + "success": False, + "error": "未知错误", + "batch_id": batch_id + } + + async def _build_feedback_from_review(self, review_result: ReviewResult) -> str: + """ + 从审核结果构建反馈信息 + + Args: + review_result: 审核结果 + + Returns: + 反馈文本 + """ + feedback_parts = [] + + feedback_parts.append(f"质量分数: {review_result.overall_score:.1f}/100") + + if review_result.issues: + feedback_parts.append(f"\n发现 {len(review_result.issues)} 个问题:") + + # 按严重程度分组 + high_issues = [i for i in review_result.issues if i.severity.value == "high"] + medium_issues = [i for i in review_result.issues if i.severity.value == "medium"] + low_issues = [i for i in review_result.issues if i.severity.value == "low"] + + if high_issues: + feedback_parts.append("\n【高严重度问题】") + for issue in high_issues[:3]: # 限制数量 + feedback_parts.append(f"- {issue.description}") + if issue.suggestion: + feedback_parts.append(f" 建议: {issue.suggestion}") + + if medium_issues: + feedback_parts.append("\n【中严重度问题】") + for issue in medium_issues[:3]: + feedback_parts.append(f"- {issue.description}") + + return "\n".join(feedback_parts) + + async def _send_progress_update( + self, + execution_status: BatchExecutionStatus, + episode_result: Dict[str, Any] + ): + """ + 发送进度更新 + + Args: + execution_status: 执行状态 + episode_result: 剧集结果 + """ + progress = { + "batch_id": execution_status.batch_id, + "project_id": execution_status.project_id, + "current_episode": execution_status.current_episode, + "total_episodes": execution_status.total_episodes, + "completed_episodes": execution_status.completed_episodes, + "failed_episodes": execution_status.failed_episodes, + "progress_percentage": ( + execution_status.completed_episodes / execution_status.total_episodes * 100 + ), + "current_episode_result": episode_result, + "timestamp": datetime.now().isoformat() + } + + if execution_status.on_progress: + await execution_status.on_progress(progress) + + async def _generate_summary( + self, + execution_status: BatchExecutionStatus, + project: SeriesProject + ) -> Dict[str, Any]: + """ + 生成批量执行摘要 + + Args: + execution_status: 执行状态 + project: 项目 + + Returns: + 执行摘要 + """ + # 计算统计信息 + successful = sum( + 1 for r in execution_status.episode_results.values() + if r.get("success", False) + ) + failed = execution_status.failed_episodes + + # 计算质量分数统计 + quality_scores = [ + r.get("quality_score", 0) + for r 
in execution_status.episode_results.values() + if r.get("quality_score") is not None + ] + + avg_quality = sum(quality_scores) / len(quality_scores) if quality_scores else 0 + min_quality = min(quality_scores) if quality_scores else 0 + max_quality = max(quality_scores) if quality_scores else 0 + + # 计算耗时 + duration = ( + execution_status.end_time - execution_status.start_time + ).total_seconds() if execution_status.end_time else 0 + + # 构建摘要 + summary = { + "batch_id": execution_status.batch_id, + "project_id": execution_status.project_id, + "project_name": project.name, + "status": execution_status.status, + "start_episode": execution_status.start_episode, + "end_episode": execution_status.end_episode, + "total_episodes": execution_status.total_episodes, + "successful": successful, + "failed": failed, + "success_rate": successful / execution_status.total_episodes if execution_status.total_episodes > 0 else 0, + "quality_stats": { + "average": avg_quality, + "min": min_quality, + "max": max_quality + }, + "duration_seconds": duration, + "start_time": execution_status.start_time.isoformat(), + "end_time": execution_status.end_time.isoformat() if execution_status.end_time else None, + "episode_results": execution_status.episode_results + } + + return summary + + async def stop_batch(self, batch_id: str) -> bool: + """ + 停止批量执行 + + Args: + batch_id: 批次ID + + Returns: + 是否成功停止 + """ + if batch_id in self.active_batches: + self._stop_flags[batch_id] = True + logger.info(f"已发送停止信号: {batch_id}") + return True + return False + + def get_batch_status(self, batch_id: str) -> Optional[Dict[str, Any]]: + """ + 获取批量执行状态 + + Args: + batch_id: 批次ID + + Returns: + 执行状态信息 + """ + execution_status = self.active_batches.get(batch_id) + if not execution_status: + return None + + return { + "batch_id": execution_status.batch_id, + "project_id": execution_status.project_id, + "status": execution_status.status, + "current_episode": execution_status.current_episode, + "total_episodes": execution_status.total_episodes, + "completed_episodes": execution_status.completed_episodes, + "failed_episodes": execution_status.failed_episodes, + "progress_percentage": ( + execution_status.completed_episodes / execution_status.total_episodes * 100 + if execution_status.total_episodes > 0 else 0 + ), + "start_time": execution_status.start_time.isoformat(), + "end_time": execution_status.end_time.isoformat() if execution_status.end_time else None + } + + +# 全局单例 +_batch_executor: Optional[BatchExecutor] = None + + +def get_batch_executor() -> BatchExecutor: + """获取批量执行器单例""" + global _batch_executor + if _batch_executor is None: + _batch_executor = BatchExecutor() + return _batch_executor diff --git a/backend/app/core/execution/retry_manager.py b/backend/app/core/execution/retry_manager.py new file mode 100644 index 0000000..f4ff471 --- /dev/null +++ b/backend/app/core/execution/retry_manager.py @@ -0,0 +1,403 @@ +""" +Retry Manager + +自动重试管理器 - 负责判断是否需要重试以及执行重试逻辑 +""" +from typing import List, Dict, Any, Optional +from datetime import datetime +import uuid + +from app.models.project import SeriesProject, Episode, EpisodeIssue +from app.models.review import ReviewResult, ReviewIssue, SeverityLevel, DimensionType +from app.core.agents.series_creation_agent import get_series_agent +from app.core.memory.memory_manager import get_memory_manager +from app.utils.logger import get_logger + +logger = get_logger(__name__) + + +class RetryConfig: + """重试配置""" + + def __init__( + self, + max_retries: int = 2, + retry_on_failure: bool = 
True, + retry_on_low_quality: bool = True, + quality_threshold: float = 75.0, + retry_on_high_severity: bool = True, + max_high_severity: int = 0 + ): + self.max_retries = max_retries + self.retry_on_failure = retry_on_failure + self.retry_on_low_quality = retry_on_low_quality + self.quality_threshold = quality_threshold + self.retry_on_high_severity = retry_on_high_severity + self.max_high_severity = max_high_severity + + +class RetryManager: + """ + 自动重试管理器 + + 核心功能: + 1. 判断剧集是否应该重试 + 2. 执行剧集重试 + 3. 从审核结果构建反馈 + 4. 追踪重试历史 + """ + + def __init__(self): + """初始化重试管理器""" + self.series_agent = get_series_agent() + self.memory_manager = get_memory_manager() + + # 重试历史追踪 + self.retry_history: Dict[str, List[Dict[str, Any]]] = {} + + logger.info("自动重试管理器初始化完成") + + def should_retry( + self, + episode: Episode, + review_result: Optional[ReviewResult] = None, + config: Optional[RetryConfig] = None, + current_retry_count: int = 0 + ) -> tuple[bool, str]: + """ + 判断剧集是否应该重试 + + Args: + episode: 剧集 + review_result: 审核结果(可选) + config: 重试配置(可选) + current_retry_count: 当前已重试次数 + + Returns: + (是否重试, 原因) + """ + config = config or RetryConfig() + + # 检查重试次数限制 + if current_retry_count >= config.max_retries: + return False, f"已达到最大重试次数 ({config.max_retries})" + + # 1. 检查执行失败 + if config.retry_on_failure and episode.status == "needs-review": + # 检查是否有执行错误 + execution_errors = [ + issue for issue in episode.issues + if issue.type == "execution_error" + ] + if execution_errors: + return True, "执行失败,需要重试" + + # 2. 检查质量分数 + if review_result and config.retry_on_low_quality: + if review_result.overall_score < config.quality_threshold: + return True, ( + f"质量分数 ({review_result.overall_score:.1f}) " + f"低于阈值 ({config.quality_threshold})" + ) + + # 3. 检查高严重度问题 + if review_result and config.retry_on_high_severity: + high_count = review_result.high_severity_count + if high_count > config.max_high_severity: + return True, ( + f"存在 {high_count} 个高严重度问题 " + f"(阈值: {config.max_high_severity})" + ) + + # 4. 
检查特定维度未通过 + if review_result: + failed_dimensions = [ + ds.dimension.value for ds in review_result.dimension_scores + if not ds.passed + ] + if failed_dimensions: + return True, f"以下维度未通过: {', '.join(failed_dimensions)}" + + # 不需要重试 + return False, "质量检查通过或不符合重试条件" + + async def retry_episode( + self, + project: SeriesProject, + episode: Episode, + review_result: Optional[ReviewResult] = None, + feedback: Optional[str] = None, + config: Optional[RetryConfig] = None + ) -> Episode: + """ + 重试剧集创作 + + Args: + project: 项目 + episode: 原剧集 + review_result: 审核结果 + feedback: 反馈信息(可选,不提供则自动生成) + config: 重试配置 + + Returns: + 新创作的剧集 + """ + retry_count = episode.retryCount + 1 + logger.info(f"重试 EP{episode.number} (第 {retry_count} 次重试)") + + # 生成反馈 + if not feedback and review_result: + feedback = await self._build_feedback(project, episode, review_result) + + # 记录重试历史 + retry_record = { + "timestamp": datetime.now().isoformat(), + "episode_number": episode.number, + "retry_count": retry_count, + "previous_score": episode.qualityScore, + "feedback": feedback, + "review_issues": [ + { + "type": issue.type.value, + "description": issue.description, + "severity": issue.severity.value, + "suggestion": issue.suggestion + } + for issue in (review_result.issues if review_result and review_result.issues else []) + ] + } + + if episode.id not in self.retry_history: + self.retry_history[episode.id] = [] + self.retry_history[episode.id].append(retry_record) + + try: + # 执行重试创作 + # 在真实实现中,会将反馈作为额外上下文传递给 Agent + new_episode = await self.series_agent.execute_episode( + project=project, + episode_number=episode.number, + title=episode.title or f"第{episode.number}集" + ) + + # 更新重试计数 + new_episode.retryCount = retry_count + + # 如果有内容,更新记忆 + if new_episode.content: + await self.memory_manager.update_memory_from_episode(project, new_episode) + + logger.info( + f"EP{episode.number} 重试完成: " + f"新分数={new_episode.qualityScore}, " + f"重试次数={retry_count}" + ) + + return new_episode + + except Exception as e: + logger.error(f"重试 EP{episode.number} 失败: {str(e)}") + + # 返回失败状态 + episode.status = "needs-review" + episode.issues.append( + EpisodeIssue( + type="retry_error", + description=f"重试失败: {str(e)}", + severity="high" + ) + ) + episode.retryCount = retry_count + + return episode + + async def _build_feedback( + self, + project: SeriesProject, + episode: Episode, + review_result: ReviewResult + ) -> str: + """ + 构建重试反馈 + + Args: + project: 项目 + episode: 剧集 + review_result: 审核结果 + + Returns: + 反馈文本 + """ + feedback_parts = [] + + # 1. 总体评价 + feedback_parts.append(f"【质量评估】") + feedback_parts.append(f"当前质量分数: {review_result.overall_score:.1f}/100") + feedback_parts.append(f"审核状态: {'通过' if review_result.passed else '未通过'}") + feedback_parts.append("") + + # 2. 维度分数 + if review_result.dimension_scores: + feedback_parts.append("【维度评分】") + for dim_score in review_result.dimension_scores: + status = "✓ 通过" if dim_score.passed else "✗ 未通过" + feedback_parts.append( + f"- {dim_score.dimension.value}: {dim_score.score:.1f} {status}" + ) + feedback_parts.append("") + + # 3. 
问题分析(按严重程度) + if review_result.issues: + feedback_parts.append("【问题分析】") + + # 高严重度 + high_issues = [i for i in review_result.issues if i.severity == SeverityLevel.high] + if high_issues: + feedback_parts.append(f"\n高严重度问题 ({len(high_issues)} 个):") + for issue in high_issues[:5]: # 限制数量 + feedback_parts.append(f" • {issue.description}") + if issue.suggestion: + feedback_parts.append(f" 建议: {issue.suggestion}") + + # 中严重度 + medium_issues = [i for i in review_result.issues if i.severity == SeverityLevel.medium] + if medium_issues: + feedback_parts.append(f"\n中严重度问题 ({len(medium_issues)} 个):") + for issue in medium_issues[:5]: + feedback_parts.append(f" • {issue.description}") + + # 低严重度 + low_issues = [i for i in review_result.issues if i.severity == SeverityLevel.low] + if low_issues: + feedback_parts.append(f"\n低严重度问题 ({len(low_issues)} 个):") + for issue in low_issues[:3]: + feedback_parts.append(f" • {issue.description}") + + feedback_parts.append("") + + # 4. 改进建议(按维度) + feedback_parts.append("【改进建议】") + + dimension_suggestions = self._generate_dimension_suggestions(review_result) + for dimension, suggestions in dimension_suggestions.items(): + if suggestions: + feedback_parts.append(f"\n{dimension}:") + for suggestion in suggestions: + feedback_parts.append(f" - {suggestion}") + + # 5. 上下文信息 + feedback_parts.append("\n【上下文信息】") + feedback_parts.append(f"项目: {project.name}") + feedback_parts.append(f"集数: 第 {episode.number} 集") + feedback_parts.append(f"已重试次数: {episode.retryCount}") + + # 6. 全局设定提醒 + if project.globalContext: + feedback_parts.append("\n【全局设定提醒】") + if project.globalContext.worldSetting: + feedback_parts.append(f"世界观: {project.globalContext.worldSetting[:100]}...") + + if project.globalContext.characterProfiles: + characters = list(project.globalContext.characterProfiles.keys()) + feedback_parts.append(f"主要角色: {', '.join(characters)}") + + if project.globalContext.styleGuide: + feedback_parts.append(f"风格指南: {project.globalContext.styleGuide[:100]}...") + + return "\n".join(feedback_parts) + + def _generate_dimension_suggestions( + self, + review_result: ReviewResult + ) -> Dict[str, List[str]]: + """ + 为各维度生成改进建议 + + Args: + review_result: 审核结果 + + Returns: + 维度到建议列表的映射 + """ + suggestions = {} + + for dim_score in review_result.dimension_scores: + dimension_name = dim_score.dimension.value + dimension_suggestions = [] + + # 根据问题类型生成建议 + for issue in dim_score.issues: + if issue.suggestion: + dimension_suggestions.append(issue.suggestion) + + # 如果没有具体建议,添加通用建议 + if not dimension_suggestions and not dim_score.passed: + dimension_suggestions.append( + self._get_generic_suggestion(dim_score.dimension) + ) + + if dimension_suggestions: + suggestions[dimension_name] = dimension_suggestions[:3] # 限制数量 + + return suggestions + + def _get_generic_suggestion(self, dimension: DimensionType) -> str: + """ + 获取维度的通用改进建议 + + Args: + dimension: 维度类型 + + Returns: + 通用建议文本 + """ + generic_suggestions = { + DimensionType.consistency: "确保人物设定、场景设定、剧情逻辑与之前剧集保持一致", + DimensionType.quality: "提升文字质量,改善表达流畅度和准确性", + DimensionType.pacing: "调整剧情节奏,确保叙事速度适中,不急不缓", + DimensionType.dialogue: "优化对话质量,确保人物说话风格符合其性格设定", + DimensionType.character: "深化人物塑造,增强角色性格的立体性和可信度", + DimensionType.plot: "完善剧情逻辑,修复剧情漏洞,增强情节合理性" + } + + return generic_suggestions.get( + dimension, + "请根据具体问题进行改进" + ) + + def get_retry_history(self, episode_id: str) -> List[Dict[str, Any]]: + """ + 获取剧集的重试历史 + + Args: + episode_id: 剧集ID + + Returns: + 重试历史记录 + """ + return self.retry_history.get(episode_id, []) + + def 
clear_retry_history(self, episode_id: Optional[str] = None): + """ + 清除重试历史 + + Args: + episode_id: 剧集ID(可选,不提供则清除所有) + """ + if episode_id: + if episode_id in self.retry_history: + del self.retry_history[episode_id] + else: + self.retry_history.clear() + + +# 全局单例 +_retry_manager: Optional[RetryManager] = None + + +def get_retry_manager() -> RetryManager: + """获取重试管理器单例""" + global _retry_manager + if _retry_manager is None: + _retry_manager = RetryManager() + return _retry_manager diff --git a/backend/app/core/llm/__init__.py b/backend/app/core/llm/__init__.py new file mode 100644 index 0000000..ecab5da --- /dev/null +++ b/backend/app/core/llm/__init__.py @@ -0,0 +1,6 @@ +""" +LLM 集成模块 +""" +from app.core.llm.glm_client import GLMClient, get_glm_client, glm_client + +__all__ = ["GLMClient", "get_glm_client", "glm_client"] diff --git a/backend/app/core/llm/glm_client.py b/backend/app/core/llm/glm_client.py new file mode 100644 index 0000000..0303417 --- /dev/null +++ b/backend/app/core/llm/glm_client.py @@ -0,0 +1,299 @@ +""" +智谱 GLM-4.7 客户端封装 + +核心功能: +1. 将 Skill 的行为指导融入提示词 +2. 提供同步和异步调用接口 +3. 支持流式和非流式响应 +4. 错误处理和重试机制 +""" +from typing import List, Dict, Any, Optional, AsyncGenerator, Union +from zai import ZhipuAiClient +from app.config import settings +from app.utils.logger import get_logger + +logger = get_logger(__name__) + + +class GLMClient: + """ + 智谱 GLM-4.7 客户端 + + 核心功能:将 Skill 的行为指导融入提示词,指导 LLM 创作 + """ + + def __init__( + self, + api_key: Optional[str] = None, + model: Optional[str] = None + ): + """ + 初始化 GLM 客户端 + + Args: + api_key: 智谱 AI API Key + model: 模型名称 (glm-4-flash, glm-4-plus, glm-4-0520) + """ + self.api_key = api_key or settings.zai_api_key + self.model = model or settings.zai_model + self.client = ZhipuAiClient(api_key=self.api_key) + + logger.info(f"GLM 客户端初始化完成,模型: {self.model}") + + async def chat( + self, + messages: List[Dict[str, str]], + temperature: float = 0.7, + top_p: float = 0.9, + max_tokens: Optional[int] = None, + stream: bool = False, + **kwargs + ) -> Dict[str, Any]: + """ + 发送聊天请求 + + Args: + messages: 消息列表 [{"role": "system", "content": "..."}, ...] 
+ temperature: 温度参数 (0-1),越高越随机 + top_p: 采样参数 + max_tokens: 最大 token 数 + stream: 是否流式输出 + **kwargs: 其他参数 + + Returns: + 响应结果字典 + """ + try: + logger.debug(f"发送 GLM 请求,模型: {self.model}, 温度: {temperature}") + + response = self.client.chat.completions.create( + model=self.model, + messages=messages, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + **kwargs + ) + + if stream: + return {"stream": self._process_stream_response(response)} + else: + return self._process_response(response) + + except Exception as e: + logger.error(f"GLM API 调用失败: {str(e)}") + raise + + async def chat_with_skill( + self, + skill_behavior: str, + user_input: str, + context: Optional[Dict[str, Any]] = None, + temperature: float = 0.7, + system_prompt_addition: Optional[str] = None, + skill_id: Optional[str] = None, + references: Optional[Dict[str, str]] = None + ) -> str: + """ + 使用 Skill 行为指导进行聊天 + + 这是核心功能:将 Skill 的行为指导融入提示词 + + Args: + skill_behavior: Skill 的行为指导内容 + user_input: 用户输入 + context: 上下文信息 (可选) + temperature: 温度参数 + system_prompt_addition: 额外的系统提示词 (可选) + skill_id: Skill ID(用于加载参考文件) + references: 参考文件内容字典 {filename: content}(可选) + + Returns: + LLM 响应文本 + """ + # 构建包含 Skill 行为指导和参考文件的系统提示词 + system_prompt = self._build_skill_prompt( + skill_behavior, + context or {}, + system_prompt_addition, + references + ) + + messages = [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_input} + ] + + try: + response = await self.chat(messages, temperature=temperature) + content = response["choices"][0]["message"]["content"] + + logger.info(f"GLM 响应成功,token 使用: {response.get('usage', {})}") + + return content + + except Exception as e: + logger.error(f"chat_with_skill 调用失败: {str(e)}") + raise + + def _build_skill_prompt( + self, + skill_behavior: str, + context: Dict[str, Any], + addition: Optional[str] = None, + references: Optional[Dict[str, str]] = None + ) -> str: + """ + 构建包含 Skill 行为指导和参考文件的提示词 + + 这是平台的核心机制:将 Skill 的行为指导和参考文件融入提示词 + + Args: + skill_behavior: Skill 的行为指导内容 + context: 上下文信息 + addition: 额外的系统提示词 + references: 参考文件内容字典 + + Returns: + 完整的系统提示词 + """ + parts = [ + "你是剧集创作专家。", + "", + "## Skill 行为指导", + "", + "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━", + skill_behavior, + "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━", + ] + + # 添加参考文件内容 + if references and references: + parts.extend([ + "", + "## 参考资料", + "", + "以下是 Skill 的参考文件内容,请在执行任务时参考这些资料:", + "", + ]) + + for filename, content in references.items(): + # 判断文件类型并格式化 + if filename.endswith('.md') or filename.endswith('.txt'): + parts.extend([ + f"### 📄 {filename}", + "```", + content, + "```", + "" + ]) + elif filename.endswith('.json') or filename.endswith('.yaml') or filename.endswith('.yml'): + parts.extend([ + f"### 📋 {filename}", + "```json" if filename.endswith('.json') else "```yaml", + content, + "```", + "" + ]) + else: + # 其他文件类型,直接显示内容 + parts.extend([ + f"### 📎 {filename}", + content, + "" + ]) + + parts.extend([ + "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━", + ]) + + # 添加额外的系统提示词 + if addition: + parts.extend(["", addition]) + + # 添加上下文信息 + if context: + parts.extend([ + "", + "## 当前上下文", + "", + self._format_context(context) + ]) + + return "\n".join(parts) + + def _format_context(self, context: Dict[str, Any]) -> str: + """格式化上下文信息""" + lines = [] + for key, value in context.items(): + if isinstance(value, dict): + lines.append(f"- {key}:") + for sub_key, sub_value in value.items(): + lines.append(f" - {sub_key}: {sub_value}") + elif 
isinstance(value, list): + lines.append(f"- {key}:") + for item in value: + lines.append(f" - {item}") + else: + lines.append(f"- {key}: {value}") + return "\n".join(lines) + + def _process_response(self, response) -> Dict[str, Any]: + """处理标准响应""" + return { + "id": response.id, + "choices": [ + { + "message": { + "role": choice.message.role, + "content": choice.message.content + }, + "finish_reason": choice.finish_reason, + "index": choice.index + } + for choice in response.choices + ], + "usage": { + "prompt_tokens": response.usage.prompt_tokens, + "completion_tokens": response.usage.completion_tokens, + "total_tokens": response.usage.total_tokens + }, + "model": response.model, + "created": response.created + } + + async def _process_stream_response(self, response) -> AsyncGenerator[str, None]: + """处理流式响应""" + async for chunk in response: + if chunk.choices[0].delta.content: + yield chunk.choices[0].delta.content + + async def test_connection(self) -> bool: + """测试 API 连接""" + try: + response = await self.chat( + messages=[{"role": "user", "content": "你好"}], + max_tokens=10 + ) + logger.info("GLM API 连接测试成功") + return True + except Exception as e: + logger.error(f"GLM API 连接测试失败: {str(e)}") + return False + + +# 全局单例 +_glm_client: Optional[GLMClient] = None + + +def get_glm_client() -> GLMClient: + """获取 GLM 客户端单例""" + global _glm_client + if _glm_client is None: + _glm_client = GLMClient() + return _glm_client + + +# 便捷导出 +glm_client = get_glm_client() diff --git a/backend/app/core/memory/__init__.py b/backend/app/core/memory/__init__.py new file mode 100644 index 0000000..fb34b43 --- /dev/null +++ b/backend/app/core/memory/__init__.py @@ -0,0 +1,8 @@ +""" +记忆系统模块 + +Memory System - 提供记忆管理功能 +""" +from app.core.memory.memory_manager import MemoryManager, get_memory_manager + +__all__ = ['MemoryManager', 'get_memory_manager'] diff --git a/backend/app/core/memory/memory_manager.py b/backend/app/core/memory/memory_manager.py new file mode 100644 index 0000000..92589b0 --- /dev/null +++ b/backend/app/core/memory/memory_manager.py @@ -0,0 +1,932 @@ +""" +记忆系统管理器 + +Memory Manager - 负责管理和维护项目的记忆系统,确保多剧集一致性 +""" +import json +import uuid +from typing import List, Dict, Any, Optional +from datetime import datetime + +from app.models.memory import ( + TimelineEvent, + PendingThread, + CharacterStateChange, + ForeshadowingEvent, + ConsistencyIssue, + ResolutionSuggestion, + EnhancedMemory, + ImportanceLevel, + ThreadStatus, + MemoryExtractionResult +) +from app.models.project import SeriesProject, Episode, Memory +from app.core.llm.glm_client import get_glm_client +from app.utils.logger import get_logger + +logger = get_logger(__name__) + + +class MemoryManager: + """ + 记忆系统管理器 + + 核心功能: + 1. 从剧集中提取关键事件 + 2. 检测伏笔和待收线 + 3. 追踪角色状态变化 + 4. 检查一致性 + 5. 
提供收线建议 + """ + + def __init__(self): + """初始化记忆管理器""" + self.glm_client = get_glm_client() + logger.info("记忆管理器初始化完成") + + async def extract_events_from_episode( + self, + episode_content: str, + episode_number: int, + characters: List[str] + ) -> List[TimelineEvent]: + """ + 从完成的剧集中提取关键事件 + + 使用 LLM 分析剧情,识别并提取关键事件 + + Args: + episode_content: 剧集内容 + episode_number: 集数 + characters: 角色列表 + + Returns: + 提取的事件列表 + """ + try: + logger.info(f"开始从 EP{episode_number} 提取事件") + + # 构建提取提示词 + prompt = self._build_event_extraction_prompt( + episode_content, + episode_number, + characters + ) + + # 调用 LLM + response = await self.glm_client.chat( + messages=[ + { + "role": "system", + "content": "你是专业的剧情分析专家,擅长从剧集中提取关键事件。" + }, + {"role": "user", "content": prompt} + ], + temperature=0.3 + ) + + content = response["choices"][0]["message"]["content"] + + # 解析响应 + events = self._parse_events_response(content, episode_number) + + logger.info(f"成功从 EP{episode_number} 提取 {len(events)} 个事件") + return events + + except Exception as e: + logger.error(f"提取事件失败: {str(e)}") + return [] + + async def detect_foreshadowing( + self, + episode_content: str, + episode_number: int, + existing_threads: List[PendingThread] + ) -> tuple[List[ForeshadowingEvent], List[PendingThread]]: + """ + 检测剧集中的伏笔和新待收线 + + Args: + episode_content: 剧集内容 + episode_number: 集数 + existing_threads: 现有待收线列表 + + Returns: + (新检测到的伏笔, 新识别的待收线) + """ + try: + logger.info(f"开始检测 EP{episode_number} 的伏笔") + + # 构建检测提示词 + prompt = self._build_foreshadowing_detection_prompt( + episode_content, + episode_number, + existing_threads + ) + + # 调用 LLM + response = await self.glm_client.chat( + messages=[ + { + "role": "system", + "content": "你是专业的剧情分析专家,擅长识别伏笔和待收线。" + }, + {"role": "user", "content": prompt} + ], + temperature=0.4 + ) + + content = response["choices"][0]["message"]["content"] + + # 解析响应 + foreshadowing, new_threads = self._parse_foreshadowing_response( + content, + episode_number + ) + + logger.info( + f"检测到 {len(foreshadowing)} 个伏笔," + f"{len(new_threads)} 个新待收线" + ) + return foreshadowing, new_threads + + except Exception as e: + logger.error(f"检测伏笔失败: {str(e)}") + return [], [] + + async def update_character_states( + self, + episode_content: str, + episode_number: int, + characters: List[str], + current_states: Dict[str, List[CharacterStateChange]] + ) -> Dict[str, CharacterStateChange]: + """ + 追踪角色状态变化 + + Args: + episode_content: 剧集内容 + episode_number: 集数 + characters: 角色列表 + current_states: 当前角色状态 + + Returns: + 角色状态变化字典 + """ + try: + logger.info(f"开始更新 EP{episode_number} 的角色状态") + + # 构建状态分析提示词 + prompt = self._build_character_state_prompt( + episode_content, + episode_number, + characters, + current_states + ) + + # 调用 LLM + response = await self.glm_client.chat( + messages=[ + { + "role": "system", + "content": "你是专业的角色分析专家,擅长追踪角色发展和状态变化。" + }, + {"role": "user", "content": prompt} + ], + temperature=0.3 + ) + + content = response["choices"][0]["message"]["content"] + + # 解析响应 + state_changes = self._parse_character_states_response( + content, + episode_number + ) + + logger.info(f"更新了 {len(state_changes)} 个角色的状态") + return state_changes + + except Exception as e: + logger.error(f"更新角色状态失败: {str(e)}") + return {} + + async def check_consistency( + self, + episode_content: str, + episode_number: int, + memory: EnhancedMemory + ) -> List[ConsistencyIssue]: + """ + 检查剧集与记忆的一致性 + + Args: + episode_content: 剧集内容 + episode_number: 集数 + memory: 项目记忆 + + Returns: + 一致性问题列表 + """ + try: + logger.info(f"开始检查 EP{episode_number} 的一致性") + + 
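+            # 注: 下游提示词为控制 token 消耗,只截取剧集内容前 5000 字符,
+            # 且仅附带最近 20 条时间线事件(见 _build_consistency_check_prompt)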
# 构建一致性检查提示词 + prompt = self._build_consistency_check_prompt( + episode_content, + episode_number, + memory + ) + + # 调用 LLM + response = await self.glm_client.chat( + messages=[ + { + "role": "system", + "content": "你是专业的剧情一致性检查专家,擅长发现剧情矛盾。" + }, + {"role": "user", "content": prompt} + ], + temperature=0.2 + ) + + content = response["choices"][0]["message"]["content"] + + # 解析响应 + issues = self._parse_consistency_response(content) + + logger.info(f"发现 {len(issues)} 个一致性问题") + return issues + + except Exception as e: + logger.error(f"一致性检查失败: {str(e)}") + return [] + + async def suggest_thread_resolution( + self, + thread: PendingThread, + current_episode: int, + memory: EnhancedMemory + ) -> ResolutionSuggestion: + """ + 为待收线问题提供收线建议 + + Args: + thread: 待收线问题 + current_episode: 当前集数 + memory: 项目记忆 + + Returns: + 收线建议 + """ + try: + logger.info(f"为待收线 {thread.id} 生成收线建议") + + # 构建收线建议提示词 + prompt = self._build_resolution_prompt( + thread, + current_episode, + memory + ) + + # 调用 LLM + response = await self.glm_client.chat( + messages=[ + { + "role": "system", + "content": "你是专业的剧情创作专家,擅长设计精彩的收线。" + }, + {"role": "user", "content": prompt} + ], + temperature=0.7 + ) + + content = response["choices"][0]["message"]["content"] + + # 解析响应 + suggestion = self._parse_resolution_response(content, thread.id) + + logger.info(f"成功生成待收线 {thread.id} 的建议") + return suggestion + + except Exception as e: + logger.error(f"生成收线建议失败: {str(e)}") + return ResolutionSuggestion( + thread_id=thread.id, + suggested_episode=current_episode + 1, + approach="无法生成建议", + key_elements=[], + character_involvement={}, + potential_twists=[], + estimated_impact="" + ) + + async def update_memory_from_episode( + self, + project: SeriesProject, + episode: Episode + ) -> MemoryExtractionResult: + """ + 从剧集更新项目记忆(综合操作) + + Args: + project: 项目 + episode: 剧集 + + Returns: + 更新结果 + """ + try: + logger.info(f"开始从 EP{episode.number} 更新项目 {project.id} 的记忆") + + if not episode.content: + return MemoryExtractionResult( + success=False, + message="剧集内容为空" + ) + + # 获取角色列表 + characters = list(project.globalContext.characterProfiles.keys()) + + # 转换现有记忆为 EnhancedMemory + enhanced_memory = self._convert_to_enhanced_memory(project.memory) + + # 1. 提取事件 + events = await self.extract_events_from_episode( + episode.content, + episode.number, + characters + ) + + # 2. 检测伏笔和待收线 + existing_threads = enhanced_memory.pendingThreads + foreshadowing, new_threads = await self.detect_foreshadowing( + episode.content, + episode.number, + existing_threads + ) + + # 3. 更新角色状态 + state_changes = await self.update_character_states( + episode.content, + episode.number, + characters, + enhanced_memory.characterStates + ) + + # 4. 检查一致性 + issues = await self.check_consistency( + episode.content, + episode.number, + enhanced_memory + ) + + # 5. 
更新记忆 + enhanced_memory.eventTimeline.extend(events) + enhanced_memory.foreshadowing.extend(foreshadowing) + enhanced_memory.pendingThreads.extend(new_threads) + + # 更新角色状态 + for char_name, change in state_changes.items(): + if char_name not in enhanced_memory.characterStates: + enhanced_memory.characterStates[char_name] = [] + enhanced_memory.characterStates[char_name].append(change) + + # 添加一致性问题 + enhanced_memory.consistencyIssues.extend(issues) + + # 更新元数据 + enhanced_memory.last_updated = datetime.now() + enhanced_memory.last_episode_processed = episode.number + + # 转换回 Memory 并保存 + project.memory = self._convert_to_memory(enhanced_memory) + + logger.info( + f"记忆更新完成: {len(events)} 事件, " + f"{len(foreshadowing)} 伏笔, {len(new_threads)} 待收线, " + f"{len(state_changes)} 角色状态, {len(issues)} 问题" + ) + + return MemoryExtractionResult( + success=True, + events_extracted=len(events), + foreshadowing_detected=len(foreshadowing), + character_states_updated=len(state_changes), + consistency_issues_found=len(issues), + threads_suggested=len(new_threads), + new_threads=new_threads, + message="记忆更新成功" + ) + + except Exception as e: + logger.error(f"更新记忆失败: {str(e)}") + return MemoryExtractionResult( + success=False, + message=f"更新失败: {str(e)}" + ) + + # ============================================ + # 提示词构建方法 + # ============================================ + + def _build_event_extraction_prompt( + self, + content: str, + episode_number: int, + characters: List[str] + ) -> str: + """构建事件提取提示词""" + return f"""请分析以下第 {episode_number} 集的剧情内容,提取关键事件。 + +【角色列表】 +{', '.join(characters) if characters else '无'} + +【剧集内容】 +{content[:5000]} # 限制长度以避免超出 token 限制 + +【任务要求】 +1. 识别剧情中的关键事件(推动剧情发展、改变角色关系、揭示重要信息等) +2. 对每个事件,提供: + - 事件描述(简明扼要) + - 涉及的角色 + - 重要程度(low/medium/high) + - 标签(如:冲突/转折/揭示/情感等) + +请以 JSON 格式返回,格式如下: +{{ + "events": [ + {{ + "event": "事件描述", + "characters_involved": ["角色1", "角色2"], + "importance": "high", + "tags": ["转折", "冲突"] + }} + ] +}} + +只返回 JSON,不要其他内容。""" + + def _build_foreshadowing_detection_prompt( + self, + content: str, + episode_number: int, + existing_threads: List[PendingThread] + ) -> str: + """构建伏笔检测提示词""" + threads_info = "\n".join([ + f"- {t.id}: {t.description} (重要性: {t.importance})" + for t in existing_threads[:10] # 限制数量 + ]) + + return f"""请分析以下剧情,识别伏笔和新的待收线问题。 + +【当前集数】 +第 {episode_number} 集 + +【现有待收线】 +{threads_info if threads_info else '无'} + +【剧集内容】 +{content[:5000]} + +【任务要求】 +1. 识别新的伏笔(后续会呼应的线索) +2. 识别新的待收线问题(需要后续解决) +3. 检查现有待收线是否有进展 + +请以 JSON 格式返回: +{{ + "foreshadowing": [ + {{ + "description": "伏笔描述", + "type": "plot/character/dialogue/object", + "importance": "high/medium/low" + }} + ], + "new_threads": [ + {{ + "description": "待收线描述", + "importance": "high/medium/low", + "characters_involved": ["角色名"], + "reminder_episode": 建议收线集数 + }} + ] +}} + +只返回 JSON,不要其他内容。""" + + def _build_character_state_prompt( + self, + content: str, + episode_number: int, + characters: List[str], + current_states: Dict[str, List[CharacterStateChange]] + ) -> str: + """构建角色状态分析提示词""" + states_info = "" + for char, states in list(current_states.items())[:5]: + latest = states[-1] if states else None + states_info += f"\n{char}: {latest.state if latest else '未知状态'}\n" + + return f"""请分析以下剧情,追踪角色的状态变化。 + +【当前集数】 +第 {episode_number} 集 + +【角色当前状态】 +{states_info if states_info else '无记录'} + +【剧集内容】 +{content[:5000]} + +【任务要求】 +1. 识别哪些角色发生了状态变化(心理、处境、关系等) +2. 描述变化的具体内容 +3. 
评估情感影响 + +请以 JSON 格式返回: +{{ + "state_changes": [ + {{ + "character": "角色名", + "state": "新的状态描述", + "change": "变化过程描述", + "emotional_impact": "情感影响(如:痛苦/喜悦/恐惧等)", + "trigger_event": "触发变化的事件" + }} + ] +}} + +只返回 JSON,不要其他内容。""" + + def _build_consistency_check_prompt( + self, + content: str, + episode_number: int, + memory: EnhancedMemory + ) -> str: + """构建一致性检查提示词""" + # 构建关键信息摘要 + timeline_summary = "\n".join([ + f"EP{e.episode}: {e.event}" + for e in memory.eventTimeline[-20:] # 最近20个事件 + ]) + + threads_summary = "\n".join([ + f"- {t.description} (EP{t.introduced_at})" + for t in memory.pendingThreads if not t.resolved + ]) + + return f"""请检查以下剧集与之前剧情的一致性。 + +【当前集数】 +第 {episode_number} 集 + +【之前的关键事件】 +{timeline_summary if timeline_summary else '无'} + +【未收线的待收线】 +{threads_summary if threads_summary else '无'} + +【当前剧集内容】 +{content[:5000]} + +【任务要求】 +检查以下类型的一致性问题: +1. 角色行为不一致(与之前性格或状态不符) +2. 剧情矛盾(与已建立的事实冲突) +3. 设定错误(场景、时间、物品等) +4. 对话不合理(角色语气、知识范围等) + +请以 JSON 格式返回发现的问题: +{{ + "issues": [ + {{ + "type": "character/plot/setting/dialogue", + "severity": "high/medium/low", + "description": "问题描述", + "location": "问题位置(如:第X幕)", + "conflicting_info": {{"之前": "...", "现在": "..."}}, + "suggestion": "修复建议" + }} + ] +}} + +如果没有发现问题,返回 {{"issues": []}}。 +只返回 JSON,不要其他内容。""" + + def _build_resolution_prompt( + self, + thread: PendingThread, + current_episode: int, + memory: EnhancedMemory + ) -> str: + """构建收线建议提示词""" + # 获取相关上下文 + related_events = [ + e for e in memory.eventTimeline + if thread.id in e.related_threads + ] + + context = f""" +【待收线信息】 +描述: {thread.description} +引入集数: EP{thread.introduced_at} +重要程度: {thread.importance} +涉及角色: {', '.join(thread.characters_involved)} + +【相关事件】 +{chr(10).join([f"EP{e.episode}: {e.event}" for e in related_events[-5:]])} + +【当前进度】 +第 {current_episode} 集 +""" + + return f"""基于以上信息,请为这个待收线问题设计一个精彩的收线方案。 + +{context} + +【任务要求】 +1. 建议在哪个集数收线(考虑剧情节奏和重要性) +2. 设计收线方式(直接/反转/渐进等) +3. 列出关键要素 +4. 说明各角色的参与方式 +5. 提供可能的反转点子 +6. 
评估对整体剧情的影响 + +请以 JSON 格式返回: +{{ + "suggested_episode": 收线集数, + "approach": "收线方式描述", + "key_elements": ["要素1", "要素2"], + "character_involvement": {{"角色名": "参与方式"}}, + "potential_twists": ["反转点子1", "反转点子2"], + "estimated_impact": "对整体剧情的影响" +}} + +只返回 JSON,不要其他内容。""" + + # ============================================ + # 响应解析方法 + # ============================================ + + def _parse_events_response( + self, + content: str, + episode_number: int + ) -> List[TimelineEvent]: + """解析事件提取响应""" + try: + # 尝试提取 JSON + json_start = content.find('{') + json_end = content.rfind('}') + 1 + + if json_start == -1 or json_end == 0: + logger.warning("无法在响应中找到 JSON") + return [] + + json_str = content[json_start:json_end] + data = json.loads(json_str) + + events = [] + for item in data.get("events", []): + event = TimelineEvent( + episode=episode_number, + event=item.get("event", ""), + characters_involved=item.get("characters_involved", []), + importance=ImportanceLevel(item.get("importance", "medium")), + tags=item.get("tags", []) + ) + events.append(event) + + return events + + except json.JSONDecodeError as e: + logger.error(f"JSON 解析失败: {str(e)}") + return [] + except Exception as e: + logger.error(f"解析事件响应失败: {str(e)}") + return [] + + def _parse_foreshadowing_response( + self, + content: str, + episode_number: int + ) -> tuple[List[ForeshadowingEvent], List[PendingThread]]: + """解析伏笔检测响应""" + try: + json_start = content.find('{') + json_end = content.rfind('}') + 1 + + if json_start == -1 or json_end == 0: + return [], [] + + json_str = content[json_start:json_end] + data = json.loads(json_str) + + # 解析伏笔 + foreshadowing = [] + for item in data.get("foreshadowing", []): + fs = ForeshadowingEvent( + id=str(uuid.uuid4()), + description=item.get("description", ""), + introduced_at=episode_number, + type=item.get("type", "plot"), + importance=ImportanceLevel(item.get("importance", "medium")) + ) + foreshadowing.append(fs) + + # 解析待收线 + threads = [] + for item in data.get("new_threads", []): + thread = PendingThread( + id=str(uuid.uuid4()), + description=item.get("description", ""), + introduced_at=episode_number, + importance=ImportanceLevel(item.get("importance", "medium")), + characters_involved=item.get("characters_involved", []), + reminder_episode=item.get("reminder_episode") + ) + threads.append(thread) + + return foreshadowing, threads + + except Exception as e: + logger.error(f"解析伏笔响应失败: {str(e)}") + return [], [] + + def _parse_character_states_response( + self, + content: str, + episode_number: int + ) -> Dict[str, CharacterStateChange]: + """解析角色状态响应""" + try: + json_start = content.find('{') + json_end = content.rfind('}') + 1 + + if json_start == -1 or json_end == 0: + return {} + + json_str = content[json_start:json_end] + data = json.loads(json_str) + + states = {} + for item in data.get("state_changes", []): + char_name = item.get("character", "") + if char_name: + change = CharacterStateChange( + episode=episode_number, + state=item.get("state", ""), + change=item.get("change", ""), + emotional_impact=item.get("emotional_impact"), + trigger_event=item.get("trigger_event") + ) + states[char_name] = change + + return states + + except Exception as e: + logger.error(f"解析角色状态响应失败: {str(e)}") + return {} + + def _parse_consistency_response( + self, + content: str + ) -> List[ConsistencyIssue]: + """解析一致性检查响应""" + try: + json_start = content.find('{') + json_end = content.rfind('}') + 1 + + if json_start == -1 or json_end == 0: + return [] + + json_str = content[json_start:json_end] + 
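+            # 容错处理: LLM 偶尔会在 JSON 前后附加说明文字,因此先截取
+            # 首个 '{' 到最后一个 '}' 之间的子串再解析,例如:
+            #   '分析如下: {"issues": []} 以上' -> '{"issues": []}'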
data = json.loads(json_str) + + issues = [] + for item in data.get("issues", []): + issue = ConsistencyIssue( + id=str(uuid.uuid4()), + type=item.get("type", "plot"), + severity=ImportanceLevel(item.get("severity", "medium")), + description=item.get("description", ""), + location=item.get("location"), + conflicting_info=item.get("conflicting_info", {}), + suggestion=item.get("suggestion", "") + ) + issues.append(issue) + + return issues + + except Exception as e: + logger.error(f"解析一致性响应失败: {str(e)}") + return [] + + def _parse_resolution_response( + self, + content: str, + thread_id: str + ) -> ResolutionSuggestion: + """解析收线建议响应""" + try: + json_start = content.find('{') + json_end = content.rfind('}') + 1 + + if json_start == -1 or json_end == 0: + # 返回默认建议 + return ResolutionSuggestion( + thread_id=thread_id, + suggested_episode=0, + approach="无法解析建议", + key_elements=[], + character_involvement={}, + potential_twists=[], + estimated_impact="" + ) + + json_str = content[json_start:json_end] + data = json.loads(json_str) + + return ResolutionSuggestion( + thread_id=thread_id, + suggested_episode=data.get("suggested_episode", 0), + approach=data.get("approach", ""), + key_elements=data.get("key_elements", []), + character_involvement=data.get("character_involvement", {}), + potential_twists=data.get("potential_twists", []), + estimated_impact=data.get("estimated_impact", "") + ) + + except Exception as e: + logger.error(f"解析收线建议响应失败: {str(e)}") + return ResolutionSuggestion( + thread_id=thread_id, + suggested_episode=0, + approach="解析失败", + key_elements=[], + character_involvement={}, + potential_twists=[], + estimated_impact="" + ) + + # ============================================ + # 转换方法 + # ============================================ + + def _convert_to_enhanced_memory(self, memory: Memory) -> EnhancedMemory: + """将基础 Memory 转换为 EnhancedMemory""" + return EnhancedMemory( + eventTimeline=[ + TimelineEvent(**event) if isinstance(event, dict) else event + for event in memory.eventTimeline + ], + pendingThreads=[ + PendingThread(**thread) if isinstance(thread, dict) else thread + for thread in memory.pendingThreads + ], + characterStates={ + char: [ + CharacterStateChange(**state) if isinstance(state, dict) else state + for state in states + ] + for char, states in memory.characterStates.items() + }, + foreshadowing=[ + ForeshadowingEvent(**fs) if isinstance(fs, dict) else fs + for fs in memory.foreshadowing + ], + relationships=memory.relationships if hasattr(memory, 'relationships') else {}, + consistencyIssues=[], + last_updated=datetime.now(), + last_episode_processed=0 + ) + + def _convert_to_memory(self, enhanced: EnhancedMemory) -> Memory: + """将 EnhancedMemory 转换回基础 Memory""" + return Memory( + eventTimeline=[event.dict() for event in enhanced.eventTimeline], + pendingThreads=[thread.dict() for thread in enhanced.pendingThreads], + foreshadowing=[fs.dict() for fs in enhanced.foreshadowing], + characterStates={ + char: [state.dict() for state in states] + for char, states in enhanced.characterStates.items() + } + ) + + +# ============================================ +# 全局单例 +# ============================================ + +_memory_manager: Optional[MemoryManager] = None + + +def get_memory_manager() -> MemoryManager: + """获取记忆管理器单例""" + global _memory_manager + if _memory_manager is None: + _memory_manager = MemoryManager() + return _memory_manager diff --git a/backend/app/core/review/__init__.py b/backend/app/core/review/__init__.py new file mode 100644 index 0000000..7621617 
--- /dev/null +++ b/backend/app/core/review/__init__.py @@ -0,0 +1,12 @@ +""" +Review Core Module + +审核核心模块 +""" +from app.core.review.review_manager import ReviewManager, get_review_manager, review_manager + +__all__ = [ + "ReviewManager", + "get_review_manager", + "review_manager" +] diff --git a/backend/app/core/review/review_manager.py b/backend/app/core/review/review_manager.py new file mode 100644 index 0000000..d92dff9 --- /dev/null +++ b/backend/app/core/review/review_manager.py @@ -0,0 +1,825 @@ +""" +Review Manager + +审核管理器 - 负责执行内容审核,整合多个审核维度 +""" +import json +import uuid +from typing import List, Dict, Any, Optional +from datetime import datetime + +from app.models.review import ( + ReviewConfig, + ReviewResult, + ReviewIssue, + DimensionScore, + DimensionType, + SeverityLevel, + IssueType, + Location, + CustomRule +) +from app.models.project import SeriesProject, Episode +from app.core.llm.glm_client import GLMClient +from app.core.skills.skill_manager import SkillManager +from app.utils.logger import get_logger + + +logger = get_logger(__name__) + + +class ReviewManager: + """ + 审核管理器 + + 核心功能: + 1. 执行完整的审核流程 + 2. 调用多个审核Skill + 3. 聚合各维度结果 + 4. 检查自定义规则 + 5. 计算总分 + """ + + # 内置维度定义 + DIMENSIONS = { + DimensionType.consistency: { + "name": "一致性审核", + "description": "检查人物、场景、剧情的前后一致性", + "default_strictness": 0.7, + "skill_ids": ["consistency_checker"] + }, + DimensionType.quality: { + "name": "质量审核", + "description": "检查文字质量、表达流畅度", + "default_strictness": 0.6, + "skill_ids": [] + }, + DimensionType.pacing: { + "name": "节奏审核", + "description": "检查剧情节奏、叙事速度", + "default_strictness": 0.5, + "skill_ids": [] + }, + DimensionType.dialogue: { + "name": "对话审核", + "description": "检查对话质量和人物口吻", + "default_strictness": 0.7, + "skill_ids": [] + }, + DimensionType.character: { + "name": "人物审核", + "description": "检查人物塑造和性格表现", + "default_strictness": 0.6, + "skill_ids": [] + }, + DimensionType.plot: { + "name": "剧情审核", + "description": "检查剧情逻辑和完整性", + "default_strictness": 0.7, + "skill_ids": [] + } + } + + def __init__( + self, + glm_client: Optional[GLMClient] = None, + skill_manager: Optional[SkillManager] = None + ): + """ + 初始化审核管理器 + + Args: + glm_client: GLM客户端实例 + skill_manager: Skill管理器实例 + """ + self.glm_client = glm_client + self.skill_manager = skill_manager + + if not self.glm_client: + from app.core.llm.glm_client import get_glm_client + self.glm_client = get_glm_client() + + if not self.skill_manager: + from app.core.skills.skill_manager import get_skill_manager + self.skill_manager = get_skill_manager() + + logger.info("审核管理器初始化完成") + + async def review_episode( + self, + project: SeriesProject, + episode: Episode, + config: ReviewConfig, + dimensions: Optional[List[DimensionType]] = None + ) -> ReviewResult: + """ + 执行完整的审核流程 + + Args: + project: 项目信息 + episode: 剧集内容 + config: 审核配置 + dimensions: 指定要审核的维度(可选,不指定则审核所有启用的维度) + + Returns: + ReviewResult: 审核结果 + """ + start_time = datetime.now() + result_id = str(uuid.uuid4()) + + logger.info(f"开始审核剧集: {episode.id} - EP{episode.number}") + + # 确定要审核的维度 + if dimensions is None: + # 审核所有启用的维度 + dimensions = [ + dim for dim, dim_config in config.dimension_settings.items() + if dim_config.enabled + ] + else: + # 过滤出启用的指定维度 + dimensions = [ + dim for dim in dimensions + if dim in config.dimension_settings and config.dimension_settings[dim].enabled + ] + + logger.info(f"审核维度: {[d.value for d in dimensions]}") + + # 初始化结果 + result = ReviewResult( + id=result_id, + episode_id=episode.id, + episode_number=episode.number, + 
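+            # 分数与问题列表先按 "0 分 / 未通过 / 空列表" 初始化,
+            # 待各维度审核及 _aggregate_results 聚合后回填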
project_id=project.id, + dimension_scores=[], + issues=[], + overall_score=0.0, + passed=False, + reviewed_at=start_time + ) + + # 执行各维度审核 + for dimension in dimensions: + try: + logger.info(f"执行维度审核: {dimension.value}") + + dimension_score = await self._run_dimension_review( + project=project, + episode=episode, + dimension=dimension, + config=config, + dim_config=config.dimension_settings.get(dimension) + ) + + result.dimension_scores.append(dimension_score) + result.issues.extend(dimension_score.issues) + + logger.info( + f"维度 {dimension.value} 审核完成: " + f"分数={dimension_score.score}, " + f"问题数={len(dimension_score.issues)}" + ) + + except Exception as e: + logger.error(f"维度 {dimension.value} 审核失败: {str(e)}") + # 创建失败结果 + result.dimension_scores.append( + DimensionScore( + dimension=dimension, + score=0.0, + passed=False, + issue_count=0, + issues=[] + ) + ) + + # 检查自定义规则 + if config.custom_rules: + try: + custom_issues = await self._check_custom_rules( + episode=episode, + rules=config.custom_rules, + config=config + ) + result.issues.extend(custom_issues) + logger.info(f"自定义规则检查完成,发现 {len(custom_issues)} 个问题") + except Exception as e: + logger.error(f"自定义规则检查失败: {str(e)}") + + # 聚合结果 + self._aggregate_results(result, config) + + # 计算耗时 + result.review_duration = (datetime.now() - start_time).total_seconds() + + logger.info( + f"审核完成: 总分={result.overall_score:.1f}, " + f"通过={'是' if result.passed else '否'}, " + f"问题数={result.issue_count}" + ) + + return result + + async def _run_dimension_review( + self, + project: SeriesProject, + episode: Episode, + dimension: DimensionType, + config: ReviewConfig, + dim_config: Any + ) -> DimensionScore: + """ + 运行单个维度的审核 + + Args: + project: 项目信息 + episode: 剧集内容 + dimension: 审核维度 + config: 总体审核配置 + dim_config: 维度配置 + + Returns: + DimensionScore: 维度分数 + """ + issues = [] + + # 获取维度信息 + dim_info = self.DIMENSIONS.get(dimension) + if not dim_info: + logger.warning(f"未知维度: {dimension}") + return DimensionScore( + dimension=dimension, + score=0.0, + passed=True, + issue_count=0, + issues=[] + ) + + # 尝试使用Skill进行审核 + if dim_info["skill_ids"]: + for skill_id in dim_info["skill_ids"]: + if skill_id in config.enabled_review_skills: + try: + skill_issues = await self._run_review_skill( + project=project, + episode=episode, + skill_id=skill_id, + dimension=dimension, + strictness=dim_config.strictness if dim_config else config.overall_strictness + ) + issues.extend(skill_issues) + except Exception as e: + logger.error(f"Skill {skill_id} 审核失败: {str(e)}") + + # 如果没有Skill或Skill失败,使用LLM通用审核 + if not issues and dim_config: + try: + llm_issues = await self._run_llm_review( + project=project, + episode=episode, + dimension=dimension, + strictness=dim_config.strictness + ) + issues.extend(llm_issues) + except Exception as e: + logger.error(f"LLM审核失败: {str(e)}") + + # 计算分数 + score = self.calculate_dimension_score(issues, dim_config.strictness if dim_config else config.overall_strictness) + + # 判断是否通过(分数 >= 75 视为通过) + passed = score >= 75.0 + + return DimensionScore( + dimension=dimension, + score=score, + passed=passed, + issue_count=len(issues), + issues=issues + ) + + async def _run_review_skill( + self, + project: SeriesProject, + episode: Episode, + skill_id: str, + dimension: DimensionType, + strictness: float + ) -> List[ReviewIssue]: + """ + 运行审核Skill + + Args: + project: 项目信息 + episode: 剧集内容 + skill_id: Skill ID + dimension: 审核维度 + strictness: 严格程度 + + Returns: + List[ReviewIssue]: 发现的问题列表 + """ + # 加载Skill + skill = await self.skill_manager.load_skill(skill_id) 
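+        # load_skill 内部带缓存: 命中 _cache 直接返回,未命中才解析 SKILL.md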
+ if not skill: + logger.warning(f"Skill未找到: {skill_id}") + return [] + + logger.info(f"使用Skill进行审核: {skill.name}") + + # 构建审核提示词 + review_prompt = self._build_review_prompt( + project=project, + episode=episode, + dimension=dimension, + strictness=strictness + ) + + # 调用Skill + try: + response = await self.glm_client.chat_with_skill( + skill_behavior=skill.behavior_guide, + user_input=review_prompt, + context={ + "project": project.name, + "episode": episode.number, + "dimension": dimension.value, + "strictness": strictness + }, + temperature=0.3 # 审核使用较低温度以保证稳定性 + ) + + # 解析响应 + return self._parse_review_response( + response=response, + dimension=dimension, + episode_number=episode.number + ) + + except Exception as e: + logger.error(f"Skill {skill_id} 调用失败: {str(e)}") + return [] + + async def _run_llm_review( + self, + project: SeriesProject, + episode: Episode, + dimension: DimensionType, + strictness: float + ) -> List[ReviewIssue]: + """ + 使用LLM进行通用审核(当没有专门Skill时) + + Args: + project: 项目信息 + episode: 剧集内容 + dimension: 审核维度 + strictness: 严格程度 + + Returns: + List[ReviewIssue]: 发现的问题列表 + """ + dim_info = self.DIMENSIONS.get(dimension, {}) + dim_name = dim_info.get("name", dimension.value) + + system_prompt = f"""你是专业的内容审核专家,负责审核剧集创作中的{dim_name}问题。 + +审核严格程度: {strictness:.1f} (0-1) + +请仔细分析以下剧集内容,找出{dim_name}方面的问题。 + +以JSON格式返回结果: +{{ + "issues": [ + {{ + "type": "问题类型", + "severity": "high/medium/low", + "description": "问题描述", + "suggestion": "修改建议", + "location": {{ + "scene": 场景编号, + "context": "相关上下文" + }} + }} + ] +}}""" + + user_prompt = f"""项目名称: {project.name} +集数: EP{episode.number} +标题: {episode.title or '未命名'} + +{episode.global_context_summary() if hasattr(episode, 'global_context_summary') else ''} + +剧集内容: +{episode.content} + +请分析{dim_name}问题。""" + + try: + response = await self.glm_client.chat( + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt} + ], + temperature=0.3 + ) + + content = response["choices"][0]["message"]["content"] + return self._parse_review_response( + response=content, + dimension=dimension, + episode_number=episode.number + ) + + except Exception as e: + logger.error(f"LLM审核失败: {str(e)}") + return [] + + def _build_review_prompt( + self, + project: SeriesProject, + episode: Episode, + dimension: DimensionType, + strictness: float + ) -> str: + """构建审核提示词""" + dim_info = self.DIMENSIONS.get(dimension, {}) + + prompt_parts = [ + f"请审核以下剧集内容的{dim_info.get('name', dimension.value)}。", + f"", + f"项目: {project.name}", + f"集数: EP{episode.number}", + f"标题: {episode.title or '未命名'}", + f"", + f"审核严格程度: {strictness:.1f}", + f"", + ] + + # 添加全局上下文 + if project.globalContext: + prompt_parts.extend([ + "全局设定:", + f"- 世界观: {project.globalContext.worldSetting or '无'}", + f"- 人物: {', '.join(project.globalContext.characterProfiles.keys())}", + f"- 场景: {', '.join(project.globalContext.sceneSettings.keys())}", + f"" + ]) + + # 添加历史记忆 + if project.memory and project.memory.eventTimeline: + prompt_parts.extend([ + f"历史事件 ({len(project.memory.eventTimeline)} 条):", + "" + ]) + + # 添加剧集内容 + prompt_parts.extend([ + "剧集内容:", + episode.content or "(内容为空)" + ]) + + return "\n".join(prompt_parts) + + def _parse_review_response( + self, + response: str, + dimension: DimensionType, + episode_number: int + ) -> List[ReviewIssue]: + """ + 解析审核响应 + + Args: + response: Skill或LLM的响应 + dimension: 审核维度 + episode_number: 集数 + + Returns: + List[ReviewIssue]: 解析出的问题列表 + """ + issues = [] + + try: + # 尝试提取JSON + json_start = 
response.find("{") + json_end = response.rfind("}") + 1 + + if json_start >= 0 and json_end > json_start: + json_str = response[json_start:json_end] + data = json.loads(json_str) + + # 解析issues数组 + raw_issues = data.get("issues", []) + + # 兼容consistency_checker的输出格式 + if not raw_issues and "dimension_scores" in data: + # 从dimension_scores提取issues + for dim_name, dim_data in data.get("dimension_scores", {}).items(): + raw_issues.extend(dim_data.get("issues", [])) + + for raw_issue in raw_issues: + try: + # 解析位置 + location_data = raw_issue.get("location", {}) + if isinstance(location_data, dict): + location = Location( + episode=location_data.get("episode", episode_number), + scene=location_data.get("scene"), + line=location_data.get("line"), + context=location_data.get("context") + ) + else: + location = Location(episode=episode_number) + + # 映射问题类型 + issue_type_str = raw_issue.get("type", "custom") + try: + issue_type = IssueType(issue_type_str) + except ValueError: + issue_type = IssueType.custom + + # 映射严重程度 + severity_str = raw_issue.get("severity", "medium").lower() + try: + severity = SeverityLevel(severity_str) + except ValueError: + severity = SeverityLevel.medium + + issue = ReviewIssue( + id=str(uuid.uuid4()), + type=issue_type, + dimension=dimension, + severity=severity, + location=location, + description=raw_issue.get("description", ""), + suggestion=raw_issue.get("suggestion", "") + ) + issues.append(issue) + + except Exception as e: + logger.warning(f"解析单个问题失败: {str(e)}") + continue + + except json.JSONDecodeError as e: + logger.warning(f"JSON解析失败: {str(e)}") + # 尝试从文本中提取问题 + issues = self._extract_issues_from_text(response, dimension, episode_number) + + except Exception as e: + logger.error(f"解析审核响应失败: {str(e)}") + + return issues + + def _extract_issues_from_text( + self, + text: str, + dimension: DimensionType, + episode_number: int + ) -> List[ReviewIssue]: + """从文本中提取问题(当JSON解析失败时)""" + issues = [] + + # 简单的文本模式匹配 + lines = text.split('\n') + current_issue = None + + for line in lines: + line = line.strip() + + # 查找问题标记 + if any(marker in line.lower() for marker in ['问题', 'issue', '错误', 'error']): + current_issue = { + "description": line, + "severity": SeverityLevel.medium + } + elif current_issue and '建议' in line: + current_issue["suggestion"] = line + issues.append(current_issue) + current_issue = None + + return issues + + async def _check_custom_rules( + self, + episode: Episode, + rules: List[CustomRule], + config: ReviewConfig + ) -> List[ReviewIssue]: + """ + 检查自定义规则 + + Args: + episode: 剧集内容 + rules: 自定义规则列表 + config: 审核配置 + + Returns: + List[ReviewIssue]: 违反规则的问题 + """ + issues = [] + + # 只检查启用的规则 + enabled_rules = [r for r in rules if r.enabled] + + if not enabled_rules: + return issues + + logger.info(f"检查 {len(enabled_rules)} 条自定义规则") + + for rule in enabled_rules: + try: + # 使用LLM检查规则 + rule_issues = await self._check_single_rule(episode, rule, config) + issues.extend(rule_issues) + + except Exception as e: + logger.error(f"检查规则 {rule.id} 失败: {str(e)}") + + return issues + + async def _check_single_rule( + self, + episode: Episode, + rule: CustomRule, + config: ReviewConfig + ) -> List[ReviewIssue]: + """ + 检查单条自定义规则 + + Args: + episode: 剧集内容 + rule: 自定义规则 + config: 审核配置 + + Returns: + List[ReviewIssue]: 违反该规则的问题 + """ + system_prompt = f"""你是内容审核专家,检查内容是否符合特定规则。 + +规则: {rule.name} +规则描述: {rule.description} +触发条件: {rule.trigger_condition} + +请分析以下内容,判断是否违反该规则。 + +以JSON格式返回: +{{ + "violated": true/false, + "violations": [ + {{ + "description": "具体描述哪里违反了规则", 
+ "location": {{"scene": 场景编号, "context": "上下文"}}, + "suggestion": "修改建议" + }} + ] +}}""" + + user_prompt = f"""集数: EP{episode.number} +内容: +{episode.content} + +请检查是否违反规则: {rule.name}""" + + try: + response = await self.glm_client.chat( + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt} + ], + temperature=0.2 + ) + + content = response["choices"][0]["message"]["content"] + data = json.loads(content) + + if not data.get("violated", False): + return [] + + issues = [] + for violation in data.get("violations", []): + location_data = violation.get("location", {}) + issue = ReviewIssue( + id=str(uuid.uuid4()), + type=IssueType.custom, + dimension=rule.dimension, + severity=rule.severity, + location=Location( + episode=episode.number, + scene=location_data.get("scene"), + context=location_data.get("context") + ), + description=violation.get("description", ""), + suggestion=violation.get("suggestion", ""), + rule_id=rule.id + ) + issues.append(issue) + + return issues + + except Exception as e: + logger.error(f"规则检查失败 {rule.id}: {str(e)}") + return [] + + def _aggregate_results(self, result: ReviewResult, config: ReviewConfig): + """ + 聚合各维度结果为总体分数 + + Args: + result: 审核结果 + config: 审核配置 + """ + if not result.dimension_scores: + result.overall_score = 0.0 + result.passed = False + return + + # 加权平均计算总分 + total_weight = 0.0 + weighted_sum = 0.0 + + for dim_score in result.dimension_scores: + dim_config = config.dimension_settings.get(dim_score.dimension) + weight = dim_config.weight if dim_config else 1.0 + + weighted_sum += dim_score.score * weight + total_weight += weight + + if total_weight > 0: + result.overall_score = weighted_sum / total_weight + else: + result.overall_score = sum(ds.score for ds in result.dimension_scores) / len(result.dimension_scores) + + # 判断是否通过 + result.passed = ( + result.overall_score >= config.pass_threshold and + result.high_severity_count == 0 + ) + + # 计算统计信息 + result.calculate_statistics() + + def calculate_dimension_score( + self, + issues: List[ReviewIssue], + strictness: float + ) -> float: + """ + 计算维度分数 + + Args: + issues: 问题列表 + strictness: 严格程度 + + Returns: + float: 分数 (0-100) + """ + if not issues: + return 100.0 + + # 基础分数 + score = 100.0 + + # 根据问题严重程度扣分 + for issue in issues: + if issue.severity == SeverityLevel.high: + score -= 20 * strictness + elif issue.severity == SeverityLevel.medium: + score -= 10 * strictness + else: # low + score -= 5 * strictness + + # 确保分数在 0-100 范围内 + return max(0.0, min(100.0, score)) + + def get_dimension_info(self, dimension: DimensionType) -> Optional[Dict[str, Any]]: + """ + 获取维度信息 + + Args: + dimension: 维度类型 + + Returns: + 维度信息字典 + """ + return self.DIMENSIONS.get(dimension) + + def list_all_dimensions(self) -> List[DimensionType]: + """列出所有支持的维度""" + return list(self.DIMENSIONS.keys()) + + +# 全局单例 +_review_manager: Optional[ReviewManager] = None + + +def get_review_manager() -> ReviewManager: + """获取审核管理器单例""" + global _review_manager + if _review_manager is None: + _review_manager = ReviewManager() + return _review_manager + + +# 便捷导出 +review_manager = get_review_manager() diff --git a/backend/app/core/skills/__init__.py b/backend/app/core/skills/__init__.py new file mode 100644 index 0000000..e1df1bf --- /dev/null +++ b/backend/app/core/skills/__init__.py @@ -0,0 +1,6 @@ +""" +Skills 模块 +""" +from app.core.skills.skill_manager import SkillManager, get_skill_manager, skill_manager + +__all__ = ["SkillManager", "get_skill_manager", "skill_manager"] diff --git 
a/backend/app/core/skills/skill_manager.py b/backend/app/core/skills/skill_manager.py new file mode 100644 index 0000000..138997e --- /dev/null +++ b/backend/app/core/skills/skill_manager.py @@ -0,0 +1,915 @@ +""" +Skill 管理器 + +核心功能: +1. 加载和解析 Skill 文件 +2. 提取行为指导内容 +3. 缓存管理 +4. Skill 验证 +5. Script 执行(Builtin Skills) +6. Tool 注册和路由(Agent-Skill 生命周期) +""" +from pathlib import Path +from typing import Optional, List, Dict, Any, Callable +import re +import subprocess +import json +import sys +from datetime import datetime +import yaml + +from app.models.skill import Skill, SkillCreate, SkillUpdate +from app.config import settings +from app.utils.logger import get_logger + +logger = get_logger(__name__) + + +# ============================================================================ +# Tool Registry - Agent-Skill 生命周期:注册阶段 +# ============================================================================ + +class ToolRegistry: + """ + 工具注册表 + + 实现 Agent-Skill 生命周期的第一阶段:注册 + - 将 Skill 的 callable tools 注册到系统中 + - 为每个 tool 生成 JSON Schema 供 LLM 路由使用 + """ + + def __init__(self): + self._tools: Dict[str, Dict[str, Any]] = {} + self._skill_tool_map: Dict[str, List[str]] = {} # skill_id -> [tool_names] + + def register_tool( + self, + tool_id: str, + skill_id: str, + name: str, + description: str, + parameters: Dict[str, Any], + handler: Callable + ): + """ + 注册一个工具 + + Args: + tool_id: 工具唯一标识 + skill_id: 所属 Skill ID + name: 工具名称 + description: 工具描述(用于 LLM 路由决策) + parameters: JSON Schema 格式的参数定义 + handler: 执行函数 + """ + self._tools[tool_id] = { + "id": tool_id, + "skill_id": skill_id, + "name": name, + "description": description, + "parameters": parameters, + "handler": handler + } + + if skill_id not in self._skill_tool_map: + self._skill_tool_map[skill_id] = [] + self._skill_tool_map[skill_id].append(tool_id) + + logger.info(f"注册工具: {tool_id} (from {skill_id})") + + def get_tool(self, tool_id: str) -> Optional[Dict[str, Any]]: + """获取工具定义""" + return self._tools.get(tool_id) + + def get_tools_by_skill(self, skill_id: str) -> List[Dict[str, Any]]: + """获取指定 Skill 的所有工具""" + tool_ids = self._skill_tool_map.get(skill_id, []) + return [self._tools[tid] for tid in tool_ids if tid in self._tools] + + def list_all_tools(self) -> List[Dict[str, Any]]: + """列出所有已注册工具(用于 LLM 路由决策)""" + return [ + { + "id": t["id"], + "skill_id": t["skill_id"], + "name": t["name"], + "description": t["description"], + "parameters": t["parameters"] + } + for t in self._tools.values() + ] + + def get_tool_schemas_for_llm(self) -> List[Dict[str, Any]]: + """ + 获取所有工具的 JSON Schema(注入到 System Prompt 或 API tools 字段) + + 这是 Agent-Skill 生命周期第一阶段的关键输出 + """ + return [ + { + "type": "function", + "function": { + "name": t["name"], + "description": t["description"], + "parameters": t["parameters"] + } + } + for t in self._tools.values() + ] + + +# 全局工具注册表 +_tool_registry: Optional[ToolRegistry] = None + + +def get_tool_registry() -> ToolRegistry: + """获取工具注册表单例""" + global _tool_registry + if _tool_registry is None: + _tool_registry = ToolRegistry() + return _tool_registry + + +class SkillManager: + """ + Skill 管理器 + + 负责加载、解析、验证和管理所有 Skills + 实现 Agent-Skill 生命周期:注册 → 路由 → 执行 → 反馈 + """ + + def __init__(self, skills_dir: Optional[str] = None): + """ + 初始化 Skill 管理器 + + Args: + skills_dir: Skills 存储目录 + """ + self.skills_dir = Path(skills_dir or settings.skills_dir) + self.builtin_dir = self.skills_dir / "builtin_skills" + self.user_dir = self.skills_dir / "user_skills" + self._cache: Dict[str, Skill] = {} + self.tool_registry = 
get_tool_registry() + + # 确保目录存在 + self.builtin_dir.mkdir(parents=True, exist_ok=True) + self.user_dir.mkdir(parents=True, exist_ok=True) + + logger.info(f"Skill 管理器初始化完成,目录: {self.skills_dir}") + + async def load_skill(self, skill_id: str) -> Optional[Skill]: + """ + 加载 Skill + + Args: + skill_id: Skill ID + + Returns: + Skill 对象或 None + """ + # 先检查缓存 + if skill_id in self._cache: + logger.debug(f"从缓存加载 Skill: {skill_id}") + return self._cache[skill_id] + + # 确定路径 + skill_path = self._find_skill_path(skill_id) + if not skill_path: + logger.warning(f"Skill 未找到: {skill_id}") + return None + + try: + # 解析 SKILL.md + skill = await self._parse_skill_file(skill_path, skill_id) + + # 缓存 + self._cache[skill_id] = skill + logger.info(f"成功加载 Skill: {skill_id} - {skill.name}") + return skill + + except Exception as e: + logger.error(f"加载 Skill 失败 {skill_id}: {str(e)}") + return None + + def _find_skill_path(self, skill_id: str) -> Optional[Path]: + """ + 查找 Skill 路径 + + 优先级:用户 Skills > 内置 Skills + """ + # 先查用户 Skills + user_path = self.user_dir / skill_id / "SKILL.md" + if user_path.exists(): + return user_path + + # 再查内置 Skills + builtin_path = self.builtin_dir / skill_id / "SKILL.md" + if builtin_path.exists(): + return builtin_path + + return None + + async def _parse_skill_file(self, skill_path: Path, skill_id: str) -> Skill: + """ + 解析 SKILL.md 文件 + + Args: + skill_path: Skill 文件路径 + skill_id: Skill ID + + Returns: + Skill 对象 + """ + content = skill_path.read_text(encoding='utf-8') + + # 确定类型 + skill_type = "builtin" if "builtin_skills" in str(skill_path) else "user" + + # 提取元数据 + metadata = self._extract_metadata(content) + + # 提取行为指导(核心部分) + behavior_guide = self._extract_behavior_guide(content) + + # 检查是否有 Scripts(仅内置 Skill) + has_scripts = (skill_path.parent / "scripts").exists() + tools = [] + if has_scripts: + tools = self._load_tools(skill_path.parent / "scripts") + + return Skill( + id=skill_id, + name=metadata.get("name", skill_id), + version=metadata.get("version", "1.0.0"), + author=metadata.get("author", "未知"), + type=skill_type, + behavior_guide=behavior_guide, + config=self._extract_config(content), + template=metadata.get("template"), + tools=tools if has_scripts else None, + category=metadata.get("category", "通用"), + tags=metadata.get("tags", []), + visibility="private", + file_path=str(skill_path), + created_at=datetime.now(), + updated_at=datetime.now() + ) + + def _extract_metadata(self, content: str) -> Dict[str, Any]: + """ + 提取 Skill 元数据 + + 从 Markdown 的 YAML frontmatter 或 ## 基础信息 部分提取 + """ + metadata = {} + + # 尝试提取 YAML frontmatter + yaml_match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL) + if yaml_match: + try: + metadata = yaml.safe_load(yaml_match.group(1)) or {} + except: + pass + + # 从 ## 基础信息 提取 + if not metadata: + lines = content.split('\n') + in_basic_info = False + for i, line in enumerate(lines): + if "## 基础信息" in line or "## Basic Information" in line: + in_basic_info = True + continue + if in_basic_info: + if line.strip().startswith("##"): + break + # 解析 - **key**: value 格式 + match = re.match(r'-\s*\*\*([^*]+)\*\*:\s*(.+)', line) + if match: + key = match.group(1).strip() + value = match.group(2).strip() + metadata[key] = value + + return metadata + + def _extract_behavior_guide(self, content: str) -> str: + """ + 提取行为指导部分(核心功能) + + 查找 ## 行为指导 或 ## Behavior Guide 部分 + """ + lines = content.split('\n') + start_idx = -1 + + for i, line in enumerate(lines): + if "行为指导" in line or "Behavior Guide" in line or "行为规则" in line: + start_idx = i + break + + if 
start_idx == -1: + # 如果没有专门的章节,尝试从整体提取 + return content[:500] # 返回前500字符作为默认 + + # 提取到下一个 ## 之前的内容 + guide_lines = [] + for i in range(start_idx + 1, len(lines)): + if lines[i].strip().startswith("##"): + break + guide_lines.append(lines[i]) + + return "\n".join(guide_lines).strip() + + def _extract_config(self, content: str) -> Dict[str, Any]: + """提取配置信息""" + # 默认配置 + return { + "preset": None, + "parameters": {}, + "weights": {} + } + + def _load_tools(self, scripts_dir: Path) -> List[Dict[str, Any]]: + """加载 Scripts 目录下的工具""" + tools = [] + if not scripts_dir.exists(): + return tools + + for script_file in scripts_dir.glob("*.py"): + tools.append({ + "name": script_file.stem, + "description": f"Tool from {script_file.name}", + "script": script_file.name + }) + + return tools + + async def reload_skill(self, skill_id: str) -> Optional[Skill]: + """ + 重新加载 Skill(清除缓存) + + Args: + skill_id: Skill ID + + Returns: + 重新加载的 Skill 或 None + """ + if skill_id in self._cache: + del self._cache[skill_id] + return await self.load_skill(skill_id) + + async def list_skills( + self, + skill_type: Optional[str] = None, + category: Optional[str] = None + ) -> List[Skill]: + """ + 列出所有 Skills + + Args: + skill_type: Skill 类型 (builtin/user) + category: 分类筛选 + + Returns: + Skill 列表 + """ + skills = [] + + # 扫描内置 Skills + if skill_type is None or skill_type == "builtin": + for skill_dir in self.builtin_dir.iterdir(): + if skill_dir.is_dir() and (skill_dir / "SKILL.md").exists(): + skill = await self.load_skill(skill_dir.name) + if skill and (category is None or skill.category == category): + skills.append(skill) + + # 扫描用户 Skills + if skill_type is None or skill_type == "user": + for skill_dir in self.user_dir.iterdir(): + if skill_dir.is_dir() and (skill_dir / "SKILL.md").exists(): + skill = await self.load_skill(skill_dir.name) + if skill and (category is None or skill.category == category): + skills.append(skill) + + return skills + + async def create_user_skill(self, skill_data: SkillCreate) -> Skill: + """ + 创建新的用户 Skill + + Args: + skill_data: Skill 创建数据 + + Returns: + 创建的 Skill 对象 + """ + # 创建目录 + skill_dir = self.user_dir / skill_data.id + skill_dir.mkdir(exist_ok=True) + + # 写入 SKILL.md + skill_file = skill_dir / "SKILL.md" + skill_file.write_text(skill_data.content, encoding='utf-8') + + logger.info(f"创建用户 Skill: {skill_data.id}") + + # 加载并返回 + return await self.load_skill(skill_data.id) + + async def update_user_skill( + self, + skill_id: str, + skill_data: SkillUpdate + ) -> Optional[Skill]: + """ + 更新用户 Skill + + Args: + skill_id: Skill ID + skill_data: 更新数据 + + Returns: + 更新后的 Skill 或 None + """ + skill_path = self._find_skill_path(skill_id) + if not skill_path: + return None + + # 只能更新用户 Skills + if "builtin_skills" in str(skill_path): + logger.warning(f"不能更新内置 Skill: {skill_id}") + return None + + # 读取现有内容 + content = skill_path.read_text(encoding='utf-8') + + # 更新内容 + if skill_data.content: + content = skill_data.content + else: + # TODO: 更新特定字段 + pass + + # 写入更新后的内容 + skill_path.write_text(content, encoding='utf-8') + + # 重新加载 + return await self.reload_skill(skill_id) + + async def delete_user_skill(self, skill_id: str) -> bool: + """ + 删除用户 Skill + + Args: + skill_id: Skill ID + + Returns: + 是否删除成功 + """ + skill_path = self._find_skill_path(skill_id) + if not skill_path: + return False + + # 只能删除用户 Skills + if "builtin_skills" in str(skill_path): + logger.warning(f"不能删除内置 Skill: {skill_id}") + return False + + # 删除目录 + import shutil + shutil.rmtree(skill_path.parent) + + # 清除缓存 + if 
skill_id in self._cache: + del self._cache[skill_id] + + logger.info(f"删除用户 Skill: {skill_id}") + return True + + # ======================================================================== + # Script 执行 - Builtin Skills 的可执行脚本 + # ======================================================================== + + async def execute_script( + self, + skill_id: str, + script_name: str, + args: List[str] = None + ) -> Dict[str, Any]: + """ + 执行 Builtin Skill 的脚本 + + 这是 Agent-Skill 生命周期第三阶段:执行 + - 调用 Skill 的 scripts/ 目录下的可执行脚本 + - 捕获输出并返回结果 + + Args: + skill_id: Skill ID(必须是 builtin 类型) + script_name: 脚本文件名(如 "init_skill.py") + args: 脚本参数列表 + + Returns: + 执行结果字典 {"success": bool, "output": str, "error": str} + """ + skill_path = self._find_skill_path(skill_id) + if not skill_path: + return {"success": False, "error": f"Skill 未找到: {skill_id}"} + + # 只允许执行 builtin skills 的脚本 + if "builtin_skills" not in str(skill_path): + return {"success": False, "error": f"只允许执行 builtin Skills 的脚本"} + + script_path = skill_path.parent / "scripts" / script_name + if not script_path.exists(): + return {"success": False, "error": f"脚本不存在: {script_name}"} + + try: + # 构建命令 + cmd = [sys.executable, str(script_path)] + if args: + cmd.extend(args) + + logger.info(f"执行脚本: {' '.join(cmd)}") + + # 执行脚本 + result = subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=60, + cwd=str(script_path.parent) + ) + + if result.returncode == 0: + logger.info(f"脚本执行成功: {script_name}") + return { + "success": True, + "output": result.stdout, + "error": result.stderr + } + else: + logger.error(f"脚本执行失败: {script_name}, {result.stderr}") + return { + "success": False, + "output": result.stdout, + "error": result.stderr + } + + except subprocess.TimeoutExpired: + return {"success": False, "error": "脚本执行超时"} + except Exception as e: + logger.error(f"执行脚本异常: {str(e)}") + return {"success": False, "error": str(e)} + + # ======================================================================== + # skill-creator 专用方法 + # ======================================================================== + + async def init_skill_template( + self, + skill_name: str, + output_path: str = None + ) -> Dict[str, Any]: + """ + 使用 skill-creator 的 init_skill.py 初始化 Skill 模板 + + Args: + skill_name: Skill 名称(hyphen-case) + output_path: 输出路径(默认为 user_skills 目录) + + Returns: + 执行结果 + """ + if output_path is None: + output_path = str(self.user_dir) + + return await self.execute_script( + skill_id="skill-creator", + script_name="init_skill.py", + args=[skill_name, "--path", output_path] + ) + + async def validate_skill_structure( + self, + skill_id: str + ) -> Dict[str, Any]: + """ + 使用 skill-creator 的 quick_validate.py 验证 Skill 结构 + + Args: + skill_id: 要验证的 Skill ID + + Returns: + 验证结果 + """ + skill_path = self._find_skill_path(skill_id) + if not skill_path: + return {"success": False, "error": f"Skill 未找到: {skill_id}"} + + return await self.execute_script( + skill_id="skill-creator", + script_name="quick_validate.py", + args=[str(skill_path.parent)] + ) + + async def package_skill( + self, + skill_id: str, + output_dir: str = None + ) -> Dict[str, Any]: + """ + 使用 skill-creator 的 package_skill.py 打包 Skill + + Args: + skill_id: 要打包的 Skill ID + output_dir: 输出目录 + + Returns: + 打包结果 + """ + skill_path = self._find_skill_path(skill_id) + if not skill_path: + return {"success": False, "error": f"Skill 未找到: {skill_id}"} + + args = [str(skill_path.parent)] + if output_dir: + args.append(output_dir) + + return await self.execute_script( + skill_id="skill-creator", + 
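+            # 假设 package_skill.py 与初始化、校验脚本一样位于 skill-creator/scripts/ 目录下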
script_name="package_skill.py", + args=args + ) + + # ======================================================================== + # Tool 注册和执行 - Agent-Skill 生命周期核心 + # ======================================================================== + + async def register_builtin_tools(self): + """ + 注册所有 Builtin Skills 的工具 + + 在应用启动时调用,实现 Agent-Skill 生命周期第一阶段:注册 + """ + # 注册 skill-creator 的工具 + self._register_skill_creator_tools() + + # 可以继续注册其他 builtin skills 的工具 + # self._register_other_skill_tools() + + logger.info("Builtin Skills 工具注册完成") + + def _register_skill_creator_tools(self): + """注册 skill-creator 的工具""" + skill_id = "skill-creator" + + # Tool: init_skill + self.tool_registry.register_tool( + tool_id=f"{skill_id}_init_skill", + skill_id=skill_id, + name="init_skill_template", + description="初始化一个新的 Skill 模板,创建包含 SKILL.md、scripts/、references/、assets/ 的标准目录结构", + parameters={ + "type": "object", + "properties": { + "skill_name": { + "type": "string", + "description": "Skill 名称(hyphen-case,如 'my-awesome-skill')" + }, + "output_path": { + "type": "string", + "description": "输出路径(可选,默认为 user_skills 目录)" + } + }, + "required": ["skill_name"] + }, + handler=lambda **kwargs: self._execute_init_skill(**kwargs) + ) + + # Tool: validate_skill + self.tool_registry.register_tool( + tool_id=f"{skill_id}_validate_skill", + skill_id=skill_id, + name="validate_skill_structure", + description="验证 Skill 的结构是否符合规范,检查 YAML frontmatter、命名规范等", + parameters={ + "type": "object", + "properties": { + "skill_id": { + "type": "string", + "description": "要验证的 Skill ID" + } + }, + "required": ["skill_id"] + }, + handler=lambda **kwargs: self._execute_validate_skill(**kwargs) + ) + + # Tool: package_skill + self.tool_registry.register_tool( + tool_id=f"{skill_id}_package_skill", + skill_id=skill_id, + name="package_skill", + description="将 Skill 打包成 .skill 文件用于分发", + parameters={ + "type": "object", + "properties": { + "skill_id": { + "type": "string", + "description": "要打包的 Skill ID" + }, + "output_dir": { + "type": "string", + "description": "输出目录(可选)" + } + }, + "required": ["skill_id"] + }, + handler=lambda **kwargs: self._execute_package_skill(**kwargs) + ) + + # Handler 方法(需要是 async 的,所以用 wrapper) + async def _execute_init_skill(self, skill_name: str, output_path: str = None): + return await self.init_skill_template(skill_name, output_path) + + async def _execute_validate_skill(self, skill_id: str): + return await self.validate_skill_structure(skill_id) + + async def _execute_package_skill(self, skill_id: str, output_dir: str = None): + return await self.package_skill(skill_id, output_dir) + + # ======================================================================== + # 路由和执行 - Agent-Skill 生命周期第二、三阶段 + # ======================================================================== + + def get_available_tools_for_llm(self) -> List[Dict[str, Any]]: + """ + 获取所有可用工具的 Schema(用于注入 LLM) + + 这是 Agent-Skill 生命周期第一阶段的输出 + - 将所有已注册工具的 JSON Schema 返回 + - 这些 Schema 将被注入到 System Prompt 或 API 的 tools 字段 + """ + return self.tool_registry.get_tool_schemas_for_llm() + + async def route_and_execute_tool( + self, + tool_name: str, + parameters: Dict[str, Any] + ) -> Dict[str, Any]: + """ + 路由并执行工具 + + 实现 Agent-Skill 生命周期: + - 第二阶段:路由(根据 tool_name 找到对应的 handler) + - 第三阶段:执行(调用 handler 执行实际操作) + + Args: + tool_name: LLM 决定调用的工具名称 + parameters: LLM 生成的参数 + + Returns: + 执行结果(将反馈给 LLM,实现第四阶段:反馈) + """ + # 查找工具 + tool = None + for t in self.tool_registry._tools.values(): + if t["name"] == tool_name: + tool = t + break + + if not tool: + 
return { + "success": False, + "error": f"工具未找到: {tool_name}" + } + + logger.info(f"路由到工具: {tool_name} (from {tool['skill_id']})") + + try: + # 执行工具 + result = await tool["handler"](**parameters) + + # 返回结果(将反馈给 LLM) + return result + + except Exception as e: + logger.error(f"执行工具失败: {str(e)}") + return { + "success": False, + "error": str(e) + } + + # ======================================================================== + # 参考文件加载 - 融入 LLM 上下文 + # ======================================================================== + + async def load_skill_references( + self, + skill_id: str, + max_file_size: int = 100000 # 100KB + ) -> Dict[str, str]: + """ + 加载 Skill 的参考文件内容 + + 参考文件会被融入到 LLM 的 system prompt 中 + 这样 LLM 在执行任务时可以参考这些资料 + + Args: + skill_id: Skill ID + max_file_size: 单个文件最大大小(字节),超过则跳过 + + Returns: + 参考文件内容字典 {filename: content} + """ + skill_path = self._find_skill_path(skill_id) + if not skill_path: + logger.warning(f"Skill 未找到,无法加载参考文件: {skill_id}") + return {} + + references_dir = skill_path.parent / "references" + if not references_dir.exists(): + logger.debug(f"Skill 没有参考文件目录: {skill_id}") + return {} + + references = {} + allowed_extensions = {'.md', '.txt', '.json', '.yaml', '.yml'} + + for file_path in references_dir.iterdir(): + if not file_path.is_file(): + continue + + # 检查文件扩展名 + if file_path.suffix.lower() not in allowed_extensions: + logger.debug(f"跳过不支持的文件类型: {file_path.name}") + continue + + # 检查文件大小 + file_size = file_path.stat().st_size + if file_size > max_file_size: + logger.warning( + f"参考文件过大,跳过: {file_path.name} ({file_size / 1024:.1f}KB > {max_file_size / 1024}KB)" + ) + continue + + try: + # 读取文件内容 + content = file_path.read_text(encoding='utf-8') + references[file_path.name] = content + logger.info(f"加载参考文件: {skill_id}/references/{file_path.name} ({file_size} bytes)") + except UnicodeDecodeError: + logger.warning(f"参考文件编码错误,跳过: {file_path.name}") + continue + except Exception as e: + logger.error(f"读取参考文件失败: {file_path.name}, {str(e)}") + continue + + if references: + logger.info(f"成功加载 {len(references)} 个参考文件: {skill_id}") + + return references + + async def get_skill_with_references( + self, + skill_id: str, + include_references: bool = True + ) -> Optional[Dict[str, Any]]: + """ + 获取 Skill 及其参考文件 + + 这是一个便捷方法,返回 Skill 对象和参考文件内容 + + Args: + skill_id: Skill ID + include_references: 是否包含参考文件 + + Returns: + 包含 skill 和 references 的字典 + """ + skill = await self.load_skill(skill_id) + if not skill: + return None + + result = { + "skill": skill, + "references": {} + } + + if include_references: + result["references"] = await self.load_skill_references(skill_id) + + return result + + +# 全局单例 +_skill_manager: Optional[SkillManager] = None + + +def get_skill_manager() -> SkillManager: + """获取 Skill 管理器单例""" + global _skill_manager + if _skill_manager is None: + _skill_manager = SkillManager() + return _skill_manager + + +# 便捷导出 +skill_manager = get_skill_manager() diff --git a/backend/app/core/skills/skill_resolver.py b/backend/app/core/skills/skill_resolver.py new file mode 100644 index 0000000..820dc7f --- /dev/null +++ b/backend/app/core/skills/skill_resolver.py @@ -0,0 +1,151 @@ +""" +Skill 解析器 - 根据配置动态选择和合并 Skills + +三级配置优先级: +1. 任务级动态配置(运行时指定) +2. 单集级配置(episodeSkillOverrides) +3. 
项目级默认配置(defaultTaskSkills) +""" +from typing import List, Dict, Any, Optional +from app.models.project import SeriesProject, TaskType, SkillAssignment, TaskSkillConfig +from app.models.skill import Skill +from app.core.skills.skill_manager import get_skill_manager +from app.utils.logger import get_logger + +logger = get_logger(__name__) + + +class SkillResolver: + """Skill 解析器 - 负责根据三级配置动态选择 Skills""" + + def __init__(self): + self.skill_manager = get_skill_manager() + + async def resolve_skills_for_task( + self, + project: SeriesProject, + task_type: TaskType, + episode_number: int, + runtime_overrides: Optional[List[str]] = None + ) -> List[Dict[str, Any]]: + """ + 解析指定任务的 Skill 配置 + + Args: + project: 项目 + task_type: 任务类型 + episode_number: 集数 + runtime_overrides: 运行时指定的 Skill ID 列表(最高优先级) + + Returns: + 解析后的 Skill 列表,包含 skill、parameters、references + """ + # 1. 检查运行时覆盖(最高优先级) + if runtime_overrides: + logger.info(f"使用运行时覆盖 Skills: {runtime_overrides}") + skills = [] + for skill_id in runtime_overrides: + skill = await self.skill_manager.load_skill(skill_id) + if skill: + skills.append({ + "skill": skill, + "parameters": skill.config.parameters, + "references": await self.skill_manager.load_skill_references(skill_id) + }) + return skills + + # 2. 检查单集覆盖 + episode_override = project.episodeSkillOverrides.get(episode_number) + if episode_override and not episode_override.use_project_default: + logger.info(f"EP{episode_number} 使用单集覆盖配置") + task_config = next( + (c for c in episode_override.task_configs if c.task_type == task_type), + None + ) + if task_config: + return await self._load_skills_from_config(task_config.skills) + + # 3. 使用项目默认配置 + task_config = next( + (c for c in project.defaultTaskSkills if c.task_type == task_type), + None + ) + if task_config: + logger.info(f"使用项目默认配置: task_type={task_type.value}, skills={[s.skill_id for s in task_config.skills]}") + return await self._load_skills_from_config(task_config.skills) + + # 4. 
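# ------------------------------------------------------------------------
# Usage sketch (illustrative): exercising the three lookup levels above in
# order. `project` is assumed to be a loaded SeriesProject; ids are
# hypothetical.
#
#   resolver = get_skill_resolver()
#   resolved = await resolver.resolve_skills_for_task(
#       project=project,
#       task_type=TaskType.DIALOGUE,
#       episode_number=5,
#       runtime_overrides=["dialogue-writer"],  # level 1: wins when present
#   )
#   # each entry: {"skill": Skill, "parameters": {...}, "references": {...}}
# ------------------------------------------------------------------------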
回退到硬编码的默认值(兼容性) + logger.warning(f"未找到 {task_type.value} 的配置,使用回退默认值") + return await self._get_fallback_skills(task_type) + + async def _load_skills_from_config( + self, + assignments: List[SkillAssignment] + ) -> List[Dict[str, Any]]: + """从配置加载 Skills""" + results = [] + + # 按优先级排序 + sorted_assignments = sorted(assignments, key=lambda x: x.priority, reverse=True) + + for assignment in sorted_assignments: + if not assignment.enabled: + continue + + skill = await self.skill_manager.load_skill(assignment.skill_id) + if not skill: + logger.warning(f"Skill 未找到: {assignment.skill_id}") + continue + + # 合并参数 + parameters = skill.config.parameters.copy() + if assignment.parameter_overrides: + parameters.update(assignment.parameter_overrides) + + # 加载参考文件 + references = await self.skill_manager.load_skill_references(skill.id) + + results.append({ + "skill": skill, + "parameters": parameters, + "references": references + }) + + return results + + async def _get_fallback_skills(self, task_type: TaskType) -> List[Dict[str, Any]]: + """获取回退的默认 Skills(兼容现有硬编码)""" + fallback_map = { + TaskType.DIALOGUE: ["dialogue_writer_ancient", "dialogue-writer"], + TaskType.OUTLINE: ["dialogue_writer_ancient"], + TaskType.REVIEW: ["consistency_checker"], + TaskType.CONSISTENCY: ["consistency_checker"], + } + + skill_ids = fallback_map.get(task_type, []) + + for skill_id in skill_ids: + skill = await self.skill_manager.load_skill(skill_id) + if skill: + references = await self.skill_manager.load_skill_references(skill_id) + logger.info(f"使用回退 Skill: {skill_id}") + return [{ + "skill": skill, + "parameters": skill.config.parameters, + "references": references + }] + + logger.error(f"未找到任何可用的 Skill for task_type={task_type.value}") + return [] + + +# 全局单例 +_skill_resolver: Optional[SkillResolver] = None + + +def get_skill_resolver() -> SkillResolver: + """获取 Skill 解析器单例""" + global _skill_resolver + if _skill_resolver is None: + _skill_resolver = SkillResolver() + return _skill_resolver diff --git a/backend/app/db/__init__.py b/backend/app/db/__init__.py new file mode 100644 index 0000000..65f47a9 --- /dev/null +++ b/backend/app/db/__init__.py @@ -0,0 +1 @@ +# Database module diff --git a/backend/app/db/repositories.py b/backend/app/db/repositories.py new file mode 100644 index 0000000..ed43f70 --- /dev/null +++ b/backend/app/db/repositories.py @@ -0,0 +1,116 @@ +from datetime import datetime +from typing import List, Optional +import uuid + +from app.models.project import ( + SeriesProject, + SeriesProjectCreate, + Episode, + EpisodeExecuteRequest, + EpisodeExecuteResponse +) +from app.core.agents.series_creation_agent import get_series_agent +from app.utils.logger import get_logger + +logger = get_logger(__name__) + + +# ============================================ +# 内存存储 (MVP 阶段使用文件存储) +# ============================================ +_projects: dict = {} +_episodes: dict = {} + + +class ProjectRepository: + """项目仓储(MVP 简化版)""" + + async def create(self, project_data: SeriesProjectCreate) -> SeriesProject: + """创建新项目""" + project_id = str(uuid.uuid4()) + project = SeriesProject( + id=project_id, + name=project_data.name, + totalEpisodes=project_data.totalEpisodes, + agentId=project_data.agentId, + mode=project_data.mode, + globalContext=project_data.globalContext, + skillSettings=project_data.skillSettings, + createdAt=datetime.now(), + updatedAt=datetime.now() + ) + _projects[project_id] = project + logger.info(f"创建项目: {project_id} - {project.name}") + return project + + async def get(self, project_id: str) -> 
Optional[SeriesProject]: + """获取项目""" + return _projects.get(project_id) + + async def list(self, skip: int = 0, limit: int = 100) -> List[SeriesProject]: + """列出所有项目""" + return list(_projects.values())[skip:skip + limit] + + async def update( + self, + project_id: str, + project_data: dict + ) -> Optional[SeriesProject]: + """更新项目""" + project = _projects.get(project_id) + if not project: + return None + + for key, value in project_data.items(): + if hasattr(project, key): + setattr(project, key, value) + + project.updatedAt = datetime.now() + return project + + async def delete(self, project_id: str) -> bool: + """删除项目""" + if project_id in _projects: + del _projects[project_id] + return True + return False + + +class EpisodeRepository: + """剧集仓储(MVP 简化版)""" + + async def create(self, episode: Episode) -> Episode: + """创建剧集""" + if not episode.id: + episode.id = str(uuid.uuid4()) + _episodes[episode.id] = episode + logger.info(f"创建剧集: {episode.id} - EP{episode.number}") + return episode + + async def get(self, episode_id: str) -> Optional[Episode]: + """获取剧集""" + return _episodes.get(episode_id) + + async def list_by_project( + self, + project_id: str, + skip: int = 0, + limit: int = 100 + ) -> List[Episode]: + """列出项目的所有剧集""" + return [ + ep for ep in _episodes.values() + if ep.projectId == project_id + ][skip:skip + limit] + + async def update(self, episode: Episode) -> Episode: + """更新剧集""" + _episodes[episode.id] = episode + return episode + + +# ============================================ +# 全局仓储实例 +# ============================================ +project_repo = ProjectRepository() +episode_repo = EpisodeRepository() diff --git a/backend/app/main.py b/backend/app/main.py new file mode 100644 index 0000000..399990d --- /dev/null +++ b/backend/app/main.py @@ -0,0 +1,137 @@ +""" +Creative Studio - FastAPI 应用入口 +基于 Agent + Skill 架构的 AI 剧集创作平台 +""" +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware +from contextlib import asynccontextmanager +from app.config import settings +from app.utils.logger import get_logger + +# 初始化日志 +logger = get_logger(__name__) + + +@asynccontextmanager +async def lifespan(app: FastAPI): + """应用生命周期管理""" + # 启动时执行 + logger.info(f"🚀 {settings.app_name} v{settings.app_version} 启动中...") + logger.info(f"📝 环境: {settings.environment}") + logger.info(f"🔧 调试模式: {settings.debug}") + + # 初始化 Skill 管理器 + from app.core.skills.skill_manager import get_skill_manager + skill_manager = get_skill_manager() + logger.info(f"✅ Skill 管理器初始化完成,已加载 {len(await skill_manager.list_skills())} 个 Skills") + + # 注册 Builtin Skills 的工具(Agent-Skill 生命周期:注册阶段) + await skill_manager.register_builtin_tools() + logger.info(f"✅ Builtin Skills 工具注册完成") + + # 初始化 GLM 客户端 + from app.core.llm.glm_client import get_glm_client + glm_client = get_glm_client() + logger.info(f"✅ GLM 客户端初始化完成,模型: {glm_client.model}") + + # 初始化剧集创作 Agent + from app.core.agents.series_creation_agent import get_series_agent + agent = get_series_agent() + logger.info(f"✅ 剧集创作 Agent 初始化完成") + + # 初始化审核管理器 + from app.core.review.review_manager import get_review_manager + review_mgr = get_review_manager() + logger.info(f"✅ 审核管理器初始化完成") + + # 初始化批量执行器 + from app.core.execution.batch_executor import get_batch_executor + batch_exec = get_batch_executor() + logger.info(f"✅ 批量执行器初始化完成") + + # 初始化重试管理器 + from app.core.execution.retry_manager import get_retry_manager + retry_mgr = get_retry_manager() + logger.info(f"✅ 重试管理器初始化完成") + + yield + + # 关闭时执行 + logger.info(f"👋 {settings.app_name} 正在关闭...") + + +# 
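# ------------------------------------------------------------------------
# Usage sketch (illustrative): the in-memory repositories defined in
# app/db/repositories.py above. Field values are hypothetical.
#
#   from app.db.repositories import project_repo, episode_repo
#
#   project = await project_repo.create(SeriesProjectCreate(
#       name="Demo Project", totalEpisodes=12, mode="batch"))
#   await project_repo.update(project.id, {"name": "Demo Project v2"})
#   episodes = await episode_repo.list_by_project(project.id, limit=20)
#
# Note: both repositories hold plain module-level dicts, so all data is
# lost on process restart, a deliberate MVP simplification.
# ------------------------------------------------------------------------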
创建 FastAPI 应用 +app = FastAPI( + title=settings.app_name, + description="基于 Agent + Skill 架构的 AI 剧集创作平台", + version=settings.app_version, + lifespan=lifespan, + docs_url="/docs", + redoc_url="/redoc", + openapi_url="/openapi.json" +) + +# 配置 CORS +app.add_middleware( + CORSMiddleware, + allow_origins=settings.cors_origins, + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + + +# ============================================ +# 根路由 +# ============================================ +@app.get("/", tags=["根路径"]) +async def root(): + """API 根路径""" + return { + "name": settings.app_name, + "version": settings.app_version, + "description": "基于 Agent + Skill 架构的 AI 剧集创作平台", + "docs": "/docs", + "status": "running" + } + + +@app.get("/health", tags=["健康检查"]) +async def health_check(): + """健康检查端点""" + return { + "status": "healthy", + "environment": settings.environment + } + + +# ============================================ +# API 路由注册 +# ============================================ +from app.api.v1 import skills, projects, ai_assistant, memory, review, websocket, uploads, episodes + +app.include_router(skills.router, prefix="/api/v1") +app.include_router(projects.router, prefix="/api/v1") +app.include_router(episodes.router, prefix="/api/v1") +app.include_router(ai_assistant.router, prefix="/api/v1") +app.include_router(memory.router, prefix="/api/v1") +app.include_router(review.router, prefix="/api/v1") +app.include_router(uploads.router, prefix="/api/v1") +app.include_router(websocket.router) # WebSocket router doesn't need prefix + +logger.info("✅ API 路由注册完成") + + +# ============================================ +# 启动说明 +# ============================================ +if __name__ == "__main__": + import uvicorn + + uvicorn.run( + "app.main:app", + host=settings.host, + port=settings.port, + reload=settings.debug, + log_level=settings.log_level.lower() + ) diff --git a/backend/app/models/__init__.py b/backend/app/models/__init__.py new file mode 100644 index 0000000..01217f0 --- /dev/null +++ b/backend/app/models/__init__.py @@ -0,0 +1,71 @@ +# Models module +from app.models.memory import ( + TimelineEvent, + PendingThread, + CharacterStateChange, + EnhancedMemory, + ForeshadowingEvent, + ConsistencyIssue, + ResolutionSuggestion, + ThreadCreateRequest, + ThreadUpdateRequest, + ThreadResolveRequest, + MemoryUpdateRequest, + MemoryExtractionResult, + ImportanceLevel, + ThreadStatus +) + +from app.models.review import ( + ReviewConfig, + DimensionConfig, + CustomRule, + ReviewResult, + ReviewIssue, + DimensionScore, + Location, + ReviewConfigUpdate, + ReviewRequest, + ReviewResponse, + DimensionInfo, + DimensionType, + SeverityLevel, + IssueType, + CustomRuleCreate, + CustomRuleUpdate, + ReviewPreset +) + +__all__ = [ + 'TimelineEvent', + 'PendingThread', + 'CharacterStateChange', + 'EnhancedMemory', + 'ForeshadowingEvent', + 'ConsistencyIssue', + 'ResolutionSuggestion', + 'ThreadCreateRequest', + 'ThreadUpdateRequest', + 'ThreadResolveRequest', + 'MemoryUpdateRequest', + 'MemoryExtractionResult', + 'ImportanceLevel', + 'ThreadStatus', + 'ReviewConfig', + 'DimensionConfig', + 'CustomRule', + 'ReviewResult', + 'ReviewIssue', + 'DimensionScore', + 'Location', + 'ReviewConfigUpdate', + 'ReviewRequest', + 'ReviewResponse', + 'DimensionInfo', + 'DimensionType', + 'SeverityLevel', + 'IssueType', + 'CustomRuleCreate', + 'CustomRuleUpdate', + 'ReviewPreset' +] diff --git a/backend/app/models/episode_content.py b/backend/app/models/episode_content.py new file mode 100644 index 
0000000..23c1e69
--- /dev/null
+++ b/backend/app/models/episode_content.py
@@ -0,0 +1,93 @@
+"""
+Episode content data models
+
+Stores generated episode content and supports preview, editing, and export
+"""
+from pydantic import BaseModel, Field
+from typing import Optional, Dict, Any
+from datetime import datetime
+from enum import Enum
+
+
+class ContentStatus(str, Enum):
+    """Content status"""
+    DRAFT = "draft"                    # Draft
+    GENERATING = "generating"          # Being generated
+    PENDING_REVIEW = "pending_review"  # Awaiting review
+    APPROVED = "approved"              # Approved
+    REJECTED = "rejected"              # Rejected
+
+
+class EpisodeContent(BaseModel):
+    """Content of a single episode"""
+    id: str = Field(..., description="Content ID")
+    project_id: str = Field(..., description="Project ID")
+    episode_number: int = Field(..., description="Episode number", ge=1)
+    title: Optional[str] = Field(None, description="Title")
+    content: str = Field(..., description="Content (Markdown format)")
+    status: ContentStatus = Field(default=ContentStatus.DRAFT, description="Status")
+    quality_score: Optional[float] = Field(None, description="Quality score")
+    review_issues: list = Field(default_factory=list, description="Review issues")
+
+    # Metadata
+    created_at: datetime = Field(default_factory=datetime.now, description="Creation time")
+    updated_at: datetime = Field(default_factory=datetime.now, description="Last update time")
+    created_by: str = Field(default="system", description="Creator")
+
+    # Generation parameters
+    generation_params: Optional[Dict[str, Any]] = Field(None, description="Generation parameters")
+
+    # Version info
+    version: int = Field(default=1, description="Version number")
+    parent_version: Optional[int] = Field(None, description="Parent version number")
+
+    class Config:
+        json_schema_extra = {
+            "example": {
+                "id": "ep_001_001",
+                "project_id": "proj_001",
+                "episode_number": 1,
+                "title": "第一集:初遇",
+                "content": "# 第一集\n\n## 场景一\n...",
+                "status": "draft",
+                "quality_score": 85.5,
+                "review_issues": []
+            }
+        }
+
+
+class EpisodeContentCreate(BaseModel):
+    """Create episode content"""
+    project_id: str
+    episode_number: int
+    title: Optional[str] = None
+    content: str
+    generation_params: Optional[Dict[str, Any]] = None
+
+
+class EpisodeContentUpdate(BaseModel):
+    """Update episode content"""
+    title: Optional[str] = None
+    content: Optional[str] = None
+    status: Optional[ContentStatus] = None
+
+
+class EpisodeContentReview(BaseModel):
+    """Review episode content"""
+    approved: bool = Field(..., description="Whether the content passes review")
+    comment: Optional[str] = Field(None, description="Review comment")
+
+
+class ExportFormat(str, Enum):
+    """Export format"""
+    MARKDOWN = "markdown"
+    TXT = "txt"
+    PDF = "pdf"
+    DOCX = "docx"
+
+
+class ExportRequest(BaseModel):
+    """Export request"""
+    format: ExportFormat
+    episode_numbers: Optional[list[int]] = Field(None, description="Specific episodes; empty means all")
+    include_metadata: bool = Field(default=False, description="Whether to include metadata")
diff --git a/backend/app/models/memory.py b/backend/app/models/memory.py
new file mode 100644
index 0000000..40ae515
--- /dev/null
+++ b/backend/app/models/memory.py
@@ -0,0 +1,233 @@
+"""
+记忆系统数据模型
+
+Memory System - 完整的记忆系统数据结构,支持多剧集一致性管理
+"""
+from pydantic import BaseModel, Field
+from typing import Dict, List, Optional, Any
+from datetime import datetime
+from enum import Enum
+
+
+# ============================================
+# 枚举类型
+# ============================================
+
+class ImportanceLevel(str, Enum):
+    """重要程度"""
+    LOW = "low"
+    MEDIUM = "medium"
+    HIGH = "high"
+
+
+class ThreadStatus(str, Enum):
+    """待收线状态"""
+    PENDING = "pending"
+    IN_PROGRESS = "in_progress"
+    RESOLVED = "resolved"
+
+
+# ============================================
+# 核心数据模型
+# ============================================
+
+class TimelineEvent(BaseModel):
+    """
+    时间线事件
+
+    记录剧情中发生的关键事件,用于维护多集一致性
+    """
+    episode: int = Field(..., description="所在集数")
+
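# ------------------------------------------------------------------------
# Usage sketch (illustrative): recording a key event on the timeline with
# the model above. All values here are hypothetical.
#
#   ev = TimelineEvent(
#       episode=3,
#       event="The letter from the capital is finally opened",
#       characters_involved=["Lin Yan"],
#       importance=ImportanceLevel.HIGH,
#       tags=["foreshadowing-payoff"],
#   )
# ------------------------------------------------------------------------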
event: str = Field(..., description="事件描述") + timestamp: datetime = Field(default_factory=datetime.now, description="记录时间") + characters_involved: List[str] = Field(default_factory=list, description="涉及的角色") + importance: ImportanceLevel = Field(default=ImportanceLevel.MEDIUM, description="重要程度") + tags: List[str] = Field(default_factory=list, description="事件标签") + related_threads: List[str] = Field(default_factory=list, description="相关的待收线ID") + + +class PendingThread(BaseModel): + """ + 待收线问题(伏笔) + + 记录剧情中引入的未解决问题,用于确保后续收线 + """ + id: str = Field(..., description="待收线唯一标识") + description: str = Field(..., description="待收线描述") + introduced_at: int = Field(..., description="引入的集数") + importance: ImportanceLevel = Field(..., description="重要程度") + resolved: bool = Field(default=False, description="是否已收线") + resolved_at: Optional[int] = Field(default=None, description="收线的集数") + reminder_episode: Optional[int] = Field(default=None, description="提醒收线的集数") + status: ThreadStatus = Field(default=ThreadStatus.PENDING, description="当前状态") + characters_involved: List[str] = Field(default_factory=list, description="涉及的角色") + notes: str = Field(default="", description="备注信息") + created_at: datetime = Field(default_factory=datetime.now, description="创建时间") + updated_at: datetime = Field(default_factory=datetime.now, description="更新时间") + + +class CharacterStateChange(BaseModel): + """ + 角色状态变化 + + 记录角色在不同集数中的状态变化,保持角色发展的一致性 + """ + episode: int = Field(..., description="发生变化的集数") + state: str = Field(..., description="当前状态") + change: str = Field(..., description="状态变化描述") + emotional_impact: Optional[str] = Field(default=None, description="情感影响") + trigger_event: Optional[str] = Field(default=None, description="触发事件") + timestamp: datetime = Field(default_factory=datetime.now, description="记录时间") + + +class ForeshadowingEvent(BaseModel): + """ + 伏笔事件 + + 记录剧情中的伏笔元素,用于后续呼应 + """ + id: str = Field(..., description="伏笔唯一标识") + description: str = Field(..., description="伏笔描述") + introduced_at: int = Field(..., description="引入集数") + payoff_at: Optional[int] = Field(default=None, description="呼应集数") + type: str = Field(default="plot", description="伏笔类型: plot/character/dialogue/object") + importance: ImportanceLevel = Field(default=ImportanceLevel.MEDIUM, description="重要程度") + is_payed_off: bool = Field(default=False, description="是否已呼应") + related_events: List[str] = Field(default_factory=list, description="相关事件ID") + notes: str = Field(default="", description="备注") + + +class ConsistencyIssue(BaseModel): + """ + 一致性问题 + + 记录检测到的一致性问题 + """ + id: str = Field(..., description="问题唯一标识") + type: str = Field(..., description="问题类型: character/plot/setting/dialogue") + severity: ImportanceLevel = Field(..., description="严重程度") + description: str = Field(..., description="问题描述") + location: Optional[str] = Field(default=None, description="问题位置(场景/段落)") + conflicting_info: Dict[str, Any] = Field(default_factory=dict, description="冲突信息") + suggestion: str = Field(default="", description="修复建议") + detected_at: datetime = Field(default_factory=datetime.now, description="检测时间") + + +class ResolutionSuggestion(BaseModel): + """ + 收线建议 + + 为待收线问题提供收线建议 + """ + thread_id: str = Field(..., description="待收线ID") + suggested_episode: int = Field(..., description="建议收线集数") + approach: str = Field(..., description="收线方式描述") + key_elements: List[str] = Field(default_factory=list, description="关键要素") + character_involvement: Dict[str, str] = Field(default_factory=dict, description="角色参与方式") + potential_twists: List[str] = 
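# ------------------------------------------------------------------------
# Usage sketch (illustrative): introducing a planted thread and closing it
# a few episodes later. Ids and values are hypothetical.
#
#   thread = PendingThread(
#       id="thread-007",
#       description="Who sent the unsigned letter?",
#       introduced_at=2,
#       importance=ImportanceLevel.HIGH,
#       reminder_episode=6,
#   )
#   # ...later, once episode 7 resolves it:
#   thread.resolved = True
#   thread.resolved_at = 7
#   thread.status = ThreadStatus.RESOLVED
# ------------------------------------------------------------------------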
Field(default_factory=list, description="可能的反转") + estimated_impact: str = Field(default="", description="预估影响") + + +class EnhancedMemory(BaseModel): + """ + 增强记忆系统 + + 完整的记忆系统,包含所有记忆相关数据 + """ + # 事件时间线 + eventTimeline: List[TimelineEvent] = Field( + default_factory=list, + description="按时间顺序排列的关键事件" + ) + + # 待收线问题 + pendingThreads: List[PendingThread] = Field( + default_factory=list, + description="未解决的伏笔和待收线" + ) + + # 角色状态历史 + characterStates: Dict[str, List[CharacterStateChange]] = Field( + default_factory=dict, + description="每个角色的状态变化历史" + ) + + # 伏笔追踪 + foreshadowing: List[ForeshadowingEvent] = Field( + default_factory=list, + description="伏笔事件列表" + ) + + # 角色关系 + relationships: Dict[str, Dict[str, str]] = Field( + default_factory=dict, + description="角色关系网络" + ) + + # 一致性问题记录 + consistencyIssues: List[ConsistencyIssue] = Field( + default_factory=list, + description="检测到的一致性问题" + ) + + # 元数据 + last_updated: datetime = Field( + default_factory=datetime.now, + description="最后更新时间" + ) + last_episode_processed: int = Field( + default=0, + description="最后处理的集数" + ) + + +# ============================================ +# 请求/响应模型 +# ============================================ + +class ThreadCreateRequest(BaseModel): + """创建待收线请求""" + description: str + importance: ImportanceLevel + reminder_episode: Optional[int] = None + characters_involved: List[str] = Field(default_factory=list) + notes: str = Field(default="") + + +class ThreadUpdateRequest(BaseModel): + """更新待收线请求""" + description: Optional[str] = None + importance: Optional[ImportanceLevel] = None + status: Optional[ThreadStatus] = None + reminder_episode: Optional[int] = None + characters_involved: Optional[List[str]] = None + notes: Optional[str] = None + + +class ThreadResolveRequest(BaseModel): + """收线请求""" + thread_id: str + resolved_at: int + resolution_summary: str = Field(default="", description="收线摘要") + + +class MemoryUpdateRequest(BaseModel): + """记忆更新请求""" + episode_number: int + episode_content: str + extract_events: bool = Field(default=True, description="是否提取事件") + detect_foreshadowing: bool = Field(default=True, description="是否检测伏笔") + update_characters: bool = Field(default=True, description="是否更新角色状态") + check_consistency: bool = Field(default=True, description="是否检查一致性") + + +class MemoryExtractionResult(BaseModel): + """记忆提取结果""" + success: bool + events_extracted: int = Field(default=0, description="提取的事件数") + foreshadowing_detected: int = Field(default=0, description="检测到的伏笔数") + character_states_updated: int = Field(default=0, description="更新的角色状态数") + consistency_issues_found: int = Field(default=0, description="发现的一致性问题数") + threads_suggested: int = Field(default=0, description="建议的待收线数") + new_threads: List[PendingThread] = Field(default_factory=list, description="新识别的待收线") + message: str = Field(default="", description="结果消息") diff --git a/backend/app/models/project.py b/backend/app/models/project.py new file mode 100644 index 0000000..203c23c --- /dev/null +++ b/backend/app/models/project.py @@ -0,0 +1,193 @@ +""" +项目管理数据模型 + +Series Project - 剧集项目的完整数据结构 +""" +from pydantic import BaseModel, Field +from typing import Dict, List, Optional, Any, TYPE_CHECKING +from datetime import datetime +from enum import Enum + +# Use TYPE_CHECKING to avoid circular imports +if TYPE_CHECKING: + from app.models.review import ReviewConfig, ReviewResult + + +# ============================================ +# 枚举类型 +# ============================================ + +class TaskType(str, Enum): + """任务类型枚举""" + DIALOGUE = 
"dialogue" # 对话创作 + REVIEW = "review" # 内容审核 + OUTLINE = "outline" # 大纲生成 + STRUCTURE = "structure" # 结构分析 + SCENE = "scene" # 场景描写 + CONSISTENCY = "consistency" # 一致性检查 + + +# ============================================ +# 嵌套模型 +# ============================================ + +class CharacterProfile(BaseModel): + """人物小传""" + name: str + description: str = "" + personality: str = "" + speechStyle: str = "" + currentState: str = "" + relationships: Dict[str, str] = Field(default_factory=dict) + appearance: Optional[str] = None + + +class SceneSetting(BaseModel): + """场景设定""" + name: str + description: str = "" + atmosphere: str = "" + + +class GlobalContext(BaseModel): + """全局上下文""" + worldSetting: str = "" + characterProfiles: Dict[str, CharacterProfile] = Field(default_factory=dict) + sceneSettings: Dict[str, SceneSetting] = Field(default_factory=dict) + overallOutline: str = "" + styleGuide: str = "" + + +class Memory(BaseModel): + """记忆系统""" + eventTimeline: List[Dict[str, Any]] = Field(default_factory=list) + pendingThreads: List[Dict[str, Any]] = Field(default_factory=list) + foreshadowing: List[Dict[str, Any]] = Field(default_factory=list) + characterStates: Dict[str, List[Dict[str, Any]]] = Field(default_factory=dict) + + +class SkillSetting(BaseModel): + """Skill 设置(保留用于兼容)""" + enabled: bool = True + parameterOverrides: Optional[Dict[str, Any]] = None + + +class SkillAssignment(BaseModel): + """Skill 分配配置""" + skill_id: str + enabled: bool = True + priority: int = 0 + parameter_overrides: Optional[Dict[str, Any]] = None + + +class TaskSkillConfig(BaseModel): + """任务级 Skill 配置""" + task_type: TaskType + skills: List[SkillAssignment] = Field(default_factory=list) + + +class EpisodeSkillOverride(BaseModel): + """单集 Skill 覆盖配置""" + episode_number: int + task_configs: List[TaskSkillConfig] = Field(default_factory=list) + use_project_default: bool = True + + +class EpisodeIssue(BaseModel): + """剧集问题""" + type: str + description: str + severity: str # low, medium, high + suggestion: str = "" + + +# ============================================ +# 核心模型 +# ============================================ + +class SeriesProject(BaseModel): + """剧集项目模型""" + id: str = "" + name: str + type: str = "series" + agentId: str = "series-creation" + mode: str = "batch" # auto, batch, step + + # 全局上下文 + globalContext: GlobalContext = Field(default_factory=GlobalContext) + + # 记忆系统 + memory: Memory = Field(default_factory=Memory) + + # 配置 + totalEpisodes: int = 30 + + # 三级 Skills 配置 + defaultTaskSkills: List[TaskSkillConfig] = Field(default_factory=list) + episodeSkillOverrides: Dict[int, EpisodeSkillOverride] = Field(default_factory=dict) + + # 保留现有字段用于兼容 + skillSettings: Dict[str, SkillSetting] = Field(default_factory=dict) + autoRetryConfig: Optional[Dict[str, Any]] = None + + # 审核配置 + reviewConfig: Optional[Dict[str, Any]] = None + + # 时间戳 + createdAt: datetime = Field(default_factory=datetime.now) + updatedAt: datetime = Field(default_factory=datetime.now) + + +class Episode(BaseModel): + """单集模型""" + id: str = "" + projectId: str = "" + number: int = 0 + title: str = "" + status: str = "pending" # pending, writing, review, completed, needs-review + + # 内容 + structure: Optional[Dict[str, Any]] = None + outline: Optional[str] = None + content: Optional[str] = None + summary: Optional[str] = None + + # 质量信息 + qualityScore: Optional[float] = None + retryCount: int = 0 + issues: List[EpisodeIssue] = Field(default_factory=list) + + # 审核结果 + reviewResult: Optional[Dict[str, Any]] = None + + # 时间戳 + 
createdAt: datetime = Field(default_factory=datetime.now) + completedAt: Optional[datetime] = None + + +# ============================================ +# 请求/响应模型 +# ============================================ + +class SeriesProjectCreate(BaseModel): + """创建项目请求""" + name: str + totalEpisodes: int = 30 + agentId: str = "series-creation" + mode: str = "batch" + globalContext: GlobalContext = Field(default_factory=GlobalContext) + skillSettings: Dict[str, SkillSetting] = Field(default_factory=dict) + + +class EpisodeExecuteRequest(BaseModel): + """执行剧集请求""" + episodeNumber: int + title: str = "" + contextOverrides: Optional[Dict[str, Any]] = None + + +class EpisodeExecuteResponse(BaseModel): + """执行剧集响应""" + episode: Episode + success: bool + message: str = "" diff --git a/backend/app/models/review.py b/backend/app/models/review.py new file mode 100644 index 0000000..c9c3989 --- /dev/null +++ b/backend/app/models/review.py @@ -0,0 +1,234 @@ +""" +Review System Data Models + +审核系统数据模型 - 用于配置和执行内容审核 +""" +from pydantic import BaseModel, Field +from typing import Dict, List, Optional, Any +from datetime import datetime +from enum import Enum + + +# ============================================ +# 枚举类型 +# ============================================ + +class SeverityLevel(str, Enum): + """严重程度""" + low = "low" + medium = "medium" + high = "high" + + +class DimensionType(str, Enum): + """审核维度类型""" + consistency = "consistency" # 一致性审核 + quality = "quality" # 质量审核 + pacing = "pacing" # 节奏审核 + dialogue = "dialogue" # 对话审核 + character = "character" # 人物审核 + plot = "plot" # 剧情审核 + custom = "custom" # 自定义审核 + + +class IssueType(str, Enum): + """问题类型""" + inconsistency = "inconsistency" # 不一致 + grammar = "grammar" # 语法错误 + logic_error = "logic_error" # 逻辑错误 + pacing_issue = "pacing_issue" # 节奏问题 + character_ooc = "character_ooc" # 人物性格崩坏 + plot_hole = "plot_hole" # 剧情漏洞 + weak_dialogue = "weak_dialogue" # 对话薄弱 + description_issue = "description_issue" # 描述问题 + custom = "custom" # 自定义问题 + + +# ============================================ +# 嵌套模型 +# ============================================ + +class Location(BaseModel): + """位置信息 - 精确定位问题所在位置""" + episode: int = Field(..., description="集数") + scene: Optional[int] = Field(None, description="场景编号") + line: Optional[int] = Field(None, description="行号") + context: Optional[str] = Field(None, description="上下文片段") + + +class DimensionConfig(BaseModel): + """维度配置 - 单个审核维度的配置""" + enabled: bool = Field(True, description="是否启用该维度") + strictness: float = Field(0.7, ge=0.0, le=1.0, description="严格程度 0-1") + custom_rules: List[str] = Field(default_factory=list, description="该维度的自定义规则ID列表") + weight: float = Field(1.0, ge=0.0, le=2.0, description="权重,用于计算总分") + + +class CustomRule(BaseModel): + """自定义规则""" + id: str = Field(..., description="规则ID") + name: str = Field(..., description="规则名称") + description: str = Field("", description="规则描述") + trigger_condition: str = Field(..., description="触发条件(自然语言描述)") + dimension: DimensionType = Field(..., description="所属维度") + severity: SeverityLevel = Field(SeverityLevel.medium, description="默认严重程度") + enabled: bool = Field(True, description="是否启用") + created_at: datetime = Field(default_factory=datetime.now) + updated_at: datetime = Field(default_factory=datetime.now) + + +class ReviewIssue(BaseModel): + """审核问题 - 具体的审核发现""" + id: str = Field(..., description="问题ID") + type: IssueType = Field(..., description="问题类型") + dimension: DimensionType = Field(..., description="所属维度") + severity: SeverityLevel = Field(..., 
description="严重程度") + location: Location = Field(..., description="问题位置") + description: str = Field(..., description="问题描述") + suggestion: str = Field("", description="修改建议") + rule_id: Optional[str] = Field(None, description="触发的自定义规则ID(如果有)") + + +class DimensionScore(BaseModel): + """维度分数""" + dimension: DimensionType = Field(..., description="维度") + score: float = Field(..., ge=0.0, le=100.0, description="分数 0-100") + passed: bool = Field(..., description="是否通过") + issue_count: int = Field(0, ge=0, description="问题数量") + issues: List[ReviewIssue] = Field(default_factory=list, description="问题列表") + + +# ============================================ +# 核心模型 +# ============================================ + +class ReviewConfig(BaseModel): + """审核配置 - 项目的审核设置""" + enabled_review_skills: List[str] = Field( + default_factory=lambda: ["consistency_checker"], + description="启用的审核Skill ID列表" + ) + overall_strictness: float = Field(0.7, ge=0.0, le=1.0, description="整体严格程度 0-1") + dimension_settings: Dict[DimensionType, DimensionConfig] = Field( + default_factory=dict, + description="各维度配置" + ) + custom_rules: List[CustomRule] = Field(default_factory=list, description="自定义规则列表") + auto_fix_enabled: bool = Field(False, description="是否启用自动修复") + pass_threshold: float = Field(75.0, ge=0.0, le=100.0, description="通过阈值") + + class Config: + use_enum_values = False + + +class ReviewResult(BaseModel): + """审核结果 - 单集审核的完整结果""" + id: str = Field(..., description="审核结果ID") + episode_id: str = Field(..., description="剧集ID") + episode_number: int = Field(..., description="集数") + project_id: str = Field(..., description="项目ID") + + # 整体结果 + overall_score: float = Field(..., ge=0.0, le=100.0, description="总分 0-100") + passed: bool = Field(..., description="是否通过审核") + passed_dimensions: int = Field(0, ge=0, description="通过的维度数") + total_dimensions: int = Field(0, ge=0, description="总维度数") + + # 维度分数 + dimension_scores: List[DimensionScore] = Field( + default_factory=list, + description="各维度分数" + ) + + # 所有问题 + issues: List[ReviewIssue] = Field( + default_factory=list, + description="所有问题(按严重程度排序)" + ) + + # 统计 + issue_count: int = Field(0, ge=0, description="总问题数") + high_severity_count: int = Field(0, ge=0, description="高严重度问题数") + medium_severity_count: int = Field(0, ge=0, description="中严重度问题数") + low_severity_count: int = Field(0, ge=0, description="低严重度问题数") + + # 元数据 + reviewed_at: datetime = Field(default_factory=datetime.now, description="审核时间") + review_duration: Optional[float] = Field(None, description="审核耗时(秒)") + reviewer_version: str = Field("1.0.0", description="审核器版本") + + def calculate_statistics(self): + """计算统计信息""" + self.issue_count = len(self.issues) + self.high_severity_count = sum(1 for i in self.issues if i.severity == SeverityLevel.high) + self.medium_severity_count = sum(1 for i in self.issues if i.severity == SeverityLevel.medium) + self.low_severity_count = sum(1 for i in self.issues if i.severity == SeverityLevel.low) + + self.passed_dimensions = sum(1 for ds in self.dimension_scores if ds.passed) + self.total_dimensions = len(self.dimension_scores) + + +# ============================================ +# 请求/响应模型 +# ============================================ + +class ReviewConfigUpdate(BaseModel): + """更新审核配置请求""" + enabled_review_skills: Optional[List[str]] = None + overall_strictness: Optional[float] = Field(None, ge=0.0, le=1.0) + dimension_settings: Optional[Dict[DimensionType, DimensionConfig]] = None + custom_rules: Optional[List[CustomRule]] = None + auto_fix_enabled: 
Optional[bool] = None + pass_threshold: Optional[float] = Field(None, ge=0.0, le=100.0) + + +class ReviewRequest(BaseModel): + """执行审核请求""" + episode_number: int = Field(..., ge=1, description="集数") + dimensions: Optional[List[DimensionType]] = Field(None, description="指定要审核的维度(可选)") + force_rereview: bool = Field(False, description="是否强制重新审核") + + +class ReviewResponse(BaseModel): + """审核响应""" + success: bool + review_result: Optional[ReviewResult] = None + message: str = "" + episode_id: Optional[str] = None + + +class DimensionInfo(BaseModel): + """维度信息 - 用于返回可用维度""" + id: DimensionType = Field(..., description="维度ID") + name: str = Field(..., description="维度名称") + description: str = Field(..., description="维度描述") + default_strictness: float = Field(0.7, ge=0.0, le=1.0, description="默认严格程度") + supported_skill_ids: List[str] = Field(default_factory=list, description="支持该维度的Skill ID列表") + + +class CustomRuleCreate(BaseModel): + """创建自定义规则请求""" + name: str = Field(..., description="规则名称") + description: str = Field("", description="规则描述") + trigger_condition: str = Field(..., description="触发条件") + dimension: DimensionType = Field(..., description="所属维度") + severity: SeverityLevel = Field(SeverityLevel.medium, description="严重程度") + + +class CustomRuleUpdate(BaseModel): + """更新自定义规则请求""" + name: Optional[str] = None + description: Optional[str] = None + trigger_condition: Optional[str] = None + dimension: Optional[DimensionType] = None + severity: Optional[SeverityLevel] = None + enabled: Optional[bool] = None + + +class ReviewPreset(BaseModel): + """审核预设 - 预设的审核配置""" + id: str = Field(..., description="预设ID") + name: str = Field(..., description="预设名称") + description: str = Field(..., description="预设描述") + config: ReviewConfig = Field(..., description="审核配置") + category: str = Field("general", description="预设分类") diff --git a/backend/app/models/skill.py b/backend/app/models/skill.py new file mode 100644 index 0000000..0684f93 --- /dev/null +++ b/backend/app/models/skill.py @@ -0,0 +1,120 @@ +""" +Skill 数据模型 + +Skill 是平台的核心概念:可配置的行为指导单元 +""" +from pydantic import BaseModel, Field +from typing import Optional, Dict, Any, List +from datetime import datetime + + +class SkillTool(BaseModel): + """Skill 工具定义(仅内置 Skill 支持)""" + name: str + description: str + script: str # 脚本文件名 + parameters: Dict[str, Dict[str, Any]] = Field(default_factory=dict) + + +class SkillConfig(BaseModel): + """Skill 配置""" + # 风格预设 + preset: Optional[str] = None + + # 可调节参数 + parameters: Dict[str, Dict[str, Any]] = Field(default_factory=dict) + + # 权重配置 + weights: Optional[Dict[str, float]] = None + + +class Skill(BaseModel): + """Skill 模型""" + id: str + name: str + version: str = "1.0.0" + author: str = "未知" + + # 类型:builtin 或 user + type: str = "user" + + # 核心内容 - 行为指导 + behavior_guide: str = "" # 直接影响 Agent 行为 + + # 配置 + config: SkillConfig = Field(default_factory=SkillConfig) + + # 提示词模板(可选) + template: Optional[str] = None + + # 工具定义(仅内置 Skill) + tools: Optional[List[SkillTool]] = None + + # 元数据 + category: str = "通用" + tags: List[str] = Field(default_factory=list) + visibility: str = "private" # private 或 public + + # 文件路径(用于内部追踪) + file_path: Optional[str] = None + + # 时间戳 + created_at: Optional[datetime] = None + updated_at: Optional[datetime] = None + + +class SkillCreate(BaseModel): + """创建 Skill 请求""" + id: str + name: str + content: str # Markdown 格式的完整 Skill 内容 + category: str = "通用" + tags: List[str] = Field(default_factory=list) + + +class SkillUpdate(BaseModel): + """更新 Skill 请求""" + name: Optional[str] = 
None + content: Optional[str] = None + category: Optional[str] = None + tags: Optional[List[str]] = None + + +class SkillConfigUpdate(BaseModel): + """更新 Skill 配置请求""" + preset: Optional[str] = None + parameters: Optional[Dict[str, Dict[str, Any]]] = None + weights: Optional[Dict[str, float]] = None + + +class SkillTestRequest(BaseModel): + """测试 Skill 请求""" + test_input: str + context: Optional[Dict[str, Any]] = None + temperature: float = 0.7 + + +class SkillTestResponse(BaseModel): + """测试 Skill 响应""" + skill_id: str + skill_name: str + response: str + usage: Optional[Dict[str, int]] = None + + +class SkillGenerateRequest(BaseModel): + """AI 生成 Skill 请求""" + description: str = Field(..., description="用户对想要创建的 Skill 的描述") + category: Optional[str] = None + tags: Optional[List[str]] = None + temperature: float = Field(0.7, ge=0, le=1) + + +class SkillGenerateResponse(BaseModel): + """AI 生成 Skill 响应""" + suggested_id: str + suggested_name: str + skill_content: str + category: str + suggested_tags: List[str] + explanation: str # AI 对生成的 Skill 的说明 diff --git a/backend/app/models/skill_config.py b/backend/app/models/skill_config.py new file mode 100644 index 0000000..05d1e53 --- /dev/null +++ b/backend/app/models/skill_config.py @@ -0,0 +1,25 @@ +""" +Skills 配置请求/响应模型 +""" +from pydantic import BaseModel +from typing import List, Dict, Optional, Any +from app.models.project import TaskType, SkillAssignment, TaskSkillConfig, EpisodeSkillOverride + + +class ProjectSkillConfigUpdate(BaseModel): + """项目 Skills 配置更新""" + defaultTaskSkills: List[TaskSkillConfig] + episodeSkillOverrides: Dict[int, EpisodeSkillOverride] + + +class SkillConfigResponse(BaseModel): + """Skills 配置响应""" + defaultTaskSkills: List[TaskSkillConfig] + episodeSkillOverrides: Dict[int, EpisodeSkillOverride] + + +class EpisodeSkillConfigUpdate(BaseModel): + """单集 Skills 配置更新""" + episode_number: int + task_configs: List[TaskSkillConfig] + use_project_default: bool = True diff --git a/backend/app/models/skill_integration.py b/backend/app/models/skill_integration.py new file mode 100644 index 0000000..c7eccab --- /dev/null +++ b/backend/app/models/skill_integration.py @@ -0,0 +1,315 @@ +""" +完整的数据模型和 API,实现 Skills 与 LLM 的深度结合 +""" +from pydantic import BaseModel, Field +from typing import List, Optional, Dict, Any, Union +from datetime import datetime +from enum import Enum + + +# ============================================================================ +# Skill 与 LLM 结合的核心模型 +# ============================================================================ + +class SkillTriggerType(str, Enum): + """Skill 触发类型""" + # LLM 根据 description 自动选择 + AUTO = "auto" + # 用户/Agent 显式指定 + EXPLICIT = "explicit" + # Agent 工作流固定使用 + FIXED = "fixed" + + +class SkillMetadata(BaseModel): + """Skill 元数据(用于触发和选择)""" + name: str + # 关键:description 用于 LLM 理解何时使用此 Skill + description: str # 应包含:功能、触发条件、使用场景 + category: str + tags: List[str] + trigger_type: SkillTriggerType + + # 可选的触发关键词(用于精确匹配) + keywords: List[str] = Field(default_factory=list) + + # 适用场景(用于 AI 匹配) + use_cases: List[str] = Field(default_factory=list) + + # 依赖的其他 Skills + dependencies: List[str] = Field(default_factory=list) + + # 权重(用于多 Skill 选择时的排序) + priority: float = 1.0 + + +class SkillReferenceFile(BaseModel): + """参考文件定义""" + filename: str + filetype: str # md, txt, json, yaml, py, sh + content: str + description: Optional[str] = None + size_bytes: int + + +class SkillScript(BaseModel): + """脚本定义""" + filename: str + language: str # python, bash, javascript + code: str + 
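# ------------------------------------------------------------------------
# Metadata sketch (illustrative): with AUTO triggering, the description is
# what the LLM reads to decide when to use the skill, so it should state
# both what the skill does and when to invoke it. Values are hypothetical.
#
#   meta = SkillMetadata(
#       name="ancient-dialogue-writer",
#       description="Writes period-accurate dialogue; use when the user "
#                   "asks for costume-drama or classical-style lines.",
#       category="writing",
#       tags=["dialogue", "ancient"],
#       trigger_type=SkillTriggerType.AUTO,
#       keywords=["dialogue", "ancient"],
#       priority=1.2,
#   )
# ------------------------------------------------------------------------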
description: str + parameters: Dict[str, Any] = Field(default_factory=dict) + + +class SkillAsset(BaseModel): + """资产定义(模板、图标等)""" + filename: str + filetype: str + content: str # base64 或文本 + description: str + + +class SkillStructure(BaseModel): + """完整的 Skill 结构(符合 skill-creator 标准)""" + # SKILL.md + frontmatter: Dict[str, Any] # YAML frontmatter: name, description + behavior_guide: str # Markdown 内容 + + # 可选资源 + scripts: List[SkillScript] = Field(default_factory=list) + references: List[SkillReferenceFile] = Field(default_factory=list) + assets: List[SkillAsset] = Field(default_factory=list) + + +# ============================================================================ +# AI 生成 Skill 的完整流程 +# ============================================================================ + +class SkillGenerationRequest(BaseModel): + """ + AI 生成 Skill 请求 + + 按照 skill-creator 的完整流程: + Step 1: Understanding - 理解需求 + Step 2: Planning - 规划资源 + Step 3: Initialize - 生成结构 + Step 4: Edit - 用户调整 + Step 5: Package - 打包 + """ + # Step 1: 用户意图 + user_intent: str = Field( + ..., + description="用户想要创建的 Skill 的意图描述" + ) + use_cases: List[str] = Field( + default_factory=list, + description="具体用例示例,如 ['用户说:帮我写一段古风对话', '用户说:审核这段内容的一致性']" + ) + + # 可选约束 + preferred_category: Optional[str] = None + preferred_tags: Optional[List[str]] = None + trigger_type: SkillTriggerType = SkillTriggerType.AUTO + + # 生成选项 + include_scripts: bool = False # 是否生成脚本 + include_references: bool = False # 是否生成参考文档模板 + include_assets: bool = False # 是否生成资产模板 + temperature: float = Field(0.7, ge=0, le=1) + + +class SkillAnalysis(BaseModel): + """Step 1: 需求分析结果""" + skill_type: str # 如 "对话创作", "内容审核", "文件处理" + core_functionality: str + intended_users: List[str] + key_features: List[str] + suggested_complexity: str # "simple", "medium", "complex" + + +class SkillPlanning(BaseModel): + """Step 2: 资源规划结果""" + needs_scripts: bool + suggested_scripts: List[Dict[str, str]] # [{filename, purpose}] + needs_references: bool + suggested_references: List[Dict[str, str]] + needs_assets: bool + suggested_assets: List[Dict[str, str]] + + # Degree of freedom 建议 + suggested_freedom: str # "high", "medium", "low" + + +class SkillGenerationResponse(BaseModel): + """ + AI 生成 Skill 的完整响应 + + 包含完整的 skill-creator 流程结果 + """ + # Step 1: 分析结果 + analysis: SkillAnalysis + + # Step 2: 规划结果 + planning: SkillPlanning + + # Step 3: 完整的 Skill 结构 + skill_structure: SkillStructure + + # 元数据建议 + suggested_metadata: SkillMetadata + + # 实现建议 + implementation_steps: List[str] + best_practices: List[str] + + # 验证结果 + validation_passed: bool + validation_warnings: List[str] = Field(default_factory=list) + validation_errors: List[str] = Field(default_factory=list) + + +# ============================================================================ +# Skill 调整/优化请求 +# ============================================================================ + +class SkillRefinementRequest(BaseModel): + """对已生成的 Skill 进行调整""" + skill_id: str + current_content: str + + # 调整需求 + refinement_type: str # "optimize", "extend", "fix", "adapt" + refinement_description: str + + # 上下文 + user_feedback: Optional[str] = None + use_case_update: Optional[str] = None + + temperature: float = Field(0.7, ge=0, le=1) + + +class SkillRefinementResponse(BaseModel): + """调整后的 Skill""" + updated_structure: SkillStructure + changes_made: List[str] + improvement_suggestions: List[str] + + +# ============================================================================ +# Skill 选择和路由(LLM 与 Skills 结合) +# 
============================================================================ + +class SkillSelectionCriteria(BaseModel): + """Skill 选择条件""" + # 用户意图 + user_intent: str + + # 上下文信息 + context: Dict[str, Any] = Field(default_factory=dict) + + # 约束条件 + required_category: Optional[str] = None + required_tags: Optional[List[str]] = None + exclude_skills: List[str] = Field(default_factory=list) + + # 选择模式 + selection_mode: str = "auto" # "auto", "fixed", "hybrid" + + +class SkillSelectionResult(BaseModel): + """Skill 选择结果""" + selected_skills: List[SkillMetadata] + selection_reason: str + confidence_scores: Dict[str, float] + alternative_skills: List[SkillMetadata] = Field(default_factory=list) + + +class SkillExecutionContext(BaseModel): + """Skill 执行上下文""" + skill_id: str + skill_metadata: SkillMetadata + execution_mode: str # "integrated", "standalone", "chained" + + # 上下文数据 + user_input: str + context_data: Dict[str, Any] = Field(default_factory=dict) + + # 参考文件(已加载) + loaded_references: Dict[str, str] = Field(default_factory=dict) + + # 执行参数 + temperature: float = 0.7 + max_tokens: Optional[int] = None + + +class SkillExecutionResponse(BaseModel): + """Skill 执行结果""" + skill_id: str + execution_success: bool + result: str + + # 使用的信息 + references_used: List[str] + scripts_executed: List[str] + + # 反馈信息 + execution_time_ms: int + tokens_used: Dict[str, int] + + # 建议 + improvement_suggestions: List[str] = Field(default_factory=list) + + +# ============================================================================ +# Agent 工作流中的 Skill 配置 +# ============================================================================ + +class AgentWorkflowStep(BaseModel): + """Agent 工作流步骤""" + step_name: str + step_number: int + + # Skill 配置 + skill_id: Optional[str] = None # None 表示不使用 Skill + skill_mode: str = "required" # "required", "optional", "conditional" + + # 条件(用于 conditional 模式) + condition: Optional[str] = None + + # 回退行为 + fallback_skill_id: Optional[str] = None + fallback_behavior: Optional[str] = None + + +class AgentWorkflowConfig(BaseModel): + """Agent 工作流配置(定义哪些步骤使用哪些 Skills)""" + agent_id: str + agent_name: str + + # 工作流步骤 + steps: List[AgentWorkflowStep] + + # 全局配置 + default_temperature: float = 0.7 + allow_skill_substitution: bool = False + require_all_skills: bool = True + + +# ============================================================================ +# 批量 Skill 操作 +# ============================================================================ + +class BatchSkillOperation(BaseModel): + """批量 Skill 操作""" + operation: str # "validate", "package", "deploy" + skill_ids: List[str] + options: Dict[str, Any] = Field(default_factory=dict) + + +class BatchSkillOperationResponse(BaseModel): + """批量操作结果""" + operation: str + total_skills: int + successful_skills: List[str] + failed_skills: List[Dict[str, str]] + summary: Dict[str, Any] diff --git a/backend/app/schemas/__init__.py b/backend/app/schemas/__init__.py new file mode 100644 index 0000000..fff1fac --- /dev/null +++ b/backend/app/schemas/__init__.py @@ -0,0 +1 @@ +# Schemas module diff --git a/backend/app/utils/__init__.py b/backend/app/utils/__init__.py new file mode 100644 index 0000000..feddb93 --- /dev/null +++ b/backend/app/utils/__init__.py @@ -0,0 +1 @@ +# Utils module diff --git a/backend/app/utils/logger.py b/backend/app/utils/logger.py new file mode 100644 index 0000000..f5254a6 --- /dev/null +++ b/backend/app/utils/logger.py @@ -0,0 +1,63 @@ +""" +Creative Studio 日志配置 +""" +import logging +import sys +from pathlib import Path 
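# ------------------------------------------------------------------------
# Workflow sketch (illustrative): binding skills to fixed agent steps with
# the models from skill_integration.py above. Skill ids are hypothetical.
#
#   from app.models.skill_integration import (
#       AgentWorkflowConfig, AgentWorkflowStep)
#
#   workflow = AgentWorkflowConfig(
#       agent_id="series-creation",
#       agent_name="Series Creation Agent",
#       steps=[
#           AgentWorkflowStep(step_name="outline", step_number=1,
#                             skill_id="dialogue_writer_ancient"),
#           AgentWorkflowStep(step_name="review", step_number=2,
#                             skill_id="consistency_checker",
#                             skill_mode="optional"),
#       ],
#   )
# ------------------------------------------------------------------------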
+from logging.handlers import RotatingFileHandler
+from app.config import settings
+
+
+def setup_logging():
+    """Configure application logging"""
+
+    # Create the log directory
+    log_path = Path(settings.log_file)
+    log_path.parent.mkdir(parents=True, exist_ok=True)
+
+    # Configure the root logger
+    root_logger = logging.getLogger()
+    root_logger.setLevel(getattr(logging, settings.log_level.upper()))
+
+    # Remove any existing handlers
+    root_logger.handlers.clear()
+
+    # Log format
+    formatter = logging.Formatter(
+        '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+        datefmt='%Y-%m-%d %H:%M:%S'
+    )
+
+    # Console handler
+    console_handler = logging.StreamHandler(sys.stdout)
+    console_handler.setLevel(logging.INFO)
+    console_handler.setFormatter(formatter)
+    root_logger.addHandler(console_handler)
+
+    # File handler (with rotation)
+    file_handler = RotatingFileHandler(
+        settings.log_file,
+        maxBytes=10 * 1024 * 1024,  # 10 MB
+        backupCount=5,
+        encoding='utf-8'
+    )
+    file_handler.setLevel(getattr(logging, settings.log_level.upper()))
+    file_handler.setFormatter(formatter)
+    root_logger.addHandler(file_handler)
+
+
+def get_logger(name: str) -> logging.Logger:
+    """
+    Get a logger instance
+
+    Args:
+        name: Logger name (usually __name__)
+
+    Returns:
+        A configured logger
+    """
+    return logging.getLogger(name)
+
+
+# Initialize logging on import
+setup_logging()
diff --git a/backend/requirements.txt b/backend/requirements.txt
new file mode 100644
index 0000000..f3517a6
--- /dev/null
+++ b/backend/requirements.txt
@@ -0,0 +1,45 @@
+# Creative Studio Backend Dependencies
+# Python 3.11+
+
+# Web Framework
+fastapi==0.104.1
+uvicorn[standard]==0.24.0
+
+# GLM API (official SDK)
+zai-sdk>=0.2.0
+
+# Database
+motor==3.3.2
+pymongo==4.6.0
+
+# Redis
+redis==5.0.1
+
+# Data Validation
+pydantic>=2.7.0
+pydantic-settings
+
+# Authentication
+python-jose[cryptography]==3.3.0
+passlib[bcrypt]==1.7.4
+
+# Async Tasks
+celery==5.3.4
+kombu==5.3.4
+
+# HTTP Client
+httpx==0.25.2
+aiohttp==3.9.1
+
+# Utilities
+python-dotenv==1.0.0
+
+# Vector Database
+chromadb==0.4.18
+
+# Development
+pytest==7.4.3
+pytest-asyncio==0.21.1
+black==23.12.0
+flake8==6.1.0
+mypy==1.7.1
diff --git a/backend/setup.py b/backend/setup.py
new file mode 100644
index 0000000..6117683
--- /dev/null
+++ b/backend/setup.py
@@ -0,0 +1,41 @@
+"""
+Creative Studio Backend - setup configuration
+"""
+from setuptools import setup, find_packages
+
+setup(
+    name="creative-studio-backend",
+    version="1.0.0",
+    description="Backend for the AI series-creation platform built on the Agent + Skill architecture",
+    author="Creative Studio Team",
+    packages=find_packages(),
+    python_requires=">=3.11",
+    install_requires=[
+        "fastapi>=0.104.1",
+        "uvicorn[standard]>=0.24.0",
+        "python-multipart>=0.0.6",
+        "zai-sdk>=0.2.0",
+        "motor>=3.3.2",
+        "pymongo>=4.6.0",
+        "redis>=5.0.1",
+        "pydantic>=2.5.0",
+        "pydantic-settings>=2.1.0",
+        "python-jose[cryptography]>=3.3.0",
+        "passlib[bcrypt]>=1.7.4",
+        "celery>=5.3.4",
+        "httpx>=0.25.2",
+        "aiohttp>=3.9.1",
+        "python-dotenv>=1.0.0",
+        "pyyaml>=6.0.1",
+        "chromadb>=0.4.18",
+    ],
+    extras_require={
+        "dev": [
+            "pytest>=7.4.3",
+            "pytest-asyncio>=0.21.1",
+            "black>=23.12.0",
+            "flake8>=6.1.0",
+            "mypy>=1.7.1",
+        ]
+    },
+)
diff --git a/backend/skills_storage/builtin_skills/skill-creator/SKILL.md b/backend/skills_storage/builtin_skills/skill-creator/SKILL.md
new file mode 100644
index 0000000..b7f8659
--- /dev/null
+++ b/backend/skills_storage/builtin_skills/skill-creator/SKILL.md
@@ -0,0 +1,356 @@
+---
+name: skill-creator
+description: Guide for creating effective skills.
This skill should be used when users want to create a new skill (or update an existing skill) that extends Claude's capabilities with specialized knowledge, workflows, or tool integrations. +license: Complete terms in LICENSE.txt +--- + +# Skill Creator + +This skill provides guidance for creating effective skills. + +## About Skills + +Skills are modular, self-contained packages that extend Claude's capabilities by providing +specialized knowledge, workflows, and tools. Think of them as "onboarding guides" for specific +domains or tasks—they transform Claude from a general-purpose agent into a specialized agent +equipped with procedural knowledge that no model can fully possess. + +### What Skills Provide + +1. Specialized workflows - Multi-step procedures for specific domains +2. Tool integrations - Instructions for working with specific file formats or APIs +3. Domain expertise - Company-specific knowledge, schemas, business logic +4. Bundled resources - Scripts, references, and assets for complex and repetitive tasks + +## Core Principles + +### Concise is Key + +The context window is a public good. Skills share the context window with everything else Claude needs: system prompt, conversation history, other Skills' metadata, and the actual user request. + +**Default assumption: Claude is already very smart.** Only add context Claude doesn't already have. Challenge each piece of information: "Does Claude really need this explanation?" and "Does this paragraph justify its token cost?" + +Prefer concise examples over verbose explanations. + +### Set Appropriate Degrees of Freedom + +Match the level of specificity to the task's fragility and variability: + +**High freedom (text-based instructions)**: Use when multiple approaches are valid, decisions depend on context, or heuristics guide the approach. + +**Medium freedom (pseudocode or scripts with parameters)**: Use when a preferred pattern exists, some variation is acceptable, or configuration affects behavior. + +**Low freedom (specific scripts, few parameters)**: Use when operations are fragile and error-prone, consistency is critical, or a specific sequence must be followed. + +Think of Claude as exploring a path: a narrow bridge with cliffs needs specific guardrails (low freedom), while an open field allows many routes (high freedom). + +### Anatomy of a Skill + +Every skill consists of a required SKILL.md file and optional bundled resources: + +``` +skill-name/ +├── SKILL.md (required) +│ ├── YAML frontmatter metadata (required) +│ │ ├── name: (required) +│ │ └── description: (required) +│ └── Markdown instructions (required) +└── Bundled Resources (optional) + ├── scripts/ - Executable code (Python/Bash/etc.) + ├── references/ - Documentation intended to be loaded into context as needed + └── assets/ - Files used in output (templates, icons, fonts, etc.) +``` + +#### SKILL.md (required) + +Every SKILL.md consists of: + +- **Frontmatter** (YAML): Contains `name` and `description` fields. These are the only fields that Claude reads to determine when the skill gets used, thus it is very important to be clear and comprehensive in describing what the skill is, and when it should be used. +- **Body** (Markdown): Instructions and guidance for using the skill. Only loaded AFTER the skill triggers (if at all). + +#### Bundled Resources (optional) + +##### Scripts (`scripts/`) + +Executable code (Python/Bash/etc.) for tasks that require deterministic reliability or are repeatedly rewritten. 
+
+- **When to include**: When the same code is being rewritten repeatedly or deterministic reliability is needed
+- **Example**: `scripts/rotate_pdf.py` for PDF rotation tasks
+- **Benefits**: Token efficient, deterministic, may be executed without loading into context
+- **Note**: Scripts may still need to be read by Claude for patching or environment-specific adjustments
+
+##### References (`references/`)
+
+Documentation and reference material intended to be loaded as needed into context to inform Claude's process and thinking.
+
+- **When to include**: For documentation that Claude should reference while working
+- **Examples**: `references/finance.md` for financial schemas, `references/mnda.md` for company NDA template, `references/policies.md` for company policies, `references/api_docs.md` for API specifications
+- **Use cases**: Database schemas, API documentation, domain knowledge, company policies, detailed workflow guides
+- **Benefits**: Keeps SKILL.md lean, loaded only when Claude determines it's needed
+- **Best practice**: If files are large (>10k words), include grep search patterns in SKILL.md
+- **Avoid duplication**: Information should live in either SKILL.md or references files, not both. Prefer references files for detailed information unless it's truly core to the skill—this keeps SKILL.md lean while making information discoverable without hogging the context window. Keep only essential procedural instructions and workflow guidance in SKILL.md; move detailed reference material, schemas, and examples to references files.
+
+##### Assets (`assets/`)
+
+Files not intended to be loaded into context, but rather used within the output Claude produces.
+
+- **When to include**: When the skill needs files that will be used in the final output
+- **Examples**: `assets/logo.png` for brand assets, `assets/slides.pptx` for PowerPoint templates, `assets/frontend-template/` for HTML/React boilerplate, `assets/font.ttf` for typography
+- **Use cases**: Templates, images, icons, boilerplate code, fonts, sample documents that get copied or modified
+- **Benefits**: Separates output resources from documentation, enables Claude to use files without loading them into context
+
+#### What Not to Include in a Skill
+
+A skill should only contain essential files that directly support its functionality. Do NOT create extraneous documentation or auxiliary files, including:
+
+- README.md
+- INSTALLATION_GUIDE.md
+- QUICK_REFERENCE.md
+- CHANGELOG.md
+- etc.
+
+The skill should only contain the information needed for an AI agent to do the job at hand. It should not contain auxiliary context about the process that went into creating it, setup and testing procedures, user-facing documentation, etc. Creating additional documentation files just adds clutter and confusion.
+
+### Progressive Disclosure Design Principle
+
+Skills use a three-level loading system to manage context efficiently:
+
+1. **Metadata (name + description)** - Always in context (~100 words)
+2. **SKILL.md body** - When skill triggers (<5k words)
+3. **Bundled resources** - As needed by Claude (Unlimited because scripts can be executed without reading into context window)
+
+#### Progressive Disclosure Patterns
+
+Keep SKILL.md body to the essentials and under 500 lines to minimize context bloat. Split content into separate files when approaching this limit. 
When splitting out content into other files, it is very important to reference them from SKILL.md and describe clearly when to read them, to ensure the reader of the skill knows they exist and when to use them. + +**Key principle:** When a skill supports multiple variations, frameworks, or options, keep only the core workflow and selection guidance in SKILL.md. Move variant-specific details (patterns, examples, configuration) into separate reference files. + +**Pattern 1: High-level guide with references** + +```markdown +# PDF Processing + +## Quick start + +Extract text with pdfplumber: +[code example] + +## Advanced features + +- **Form filling**: See [FORMS.md](FORMS.md) for complete guide +- **API reference**: See [REFERENCE.md](REFERENCE.md) for all methods +- **Examples**: See [EXAMPLES.md](EXAMPLES.md) for common patterns +``` + +Claude loads FORMS.md, REFERENCE.md, or EXAMPLES.md only when needed. + +**Pattern 2: Domain-specific organization** + +For Skills with multiple domains, organize content by domain to avoid loading irrelevant context: + +``` +bigquery-skill/ +├── SKILL.md (overview and navigation) +└── reference/ + ├── finance.md (revenue, billing metrics) + ├── sales.md (opportunities, pipeline) + ├── product.md (API usage, features) + └── marketing.md (campaigns, attribution) +``` + +When a user asks about sales metrics, Claude only reads sales.md. + +Similarly, for skills supporting multiple frameworks or variants, organize by variant: + +``` +cloud-deploy/ +├── SKILL.md (workflow + provider selection) +└── references/ + ├── aws.md (AWS deployment patterns) + ├── gcp.md (GCP deployment patterns) + └── azure.md (Azure deployment patterns) +``` + +When the user chooses AWS, Claude only reads aws.md. + +**Pattern 3: Conditional details** + +Show basic content, link to advanced content: + +```markdown +# DOCX Processing + +## Creating documents + +Use docx-js for new documents. See [DOCX-JS.md](DOCX-JS.md). + +## Editing documents + +For simple edits, modify the XML directly. + +**For tracked changes**: See [REDLINING.md](REDLINING.md) +**For OOXML details**: See [OOXML.md](OOXML.md) +``` + +Claude reads REDLINING.md or OOXML.md only when the user needs those features. + +**Important guidelines:** + +- **Avoid deeply nested references** - Keep references one level deep from SKILL.md. All reference files should link directly from SKILL.md. +- **Structure longer reference files** - For files longer than 100 lines, include a table of contents at the top so Claude can see the full scope when previewing. + +## Skill Creation Process + +Skill creation involves these steps: + +1. Understand the skill with concrete examples +2. Plan reusable skill contents (scripts, references, assets) +3. Initialize the skill (run init_skill.py) +4. Edit the skill (implement resources and write SKILL.md) +5. Package the skill (run package_skill.py) +6. Iterate based on real usage + +Follow these steps in order, skipping only if there is a clear reason why they are not applicable. + +### Step 1: Understanding the Skill with Concrete Examples + +Skip this step only when the skill's usage patterns are already clearly understood. It remains valuable even when working with an existing skill. + +To create an effective skill, clearly understand concrete examples of how the skill will be used. This understanding can come from either direct user examples or generated examples that are validated with user feedback. 
+ +For example, when building an image-editor skill, relevant questions include: + +- "What functionality should the image-editor skill support? Editing, rotating, anything else?" +- "Can you give some examples of how this skill would be used?" +- "I can imagine users asking for things like 'Remove the red-eye from this image' or 'Rotate this image'. Are there other ways you imagine this skill being used?" +- "What would a user say that should trigger this skill?" + +To avoid overwhelming users, avoid asking too many questions in a single message. Start with the most important questions and follow up as needed for better effectiveness. + +Conclude this step when there is a clear sense of the functionality the skill should support. + +### Step 2: Planning the Reusable Skill Contents + +To turn concrete examples into an effective skill, analyze each example by: + +1. Considering how to execute on the example from scratch +2. Identifying what scripts, references, and assets would be helpful when executing these workflows repeatedly + +Example: When building a `pdf-editor` skill to handle queries like "Help me rotate this PDF," the analysis shows: + +1. Rotating a PDF requires re-writing the same code each time +2. A `scripts/rotate_pdf.py` script would be helpful to store in the skill + +Example: When designing a `frontend-webapp-builder` skill for queries like "Build me a todo app" or "Build me a dashboard to track my steps," the analysis shows: + +1. Writing a frontend webapp requires the same boilerplate HTML/React each time +2. An `assets/hello-world/` template containing the boilerplate HTML/React project files would be helpful to store in the skill + +Example: When building a `big-query` skill to handle queries like "How many users have logged in today?" the analysis shows: + +1. Querying BigQuery requires re-discovering the table schemas and relationships each time +2. A `references/schema.md` file documenting the table schemas would be helpful to store in the skill + +To establish the skill's contents, analyze each concrete example to create a list of the reusable resources to include: scripts, references, and assets. + +### Step 3: Initializing the Skill + +At this point, it is time to actually create the skill. + +Skip this step only if the skill being developed already exists, and iteration or packaging is needed. In this case, continue to the next step. + +When creating a new skill from scratch, always run the `init_skill.py` script. The script conveniently generates a new template skill directory that automatically includes everything a skill requires, making the skill creation process much more efficient and reliable. + +Usage: + +```bash +scripts/init_skill.py <skill-name> --path <output-directory> +``` + +The script: + +- Creates the skill directory at the specified path +- Generates a SKILL.md template with proper frontmatter and TODO placeholders +- Creates example resource directories: `scripts/`, `references/`, and `assets/` +- Adds example files in each directory that can be customized or deleted + +After initialization, customize or remove the generated SKILL.md and example files as needed. + +### Step 4: Edit the Skill + +When editing the (newly-generated or existing) skill, remember that the skill is being created for another instance of Claude to use. Include information that would be beneficial and non-obvious to Claude. 
Consider what procedural knowledge, domain-specific details, or reusable assets would help another Claude instance execute these tasks more effectively. + +#### Learn Proven Design Patterns + +Consult these helpful guides based on your skill's needs: + +- **Multi-step processes**: See references/workflows.md for sequential workflows and conditional logic +- **Specific output formats or quality standards**: See references/output-patterns.md for template and example patterns + +These files contain established best practices for effective skill design. + +#### Start with Reusable Skill Contents + +To begin implementation, start with the reusable resources identified above: `scripts/`, `references/`, and `assets/` files. Note that this step may require user input. For example, when implementing a `brand-guidelines` skill, the user may need to provide brand assets or templates to store in `assets/`, or documentation to store in `references/`. + +Added scripts must be tested by actually running them to ensure there are no bugs and that the output matches what is expected. If there are many similar scripts, only a representative sample needs to be tested to ensure confidence that they all work while balancing time to completion. + +Any example files and directories not needed for the skill should be deleted. The initialization script creates example files in `scripts/`, `references/`, and `assets/` to demonstrate structure, but most skills won't need all of them. + +#### Update SKILL.md + +**Writing Guidelines:** Always use imperative/infinitive form. + +##### Frontmatter + +Write the YAML frontmatter with `name` and `description`: + +- `name`: The skill name +- `description`: This is the primary triggering mechanism for your skill, and helps Claude understand when to use the skill. + - Include both what the Skill does and specific triggers/contexts for when to use it. + - Include all "when to use" information here - Not in the body. The body is only loaded after triggering, so "When to Use This Skill" sections in the body are not helpful to Claude. + - Example description for a `docx` skill: "Comprehensive document creation, editing, and analysis with support for tracked changes, comments, formatting preservation, and text extraction. Use when Claude needs to work with professional documents (.docx files) for: (1) Creating new documents, (2) Modifying or editing content, (3) Working with tracked changes, (4) Adding comments, or any other document tasks" + +Do not include any other fields in YAML frontmatter. + +##### Body + +Write instructions for using the skill and its bundled resources. + +### Step 5: Packaging a Skill + +Once development of the skill is complete, it must be packaged into a distributable .skill file that gets shared with the user. The packaging process automatically validates the skill first to ensure it meets all requirements: + +```bash +scripts/package_skill.py <path/to/skill-folder> +``` + +Optional output directory specification: + +```bash +scripts/package_skill.py <path/to/skill-folder> ./dist +``` + +The packaging script will: + +1. **Validate** the skill automatically, checking: + + - YAML frontmatter format and required fields + - Skill naming conventions and directory structure + - Description completeness and quality + - File organization and resource references + +2. 
**Package** the skill if validation passes, creating a .skill file named after the skill (e.g., `my-skill.skill`) that includes all files and maintains the proper directory structure for distribution. The .skill file is a zip file with a .skill extension. + +If validation fails, the script will report the errors and exit without creating a package. Fix any validation errors and run the packaging command again. + +### Step 6: Iterate + +After testing the skill, users may request improvements. Often this happens right after using the skill, with fresh context of how the skill performed. + +**Iteration workflow:** + +1. Use the skill on real tasks +2. Notice struggles or inefficiencies +3. Identify how SKILL.md or bundled resources should be updated +4. Implement changes and test again diff --git a/backend/skills_storage/builtin_skills/skill-creator/references/output-patterns.md b/backend/skills_storage/builtin_skills/skill-creator/references/output-patterns.md new file mode 100644 index 0000000..073ddda --- /dev/null +++ b/backend/skills_storage/builtin_skills/skill-creator/references/output-patterns.md @@ -0,0 +1,82 @@ +# Output Patterns + +Use these patterns when skills need to produce consistent, high-quality output. + +## Template Pattern + +Provide templates for output format. Match the level of strictness to your needs. + +**For strict requirements (like API responses or data formats):** + +```markdown +## Report structure + +ALWAYS use this exact template structure: + +# [Analysis Title] + +## Executive summary +[One-paragraph overview of key findings] + +## Key findings +- Finding 1 with supporting data +- Finding 2 with supporting data +- Finding 3 with supporting data + +## Recommendations +1. Specific actionable recommendation +2. Specific actionable recommendation +``` + +**For flexible guidance (when adaptation is useful):** + +```markdown +## Report structure + +Here is a sensible default format, but use your best judgment: + +# [Analysis Title] + +## Executive summary +[Overview] + +## Key findings +[Adapt sections based on what you discover] + +## Recommendations +[Tailor to the specific context] + +Adjust sections as needed for the specific analysis type. +``` + +## Examples Pattern + +For skills where output quality depends on seeing examples, provide input/output pairs: + +```markdown +## Commit message format + +Generate commit messages following these examples: + +**Example 1:** +Input: Added user authentication with JWT tokens +Output: +``` +feat(auth): implement JWT-based authentication + +Add login endpoint and token validation middleware +``` + +**Example 2:** +Input: Fixed bug where dates displayed incorrectly in reports +Output: +``` +fix(reports): correct date formatting in timezone conversion + +Use UTC timestamps consistently across report generation +``` + +Follow this style: type(scope): brief description, then detailed explanation. +``` + +Examples help Claude understand the desired style and level of detail more clearly than descriptions alone. diff --git a/backend/skills_storage/builtin_skills/skill-creator/references/workflows.md b/backend/skills_storage/builtin_skills/skill-creator/references/workflows.md new file mode 100644 index 0000000..a350c3c --- /dev/null +++ b/backend/skills_storage/builtin_skills/skill-creator/references/workflows.md @@ -0,0 +1,28 @@ +# Workflow Patterns + +## Sequential Workflows + +For complex tasks, break operations into clear, sequential steps. 
It is often helpful to give Claude an overview of the process towards the beginning of SKILL.md: + +```markdown +Filling a PDF form involves these steps: + +1. Analyze the form (run analyze_form.py) +2. Create field mapping (edit fields.json) +3. Validate mapping (run validate_fields.py) +4. Fill the form (run fill_form.py) +5. Verify output (run verify_output.py) +``` + +## Conditional Workflows + +For tasks with branching logic, guide Claude through decision points: + +```markdown +1. Determine the modification type: + **Creating new content?** → Follow "Creation workflow" below + **Editing existing content?** → Follow "Editing workflow" below + +2. Creation workflow: [steps] +3. Editing workflow: [steps] +``` \ No newline at end of file diff --git a/backend/skills_storage/builtin_skills/skill-creator/scripts/init_skill.py b/backend/skills_storage/builtin_skills/skill-creator/scripts/init_skill.py new file mode 100644 index 0000000..329ad4e --- /dev/null +++ b/backend/skills_storage/builtin_skills/skill-creator/scripts/init_skill.py @@ -0,0 +1,303 @@ +#!/usr/bin/env python3 +""" +Skill Initializer - Creates a new skill from template + +Usage: + init_skill.py <skill-name> --path <path> + +Examples: + init_skill.py my-new-skill --path skills/public + init_skill.py my-api-helper --path skills/private + init_skill.py custom-skill --path /custom/location +""" + +import sys +from pathlib import Path + + +SKILL_TEMPLATE = """--- +name: {skill_name} +description: [TODO: Complete and informative explanation of what the skill does and when to use it. Include WHEN to use this skill - specific scenarios, file types, or tasks that trigger it.] +--- + +# {skill_title} + +## Overview + +[TODO: 1-2 sentences explaining what this skill enables] + +## Structuring This Skill + +[TODO: Choose the structure that best fits this skill's purpose. Common patterns: + +**1. Workflow-Based** (best for sequential processes) +- Works well when there are clear step-by-step procedures +- Example: DOCX skill with "Workflow Decision Tree" → "Reading" → "Creating" → "Editing" +- Structure: ## Overview → ## Workflow Decision Tree → ## Step 1 → ## Step 2... + +**2. Task-Based** (best for tool collections) +- Works well when the skill offers different operations/capabilities +- Example: PDF skill with "Quick Start" → "Merge PDFs" → "Split PDFs" → "Extract Text" +- Structure: ## Overview → ## Quick Start → ## Task Category 1 → ## Task Category 2... + +**3. Reference/Guidelines** (best for standards or specifications) +- Works well for brand guidelines, coding standards, or requirements +- Example: Brand styling with "Brand Guidelines" → "Colors" → "Typography" → "Features" +- Structure: ## Overview → ## Guidelines → ## Specifications → ## Usage... + +**4. Capabilities-Based** (best for integrated systems) +- Works well when the skill provides multiple interrelated features +- Example: Product Management with "Core Capabilities" → numbered capability list +- Structure: ## Overview → ## Core Capabilities → ### 1. Feature → ### 2. Feature... + +Patterns can be mixed and matched as needed. Most skills combine patterns (e.g., start with task-based, add workflow for complex operations). + +Delete this entire "Structuring This Skill" section when done - it's just guidance.] + +## [TODO: Replace with the first main section based on chosen structure] + +[TODO: Add content here. 
See examples in existing skills: +- Code samples for technical skills +- Decision trees for complex workflows +- Concrete examples with realistic user requests +- References to scripts/templates/references as needed] + +## Resources + +This skill includes example resource directories that demonstrate how to organize different types of bundled resources: + +### scripts/ +Executable code (Python/Bash/etc.) that can be run directly to perform specific operations. + +**Examples from other skills:** +- PDF skill: `fill_fillable_fields.py`, `extract_form_field_info.py` - utilities for PDF manipulation +- DOCX skill: `document.py`, `utilities.py` - Python modules for document processing + +**Appropriate for:** Python scripts, shell scripts, or any executable code that performs automation, data processing, or specific operations. + +**Note:** Scripts may be executed without loading into context, but can still be read by Claude for patching or environment adjustments. + +### references/ +Documentation and reference material intended to be loaded into context to inform Claude's process and thinking. + +**Examples from other skills:** +- Product management: `communication.md`, `context_building.md` - detailed workflow guides +- BigQuery: API reference documentation and query examples +- Finance: Schema documentation, company policies + +**Appropriate for:** In-depth documentation, API references, database schemas, comprehensive guides, or any detailed information that Claude should reference while working. + +### assets/ +Files not intended to be loaded into context, but rather used within the output Claude produces. + +**Examples from other skills:** +- Brand styling: PowerPoint template files (.pptx), logo files +- Frontend builder: HTML/React boilerplate project directories +- Typography: Font files (.ttf, .woff2) + +**Appropriate for:** Templates, boilerplate code, document templates, images, icons, fonts, or any files meant to be copied or used in the final output. + +--- + +**Any unneeded directories can be deleted.** Not every skill requires all three types of resources. +""" + +EXAMPLE_SCRIPT = '''#!/usr/bin/env python3 +""" +Example helper script for {skill_name} + +This is a placeholder script that can be executed directly. +Replace with actual implementation or delete if not needed. + +Example real scripts from other skills: +- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields +- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images +""" + +def main(): + print("This is an example script for {skill_name}") + # TODO: Add actual script logic here + # This could be data processing, file conversion, API calls, etc. + +if __name__ == "__main__": + main() +''' + +EXAMPLE_REFERENCE = """# Reference Documentation for {skill_title} + +This is a placeholder for detailed reference documentation. +Replace with actual reference content or delete if not needed. 
+ +Example real reference docs from other skills: +- product-management/references/communication.md - Comprehensive guide for status updates +- product-management/references/context_building.md - Deep-dive on gathering context +- bigquery/references/ - API references and query examples + +## When Reference Docs Are Useful + +Reference docs are ideal for: +- Comprehensive API documentation +- Detailed workflow guides +- Complex multi-step processes +- Information too lengthy for main SKILL.md +- Content that's only needed for specific use cases + +## Structure Suggestions + +### API Reference Example +- Overview +- Authentication +- Endpoints with examples +- Error codes +- Rate limits + +### Workflow Guide Example +- Prerequisites +- Step-by-step instructions +- Common patterns +- Troubleshooting +- Best practices +""" + +EXAMPLE_ASSET = """# Example Asset File + +This placeholder represents where asset files would be stored. +Replace with actual asset files (templates, images, fonts, etc.) or delete if not needed. + +Asset files are NOT intended to be loaded into context, but rather used within +the output Claude produces. + +Example asset files from other skills: +- Brand guidelines: logo.png, slides_template.pptx +- Frontend builder: hello-world/ directory with HTML/React boilerplate +- Typography: custom-font.ttf, font-family.woff2 +- Data: sample_data.csv, test_dataset.json + +## Common Asset Types + +- Templates: .pptx, .docx, boilerplate directories +- Images: .png, .jpg, .svg, .gif +- Fonts: .ttf, .otf, .woff, .woff2 +- Boilerplate code: Project directories, starter files +- Icons: .ico, .svg +- Data files: .csv, .json, .xml, .yaml + +Note: This is a text placeholder. Actual assets can be any file type. +""" + + +def title_case_skill_name(skill_name): + """Convert hyphenated skill name to Title Case for display.""" + return ' '.join(word.capitalize() for word in skill_name.split('-')) + + +def init_skill(skill_name, path): + """ + Initialize a new skill directory with template SKILL.md. 
+ + Args: + skill_name: Name of the skill + path: Path where the skill directory should be created + + Returns: + Path to created skill directory, or None if error + """ + # Determine skill directory path + skill_dir = Path(path).resolve() / skill_name + + # Check if directory already exists + if skill_dir.exists(): + print(f"❌ Error: Skill directory already exists: {skill_dir}") + return None + + # Create skill directory + try: + skill_dir.mkdir(parents=True, exist_ok=False) + print(f"✅ Created skill directory: {skill_dir}") + except Exception as e: + print(f"❌ Error creating directory: {e}") + return None + + # Create SKILL.md from template + skill_title = title_case_skill_name(skill_name) + skill_content = SKILL_TEMPLATE.format( + skill_name=skill_name, + skill_title=skill_title + ) + + skill_md_path = skill_dir / 'SKILL.md' + try: + skill_md_path.write_text(skill_content) + print("✅ Created SKILL.md") + except Exception as e: + print(f"❌ Error creating SKILL.md: {e}") + return None + + # Create resource directories with example files + try: + # Create scripts/ directory with example script + scripts_dir = skill_dir / 'scripts' + scripts_dir.mkdir(exist_ok=True) + example_script = scripts_dir / 'example.py' + example_script.write_text(EXAMPLE_SCRIPT.format(skill_name=skill_name)) + example_script.chmod(0o755) + print("✅ Created scripts/example.py") + + # Create references/ directory with example reference doc + references_dir = skill_dir / 'references' + references_dir.mkdir(exist_ok=True) + example_reference = references_dir / 'api_reference.md' + example_reference.write_text(EXAMPLE_REFERENCE.format(skill_title=skill_title)) + print("✅ Created references/api_reference.md") + + # Create assets/ directory with example asset placeholder + assets_dir = skill_dir / 'assets' + assets_dir.mkdir(exist_ok=True) + example_asset = assets_dir / 'example_asset.txt' + example_asset.write_text(EXAMPLE_ASSET) + print("✅ Created assets/example_asset.txt") + except Exception as e: + print(f"❌ Error creating resource directories: {e}") + return None + + # Print next steps + print(f"\n✅ Skill '{skill_name}' initialized successfully at {skill_dir}") + print("\nNext steps:") + print("1. Edit SKILL.md to complete the TODO items and update the description") + print("2. Customize or delete the example files in scripts/, references/, and assets/") + print("3. 
Run the validator when ready to check the skill structure")
+
+    return skill_dir
+
+
+def main():
+    if len(sys.argv) < 4 or sys.argv[2] != '--path':
+        print("Usage: init_skill.py <skill-name> --path <path>")
+        print("\nSkill name requirements:")
+        print("  - Hyphen-case identifier (e.g., 'data-analyzer')")
+        print("  - Lowercase letters, digits, and hyphens only")
+        print("  - Max 64 characters")
+        print("  - Must match directory name exactly")
+        print("\nExamples:")
+        print("  init_skill.py my-new-skill --path skills/public")
+        print("  init_skill.py my-api-helper --path skills/private")
+        print("  init_skill.py custom-skill --path /custom/location")
+        sys.exit(1)
+
+    skill_name = sys.argv[1]
+    path = sys.argv[3]
+
+    print(f"🚀 Initializing skill: {skill_name}")
+    print(f"   Location: {path}")
+    print()
+
+    result = init_skill(skill_name, path)
+
+    if result:
+        sys.exit(0)
+    else:
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/backend/skills_storage/builtin_skills/skill-creator/scripts/package_skill.py b/backend/skills_storage/builtin_skills/skill-creator/scripts/package_skill.py
new file mode 100644
index 0000000..5cd36cb
--- /dev/null
+++ b/backend/skills_storage/builtin_skills/skill-creator/scripts/package_skill.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+"""
+Skill Packager - Creates a distributable .skill file of a skill folder
+
+Usage:
+    python scripts/package_skill.py <path/to/skill-folder> [output-directory]
+
+Example:
+    python scripts/package_skill.py skills/public/my-skill
+    python scripts/package_skill.py skills/public/my-skill ./dist
+"""
+
+import sys
+import zipfile
+from pathlib import Path
+from quick_validate import validate_skill
+
+
+def package_skill(skill_path, output_dir=None):
+    """
+    Package a skill folder into a .skill file. 
+
+    Args:
+        skill_path: Path to the skill folder
+        output_dir: Optional output directory for the .skill file (defaults to current directory)
+
+    Returns:
+        Path to the created .skill file, or None if error
+    """
+    skill_path = Path(skill_path).resolve()
+
+    # Validate skill folder exists
+    if not skill_path.exists():
+        print(f"❌ Error: Skill folder not found: {skill_path}")
+        return None
+
+    if not skill_path.is_dir():
+        print(f"❌ Error: Path is not a directory: {skill_path}")
+        return None
+
+    # Validate SKILL.md exists
+    skill_md = skill_path / "SKILL.md"
+    if not skill_md.exists():
+        print(f"❌ Error: SKILL.md not found in {skill_path}")
+        return None
+
+    # Run validation before packaging
+    print("🔍 Validating skill...")
+    valid, message = validate_skill(skill_path)
+    if not valid:
+        print(f"❌ Validation failed: {message}")
+        print("   Please fix the validation errors before packaging.")
+        return None
+    print(f"✅ {message}\n")
+
+    # Determine output location
+    skill_name = skill_path.name
+    if output_dir:
+        output_path = Path(output_dir).resolve()
+        output_path.mkdir(parents=True, exist_ok=True)
+    else:
+        output_path = Path.cwd()
+
+    skill_filename = output_path / f"{skill_name}.skill"
+
+    # Create the .skill file (zip format)
+    try:
+        with zipfile.ZipFile(skill_filename, 'w', zipfile.ZIP_DEFLATED) as zipf:
+            # Walk through the skill directory
+            for file_path in skill_path.rglob('*'):
+                if file_path.is_file():
+                    # Calculate the relative path within the zip
+                    arcname = file_path.relative_to(skill_path.parent)
+                    zipf.write(file_path, arcname)
+                    print(f"  Added: {arcname}")
+
+        print(f"\n✅ Successfully packaged skill to: {skill_filename}")
+        return skill_filename
+
+    except Exception as e:
+        print(f"❌ Error creating .skill file: {e}")
+        return None
+
+
+def main():
+    if len(sys.argv) < 2:
+        print("Usage: python scripts/package_skill.py <path/to/skill-folder> [output-directory]")
+        print("\nExample:")
+        print("  python scripts/package_skill.py skills/public/my-skill")
+        print("  python scripts/package_skill.py skills/public/my-skill ./dist")
+        sys.exit(1)
+
+    skill_path = sys.argv[1]
+    output_dir = sys.argv[2] if len(sys.argv) > 2 else None
+
+    print(f"📦 Packaging skill: {skill_path}")
+    if output_dir:
+        print(f"   Output directory: {output_dir}")
+    print()
+
+    result = package_skill(skill_path, output_dir)
+
+    if result:
+        sys.exit(0)
+    else:
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/backend/skills_storage/builtin_skills/skill-creator/scripts/quick_validate.py b/backend/skills_storage/builtin_skills/skill-creator/scripts/quick_validate.py
new file mode 100644
index 0000000..d9fbeb7
--- /dev/null
+++ b/backend/skills_storage/builtin_skills/skill-creator/scripts/quick_validate.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+"""
+Quick validation script for skills - minimal version
+"""
+
+import sys
+import os
+import re
+import yaml
+from pathlib import Path
+
+def validate_skill(skill_path):
+    """Basic validation of a skill"""
+    skill_path = Path(skill_path)
+
+    # Check SKILL.md exists
+    skill_md = skill_path / 'SKILL.md'
+    if not skill_md.exists():
+        return False, "SKILL.md not found"
+
+    # Read and validate frontmatter
+    content = skill_md.read_text()
+    if not content.startswith('---'):
+        return False, "No YAML frontmatter found"
+
+    # Extract frontmatter
+    match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL)
+    if not match:
+        return False, "Invalid frontmatter format"
+
+    frontmatter_text = match.group(1)
+
+    # Parse YAML frontmatter
+    try:
+        frontmatter = 
yaml.safe_load(frontmatter_text) + if not isinstance(frontmatter, dict): + return False, "Frontmatter must be a YAML dictionary" + except yaml.YAMLError as e: + return False, f"Invalid YAML in frontmatter: {e}" + + # Define allowed properties + ALLOWED_PROPERTIES = {'name', 'description', 'license', 'allowed-tools', 'metadata'} + + # Check for unexpected properties (excluding nested keys under metadata) + unexpected_keys = set(frontmatter.keys()) - ALLOWED_PROPERTIES + if unexpected_keys: + return False, ( + f"Unexpected key(s) in SKILL.md frontmatter: {', '.join(sorted(unexpected_keys))}. " + f"Allowed properties are: {', '.join(sorted(ALLOWED_PROPERTIES))}" + ) + + # Check required fields + if 'name' not in frontmatter: + return False, "Missing 'name' in frontmatter" + if 'description' not in frontmatter: + return False, "Missing 'description' in frontmatter" + + # Extract name for validation + name = frontmatter.get('name', '') + if not isinstance(name, str): + return False, f"Name must be a string, got {type(name).__name__}" + name = name.strip() + if name: + # Check naming convention (hyphen-case: lowercase with hyphens) + if not re.match(r'^[a-z0-9-]+$', name): + return False, f"Name '{name}' should be hyphen-case (lowercase letters, digits, and hyphens only)" + if name.startswith('-') or name.endswith('-') or '--' in name: + return False, f"Name '{name}' cannot start/end with hyphen or contain consecutive hyphens" + # Check name length (max 64 characters per spec) + if len(name) > 64: + return False, f"Name is too long ({len(name)} characters). Maximum is 64 characters." + + # Extract and validate description + description = frontmatter.get('description', '') + if not isinstance(description, str): + return False, f"Description must be a string, got {type(description).__name__}" + description = description.strip() + if description: + # Check for angle brackets + if '<' in description or '>' in description: + return False, "Description cannot contain angle brackets (< or >)" + # Check description length (max 1024 characters per spec) + if len(description) > 1024: + return False, f"Description is too long ({len(description)} characters). Maximum is 1024 characters." + + return True, "Skill is valid!" + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: python quick_validate.py <skill_directory>") + sys.exit(1) + + valid, message = validate_skill(sys.argv[1]) + print(message) + sys.exit(0 if valid else 1) \ No newline at end of file diff --git a/backend/skills_storage/user_skills/consistency_checker/SKILL.md b/backend/skills_storage/user_skills/consistency_checker/SKILL.md new file mode 100644 index 0000000..ffc03d9 --- /dev/null +++ b/backend/skills_storage/user_skills/consistency_checker/SKILL.md @@ -0,0 +1,105 @@ +--- +name: consistency-checker +description: Consistency checker for series content. Use this skill when you need to verify that new episode content maintains consistency with global settings, character profiles, and historical plotlines across multiple episodes of a series. +--- + +# Consistency Checker + +## Overview + +This skill checks new episode content against global settings and historical context to ensure multi-episode consistency in series creation. 
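+
+As a concrete illustration, here is a minimal sketch (a hypothetical caller-side helper, not part of this skill's required tooling) of how the JSON verdict documented under Output Format below might be consumed:
+
+```python
+import json
+
+
+def triage_verdict(raw: str) -> str:
+    """Hypothetical helper: summarize a consistency-check verdict."""
+    report = json.loads(raw)
+    # Count issues the checker marked as high severity (blocking)
+    blocking = [i for i in report.get("issues", []) if i.get("severity") == "high"]
+    status = "PASS" if report.get("passed") else "FAIL"
+    return f"{status} (score {report.get('score', 0)}): {len(blocking)} high-severity issue(s)"
+```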
+ +## Core Principles + +### What This Skill Does + +The Consistency Checker reviews content for: + +- **Character consistency** - Personality, speech style, status remain true to established profiles +- **Scene consistency** - Environment descriptions match established settings +- **Plot logic** - Story developments follow logical cause-and-effect relationships +- **Foreshadowing tracking** - New plot threads are recorded and existing ones are resolved + +## Check Dimensions + +### 1. Character Setting Consistency + +Check if character personality, speech style, and status match their established profiles: + +- Does personality match the character biography description? +- Is speech style consistent (e.g., an outspoken character shouldn't suddenly become tactful)? +- Are there contradictions in status (e.g., a commoner suddenly using royal language)? + +### 2. Scene Setting Consistency + +Check if scene descriptions match established settings: + +- Do environment descriptions match the setting? +- Are scene transitions reasonable and natural? +- Are descriptions of the same scene across different episodes consistent? + +### 3. Plot Logic Consistency + +Check if plot developments are reasonable: + +- Any contradictions with previous plotlines? +- Is the timeline reasonable? +- Do cause-and-effect relationships hold? + +### 4. Foreshadowing Management + +Check foreshadowing setup and follow-through: + +- Are new foreshadowing elements recorded? +- Are existing foreshadowing threads followed up? +- Any contradictions in foreshadowing? + +## Output Format + +Return results in the following JSON format: + +```json +{ + "passed": true, + "score": 85, + "issues": [ + { + "type": "character_inconsistency", + "dimension": "Character Consistency", + "severity": "high", + "location": { + "episode": 3, + "scene": 5, + "line": 12 + }, + "description": "Li Yunfei's speech became overly literary, inconsistent with his character", + "original_text": "Li Yunfei: This... this seems rather inappropriate", + "suggestion": "Suggested change: Li Yunfei: This isn't right!", + "auto_fix_available": true + } + ], + "new_foreshadowing": [ + { + "episode": 3, + "scene": 2, + "description": "Wan'er mentions 'a pouch left by mother'", + "importance": "high" + } + ], + "resolved_threads": [ + { + "thread": "Emperor's wariness of the Chancellor", + "resolved_in": "EP03", + "resolution": "Chancellor once held the late Emperor's edict" + } + ] +} +``` + +## Available Tools + +This skill can reference the following tools and context: + +- `readCharacterProfile(characterId)` - Read character biography +- `readSceneSetting(sceneId)` - Read scene settings +- `readMemory(episodeRange)` - Read historical memory diff --git a/backend/skills_storage/user_skills/dialogue_writer_ancient/SKILL.md b/backend/skills_storage/user_skills/dialogue_writer_ancient/SKILL.md new file mode 100644 index 0000000..aadbbd7 --- /dev/null +++ b/backend/skills_storage/user_skills/dialogue_writer_ancient/SKILL.md @@ -0,0 +1,90 @@ +--- +name: dialogue-writer-ancient +description: Expert ancient-style dialogue writer for period dramas. Use this skill when creating character dialogue that must match historical settings, with characters speaking according to their status and personality in archaic Chinese style. +--- + +# Ancient Dialogue Writer + +## Overview + +This skill writes character dialogue for period dramas, ensuring dialogue style matches the historical background and characters speak according to their established status and personality. 
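+
+As a concrete illustration, the per-line length rules below can be checked mechanically. This is a minimal sketch with a hypothetical helper, assuming output lines follow the 【Character Name】 format shown under Output Format:
+
+```python
+import re
+
+# Hypothetical helper: matches dialogue lines of the form 【Name】text;
+# action/expression lines in parentheses are skipped.
+LINE_PATTERN = re.compile(r"^【(?P<name>[^】]+)】(?P<text>.+)$")
+
+
+def within_limit(line: str, limit: int = 25) -> bool:
+    """Return True if a dialogue line's spoken text is within the character limit."""
+    match = LINE_PATTERN.match(line.strip())
+    if match is None:
+        return True
+    return len(match.group("text")) <= limit
+```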
+## Core Principles
+
+### Dialogue Style Requirements
+
+When writing dialogue, you must follow these rules:
+
+#### 1. Language Characteristics
+
+- Use semi-classical expressions like "何故" (why), "罢了" (that's all), "莫要" (do not)
+- Avoid modern vocabulary like "因为所以" (because/therefore), "虽然但是" (although/but), "OK", "拜拜" (bye)
+- Use rhetorical questions like "岂" (how could), "怎" (how), "何" (what)
+- Use affirmations like "便是" (it is), "自然" (naturally), "确实" (indeed)
+
+#### 2. Speech Style Hierarchy
+
+**Royalty:**
+- Majestic, terse, favoring rhetorical questions
+- Refined diction, without excessive ornamentation
+
+**Generals:**
+- Direct, brief, forceful
+- Mostly imperative sentences, few questions
+
+**Civil Officials:**
+- Euphemistic, fond of classical allusions
+- Skilled with metaphors, elegant language
+
+**Maids/Servants:**
+- Humble, frequent use of honorifics
+- Cautious speech, concise and meaningful
+
+**Commoners:**
+- Simple, straightforward
+- Colloquial expressions
+
+#### 3. Emotional Expression Rules
+
+**When Angry:**
+- Don't say "I'm angry" directly
+- Express through slamming tables, pacing, hand on sword hilt
+- Keep dialogue brief and forceful, use rhetorical questions
+
+**When Sad:**
+- Use "..." pauses, then speak softly
+- Avoid loud wailing, use restrained expression
+- Use scenery to accentuate sadness
+
+**When Happy:**
+- Use smiles, nods instead of loud laughter
+- Relaxed tone, gentle wording
+
+#### 4. Dialogue Rhythm Control
+
+- Important dialogue: no more than 15 characters per line
+- Daily conversation: no more than 25 characters per line
+- Confrontation scenes: use short sentences for rapid exchange
+- Emotional scenes: slow the pace appropriately
+
+## Output Format
+
+```
+【Character Name】Dialogue content
+(Action/expression description)
+```
+
+Example:
+```
+【Li Yunfei】This matter, how can I tolerate it!
+(Hand on sword hilt, glaring angrily)
+
+【Su Wan'er】General, please calm down...
+(Lowers eyes, speaks softly, voice trembling slightly)
+```
+
+## Available Tools
+
+- `readCharacterProfile(characterId)` - Read character biography
+- `readSceneSetting(sceneId)` - Read scene settings
+- `readStyleGuide()` - Read style guide
diff --git a/backend/start.bat b/backend/start.bat
new file mode 100644
index 0000000..8e60d4a
--- /dev/null
+++ b/backend/start.bat
@@ -0,0 +1,70 @@
+@echo off
+REM Creative Studio Backend - Windows 启动脚本
+
+echo ============================================
+echo   Creative Studio Backend - 启动脚本
+echo ============================================
+echo.
+
+REM 检查虚拟环境
+if not exist ".venv" (
+    echo [1/4] 创建虚拟环境...
+    python -m venv .venv
+    if errorlevel 1 (
+        echo 错误: 创建虚拟环境失败
+        pause
+        exit /b 1
+    )
+) else (
+    echo [1/4] 虚拟环境已存在
+)
+
+REM 激活虚拟环境
+echo [2/4] 激活虚拟环境...
+call .venv\Scripts\activate.bat
+if errorlevel 1 (
+    echo 错误: 激活虚拟环境失败
+    pause
+    exit /b 1
+)
+
+REM 检查并安装依赖
+echo [3/4] 检查依赖...
+pip show fastapi >nul 2>&1
+if errorlevel 1 (
+    echo 正在安装依赖...
+    pip install -e .
+    if errorlevel 1 (
+        echo 错误: 安装依赖失败
+        pause
+        exit /b 1
+    )
+) else (
+    echo 依赖已安装
+)
+
+REM 检查环境变量文件
+if not exist ".env" (
+    echo.
+    echo 警告: .env 文件不存在
+    echo 请先复制 .env.example 为 .env 并配置 ZHIPUAI_API_KEY
+    echo.
+    copy .env.example .env
+    echo 已创建 .env 文件,请编辑并填入必要的配置
+    echo.
+    notepad .env
+    pause
+)
+
+REM 启动服务
+echo [4/4] 启动后端服务...
+echo.
+echo ============================================
+echo   服务地址: http://localhost:8000
+echo   API 文档: http://localhost:8000/docs
+echo ============================================
+echo.
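+REM uvicorn flags: --reload enables auto-reload for development, --host 0.0.0.0 listens on all interfaces, --port 8000 sets the service port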
+
+uvicorn app.main:app --reload --host 0.0.0.0 --port 8000
+
+pause
diff --git a/backend/stop.bat b/backend/stop.bat
new file mode 100644
index 0000000..68af8bd
--- /dev/null
+++ b/backend/stop.bat
@@ -0,0 +1,12 @@
+@echo off
+REM Creative Studio Backend - 停止脚本
+
+echo 正在停止 Creative Studio Backend...
+
+REM 查找并终止 uvicorn 进程
+for /f "tokens=2" %%a in ('tasklist ^| findstr uvicorn') do (
+    taskkill /F /PID %%a 2>nul
+)
+
+echo Backend 已停止
+pause
diff --git a/backend/test_integration.py b/backend/test_integration.py
new file mode 100644
index 0000000..529e3d8
--- /dev/null
+++ b/backend/test_integration.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+集成测试脚本 - 测试核心模块的集成
+"""
+
+import asyncio
+import sys
+import json
+from pathlib import Path
+
+# 添加项目根目录到 Python 路径
+project_root = Path(__file__).parent
+sys.path.insert(0, str(project_root))
+
+async def main():
+    """主测试入口"""
+    print("=" * 50)
+    print("集成测试")
+    print()
+
+    # 测试 1: 验证 zai-sdk 导入
+    print("[1/6] 测试 zai-sdk 导入...")
+    try:
+        from zai import ZhipuAiClient
+        print("  ✓ zai-sdk 导入成功")
+    except ImportError as e:
+        print(f"  ✗ zai-sdk 导入失败: {str(e)}")
+        return False
+
+    # 测试 2: 验证 Skill Manager
+    print()
+    print("[2/6] 测试 Skill Manager...")
+    try:
+        from app.core.skills.skill_manager import get_skill_manager
+        sm = get_skill_manager()
+
+        # 测试加载 skill-creator
+        skill = await sm.load_skill("skill-creator")
+        if skill:
+            print(f"  ✓ skill-creator 加载成功")
+            print(f"    名称: {skill.name}")
+        else:
+            print("  ✗ skill-creator 未找到")
+
+        # 测试获取工具列表
+        tools = sm.get_available_tools_for_llm()
+        print(f"    工具数量: {len(tools)}")
+
+    except Exception as e:
+        print(f"  ✗ Skill Manager 测试失败: {str(e)}")
+        return False
+
+    # 测试 3: 验证记忆系统
+    print()
+    print("[3/6] 测试 Memory Manager...")
+    try:
+        from app.core.memory.memory_manager import get_memory_manager
+        mm = get_memory_manager()
+
+        # 测试事件提取
+        events = await mm.extract_events_from_episode(
+            episode_content="测试 EP01 内容:李云飞与苏婉儿在江南相遇,两人决定私定终身...",
+            episode_number=1,
+            characters=["李云飞", "苏婉儿"]
+        )
+        print(f"  ✓ 提取 {len(events)} 个事件")
+
+        # 测试伏笔检测
+        foreshadowing, threads = await mm.detect_foreshadowing(
+            episode_content="测试 EP02 内容...",
+            episode_number=2,
+            existing_threads=[]
+        )
+        print(f"  ✓ 检测到 {len(foreshadowing)} 个伏笔,新增 {len(threads)} 个待收线")
+
+    except Exception as e:
+        print(f"  ✗ Memory Manager 测试失败: {str(e)}")
+        return False
+
+    # 测试 4: 验证审核系统
+    print()
+    print("[4/6] 测试 Review Manager...")
+    try:
+        from app.core.review.review_manager import get_review_manager
+        rm = get_review_manager()
+
+        # 测试维度列表
+        dimensions = rm.list_all_dimensions()
+        print(f"  ✓ 审核维度: {len(dimensions)}")
+
+    except Exception as e:
+        print(f"  ✗ Review Manager 测试失败: {str(e)}")
+        return False
+
+    # 测试 5: 验证批次执行
+    print()
+    print("[5/6] 测试 Batch Executor...")
+    try:
+        from app.core.execution.batch_executor import get_batch_executor
+        be = get_batch_executor()
+
+        print("批次执行测试...")
+        # 创建测试项目
+        from app.models.project import SeriesProject
+        project = SeriesProject(
+            id="test-project-001",
+            name="测试项目",
+            type="series",
+            globalContext={"worldSetting": "测试世界观"},
+            characterProfiles={},
+            sceneSettings={},
+            overallOutline="测试大纲",
+            styleGuide="测试风格",
+            skills={"dialogue_writer_ancient": "dialogue-writer-ancient"}
+        )
+
+        # 模拟批次执行
+        result = await be.execute_batch(
+            project=project,
+            start_episode=1,
+            end_episode=3,
+            enable_retry=False,
+            max_retries=1,
+            on_progress=lambda d: print(f"    进度: {d['progress_percentage']}"),
+            on_episode_complete=lambda d: print(f"    集 {d['current_episode']} 完成")
+        )
+
+        print(f"  ✓ 批次执行完成")
print(f" 耗分:") + print(f" 成功: {result['successful']}") + print(f" 失败: {result['failed']}") + print(f" 耗率: {result['success_rate']}") + print(f" 质量: {result.get('quality_stats', {}).get('average', 0):.1f}") + + except Exception as e: + print(f" ✗ Batch Executor 测试失败: {str(e)}") + return False + + print() + print("=" * 50) + print("所有测试完成!") + print() + print("说明:") + print("如果测试全部通过,说明核心模块可以正常协作。") + print("如果某些测试失败,根据错误信息修复对应模块。") + + return True + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/creative-studio-platform.md b/creative-studio-platform.md new file mode 100644 index 0000000..1c477cd --- /dev/null +++ b/creative-studio-platform.md @@ -0,0 +1,2564 @@ +# 灵感工坊 (Creative Studio) - AI 创作平台 +## 基于 Agent + Skill 架构的剧集创作平台 + +--- + +## 一、产品概述 + +### 1.1 产品定位 + +**灵感工坊** 是一个基于 **"Agent 固定框架 + Skill 可配置单元"** 架构的剧集创作平台。 + +#### 核心理念 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ "Agent是骨架,Skill是血肉" │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ Agent (固定框架) Skills (可配置单元) │ +│ ┌─────────────────┐ ┌─────────────────┐ │ +│ │ 工作流编排 │ 使用 │ 专业能力 │ │ +│ │ 决策逻辑 │ ──────▶ │ 方法论 │ │ +│ │ 调度规则 │ │ 提示词模板 │ │ +│ │ 质量控制 │ │ 参数配置 │ │ +│ │ │ │ │ │ +│ │ 平台维护 │ │ 用户维护 │ │ +│ │ 很少修改 │ │ 经常微调 │ │ +│ └─────────────────┘ └─────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +#### 平台特性 + +- **Skill 指导行为**:用户编写的 Skill 直接融入提示词,指导 Agent 的创作行为(类似 Claude Code Skills) +- **全自动创作**:Agent 自主完成全部创作流程,用户只需最后审核 +- **多集一致性保障**:全局上下文 + 记忆系统 + 一致性审核,确保几十集内容不矛盾 +- **点击交互**:非问答式,通过可视化界面点击配置和执行 + +### 1.2 核心价值 + +| 传统方案 | 灵感工坊 | +|---------|---------| +| 问答式交互,需要反复沟通 | 点击式配置,一次性设置完成 | +| 每集独立创作,容易前后矛盾 | 全局上下文 + 记忆系统确保一致性 | +| 需要人工全程监督 | 全自动模式,仅需最后审核 | +| 创作风格难以量化 | Skill 可配置,风格参数化 | +| Agent 需要用户编排维护 | Agent 固定模板,Skill 适配项目 | + +### 1.3 核心概念关系 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 层级关系 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────────┐ 引用 ┌─────────────────┐ │ +│ │ Series Project │ ────────▶ │ Agent │ │ +│ │ (剧集项目) │ │ (工作流模板) │ │ +│ │ │ │ │ │ +│ │ - 全局上下文 │ │ - 调用规则 │ │ +│ │ - 记忆系统 │ │ - 决策逻辑 │ │ +│ │ - 多个Episodes │ │ - 流程编排 │ │ +│ │ │ │ │ │ +│ │ ┌─────────────┐│ │ 引用 │ ┌─────────┐ │ +│ │ │ Episode 01 ││ │ └────▶│ Skill │ │ +│ │ │ Episode 02 ││ │ │(原子能力)│ │ +│ │ │ Episode ... 
││ │ │ │ │ +│ │ └─────────────┘│ │ │- 方法论 │ │ +│ └─────────────────┘ └─────────────────┤- 提示词 │ │ +│ │- 工具 │ │ +│ └─────────┘ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +| 概念 | 定位 | 可复用性 | 维护者 | +|-----|------|---------|-------| +| **Skill** | 原子能力,行为指导 | ✅ 高度可复用 | 用户 | +| **Agent** | 工作流模板 | ✅ 可复用 | 平台 | +| **Series Project** | 剧集容器 | ❌ 不可复用 | 用户 | +| **Episode** | 具体集数实例 | ❌ 不可复用 | Agent 生成 | + +--- + +## 二、三大管理中心 + +### 2.1 Skill 管理中心 + +#### 2.1.1 Skill 的本质 + +**Skill 是用户级别的行为指导者**,类似 Claude Code Skills: + +- Skill 的内容会直接融入 Agent 的提示词 +- LLM 根据 Skill 的指导进行创作 +- 用户通过修改 Skill 来改变 Agent 的行为 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Skill 如何指导 Agent 行为 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 用户需求: "创作 EP05 场景3: 李云飞与皇帝对峙" │ +│ ↓ │ +│ Agent 分析: 需要调用 dialogue-writer Skill │ +│ ↓ │ +│ Agent 加载 Skill 内容 → 融入提示词 │ +│ ↓ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 发送给 LLM 的提示词包含: │ │ +│ │ ───────────────────────────────────────────────────── │ │ +│ │ 你是剧集创作专家。 │ │ +│ │ │ │ +│ │ 以下是你的行为指导 (来自 Skill: 古风对话大师): │ │ +│ │ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ │ │ +│ │ 1. 使用半文半白的表达,如"何故"、"罢了"、"莫要" │ │ +│ │ 2. 皇室说话威严少语,武将直率简短,文官婉转 │ │ +│ │ 3. 愤怒时用动作表达,不要直接说"我很生气" │ │ +│ │ 4. 重要对话一句不超过15字 │ │ +│ │ 5. 使用(停顿)、(冷笑)、(长叹)标记 │ │ +│ │ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ LLM 根据 Skill 指导输出对话 │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +#### 2.1.2 Skill 结构定义 + +```markdown +# Skill: 古风对话大师 + +## 基础信息 +- **ID**: dialogue-writer-ancient +- **版本**: 2.1.0 +- **作者**: 用户名 +- **分类**: 编剧 > 对话风格 + +## 触发条件 +当 Agent 需要创作人物对话时,使用此 Skill + +## 行为指导 (核心 - 直接影响 Agent 行为) + +### 对话风格要求 +你创作对话时必须遵循以下规则: + +1. **语言特征** + - 使用半文半白的表达,如"何故"、"罢了"、"莫要" + - 避免现代词汇,如"因为所以"、"虽然但是" + - 疑问句多用"岂"、"怎"、"何" + +2. **说话风格分层** + - 皇室: 威严、少语、多用反问 + - 武将: 直率、简短、有力 + - 文官: 婉转、多用典故 + - 宫女: 谦卑、多用敬语 + +3. **情感表达规则** + - 愤怒时: 不要直接说"我很生气",通过拍桌、踱步表达 + - 悲伤时: 用"..."停顿,然后低声说 + - 喜悦时: 用微笑、点头代替大笑 + +4. 
**对话节奏控制** + - 重要对话: 一句不超过 15 字 + - 日常对话: 一句不超过 25 字 + - 争议场景: 多用短句来回交锋 + +## 输出格式 +``` +【人物名】对话内容 +(动作/神态描写) +``` + +## 可用工具 +- `readCharacterProfile(characterId)` - 读取人物小传 +- `readSceneSetting(sceneId)` - 读取场景设定 +``` + +#### 2.1.3 Skill 配置层级 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Skill 配置层级 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 层级1: 快速预设 (适合新手) │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 预设风格: │ │ +│ │ • 古装正剧 → 自动配置所有 Skills │ │ +│ │ • 现代甜宠 → 自动配置所有 Skills │ │ +│ │ • 悬疑推理 → 自动配置所有 Skills │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ 层级2: 参数微调 (适合进阶) │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 对话风格: [古风] + [幽默元素 20%] │ │ +│ │ 叙事节奏: [快] + [重点场景放慢] │ │ +│ │ 情感浓度: [适中] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ 层级3: 完全自定义 (适合高级用户) │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 直接编辑 Skill 的行为指导文档 │ │ +│ │ 自定义新的 Skill │ │ +│ │ 组合多个 Skills │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +#### 2.1.4 Skill 管理功能 + +| 功能 | 说明 | +|-----|-----| +| **快速预设** | 一键应用风格预设(古装/现代/悬疑等) | +| **参数调节** | 滑块、选择框调整 Skill 参数 | +| **完整编辑** | 编辑 Skill 的行为指导文档 | +| **版本控制** | Skill 版本管理,支持回滚 | +| **测试调试** | 独立测试 Skill,查看效果 | +| **Fork 修改** | 基于现有 Skill 创建副本修改 | +| **发布分享** | 发布到 Skill 市场 | + +#### 2.1.5 内置 Skills vs 用户 Skills + +| 类型 | 说明 | 维护者 | 可编辑 | 支持 Scripts | +|-----|------|-------|-------|-------------| +| **内置 Skills** | skill-creator、agent-creator 等平台功能 | 平台 | ❌ | ✅ | +| **用户 Skills** | 对话风格、结构分析、一致性审核等创作能力 | 用户 | ✅ | ❌ | + +#### 2.1.6 内置 Skills 说明 + +平台提供以下内置 Skills(支持 Scripts 执行): + +| Skill ID | 功能 | Scripts 用途 | +|----------|------|-------------| +| **skill-creator** | 帮助用户创建新 Skill | 验证 Skill 结构、生成模板 | +| **agent-creator** | 帮助平台创建新 Agent | 验证 Agent 配置、生成工作流 | +| **project-manager** | 项目管理操作 | 项目初始化、文件组织 | + +这些内置 Skills 来自 Claude Code 官方规范,包含可执行的 Scripts 用于特定功能。平台提供 Scripts 执行引擎,但用户创建 Skills 时无需编写 Scripts,只需提供行为指导即可。 + +### 2.2 Agent 管理中心 + +#### 2.2.1 Agent 的定位 + +**Agent 是代码驱动的固定工作流框架**,由平台维护,用户不可编辑: + +**设计理念**: +- Agent 的工作流是硬编码在代码中的,不是存储在数据库 +- 用户通过配置不同的 Skills 来改变 Agent 的行为 +- 平台通过发布新版本的方式更新 Agent + +**为什么不支持可视化编排**: +- 内容创作有行业标准流程(大纲 → 对话 → 审核) +- 固定流程更稳定、易维护 +- 降低用户学习成本(不需要理解工作流概念) +- 80% 的用户使用标准流程即可满足需求 + +```markdown +# Agent: 剧集创作 Agent (平台维护,用户不可编辑) + +## 角色 +你是剧集创作的执行者,负责协调各个 Skill 完成创作流程。 + +## 工作流 (固定不变) +``` +┌─────────────────────────────────────────────────────────────┐ +│ 初始化 → 加载全局上下文和历史记忆 │ +│ ↓ │ +│ FOR EACH episode: │ +│ 1. [结构分析] → script-structure-analyzer │ +│ 2. [大纲生成] → outline-generator │ +│ 3. [对话创作] → dialogue-writer │ +│ 4. [一致性审核] → consistency-checker │ +│ 5. 质量检查 → 自动重试或标记需审核 │ +│ 6. 
[更新记忆] → 提取关键事件、伏笔、人物状态 │ +│ ↓ │ +│ 完成所有集数 → 生成质量报告 │ +└─────────────────────────────────────────────────────────────┘ +``` + +## 决策规则 (固定) +- 如果质量分数 < 阈值 → 重新生成当前阶段 +- 如果连续3次不通过 → 标记需人工审核 +- 如果检测到伏笔 → 更新伏笔追踪系统 + +## 可调用的 Skills (用户可配置替换) +- `script-structure-analyzer` (用户可选择或自定义) +- `outline-generator` (用户可选择或自定义) +- `dialogue-writer` (用户可选择或自定义) +- `consistency-checker` (用户可选择或自定义) + +## Agent 本身不包含具体的创作方法论 +所有创作风格、输出格式、质量标准都由 Skills 定义 +``` + +#### 2.2.2 Agent 类型 + +| Agent 类型 | 用途 | 包含的 Skills (可配置) | +|-----------|------|---------------------| +| **剧集创作 Agent** | 短剧/连续剧创作 | 结构分析、大纲生成、对话写作、一致性审核 | +| **分镜设计 Agent** | 视频分镜创作 | 节拍拆解、镜头设计、九宫格生成 | +| **短篇小说 Agent** | 短篇故事创作 | 大纲生成、章节写作、人物设定 | +| **营销文案 Agent** | 广告文案创作 | 需求分析、创意生成、文案优化 | + +#### 2.2.3 Agent 管理功能 + +| 功能 | 说明 | +|-----|-----| +| **选择 Agent** | 根据项目类型选择对应的 Agent 模板 | +| **配置 Skills** | 为 Agent 配置不同的 Skills | +| **查看工作流** | 可视化展示 Agent 的执行流程 | +| **测试运行** | 测试 Agent 配置的效果 | + +### 2.3 项目管理中心 + +#### 2.3.1 剧集项目结构 + +``` +┌─────────────────────────────────────────────────────────────────────┐ +│ Series Project (剧集项目) │ +├─────────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌───────────────────────────────────────────────────────────────┐ │ +│ │ Global Context (全局上下文) - 项目级共享 │ │ +│ │ ┌─────────────────────────────────────────────────────────┐ │ │ +│ │ │ 世界设定 (世界观、背景、规则) │ │ │ +│ │ │ 人物小传 (所有角色的设定、关系、状态) │ │ │ +│ │ │ 场景设定 (常驻场景的描述) │ │ │ +│ │ │ 剧情大纲 (整体故事线、关键转折点) │ │ │ +│ │ │ 风格设定 (对话风格、叙事风格) │ │ │ +│ │ └─────────────────────────────────────────────────────────┘ │ │ +│ └───────────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌───────────────────────────────────────────────────────────────┐ │ +│ │ Memory System (记忆系统) - 自动维护 │ │ +│ │ ┌─────────────────────────────────────────────────────────┐ │ │ +│ │ │ 事件时间线 (已发生的关键事件) │ │ │ +│ │ │ 人物状态变化 (各人物状态的历史变化) │ │ │ +│ │ │ 伏笔追踪 (新增伏笔、待收线问题) │ │ │ +│ │ └─────────────────────────────────────────────────────────┘ │ │ +│ └───────────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌───────────────────────────────────────────────────────────────┐ │ +│ │ Episodes (子项目) │ │ +│ │ ┌───────────┐ ┌───────────┐ ┌───────────┐ ┌───────────┐ │ │ +│ │ │ Episode 1 │ │ Episode 2 │ │ Episode 3 │ │ Episode ...│ │ │ +│ │ │ │ │ │ │ │ │ │ │ │ +│ │ │ 状态: 完成 │ │ 状态: 完成 │ │ 状态: 进行 │ │ 状态: 待开始│ │ +│ │ │ │ │ │ │ │ │ │ │ │ +│ │ │ [查看] │ │ [查看] │ │ [继续] │ │ [开始] │ │ │ +│ │ └───────────┘ └───────────┘ └───────────┘ └───────────┘ │ │ +│ └───────────────────────────────────────────────────────────────┘ │ +│ │ +│ 使用的 Agent: 剧集创作 Agent │ +│ 运行模式: 全自动 / 半自动 │ +│ │ +└─────────────────────────────────────────────────────────────────────┘ +``` + +#### 2.3.2 创建项目流程 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 新建剧集项目 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 步骤 1/3: 基本信息 │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 项目名称: [宫廷风云] │ │ +│ │ 总集数: [30] 集 │ │ +│ │ 项目类型: [短剧 ▼] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ 步骤 2/3: 选择 Agent │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 推荐 Agent: 剧集创作 Agent │ │ +│ │ │ │ +│ │ 默认配置的 Skills: │ │ +│ │ ✅ 剧本结构分析器 │ │ +│ │ ✅ 大纲生成器 │ │ +│ │ ✅ 对白写作器 (古风风格) │ │ +│ │ ✅ 一致性审核器 │ │ +│ │ │ │ +│ │ [使用默认配置] [自定义 Skills] [选择其他 Agent] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ 步骤 3/3: 全局设定 │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 
📖 世界观设定: │ │ +│ │ ┌─────────────────────────────────────────────────┐ │ │ +│ │ │ 朝代: 架空朝代"天启朝" │ │ │ +│ │ │ 地点: 皇城、边关、江南 │ │ │ +│ │ │ [详细设定...] │ │ │ +│ │ └─────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ 👥 主要人物: │ │ +│ │ ┌─────────────────────────────────────────────────┐ │ │ +│ │ │ 李云飞 - 边关将军,性格耿直 │ │ │ +│ │ │ 苏婉儿 - 丞相之女,聪慧隐忍 │ │ │ +│ │ │ [+ 添加人物] │ │ │ +│ │ └─────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ 📜 整体大纲: │ │ +│ │ ┌─────────────────────────────────────────────────┐ │ │ +│ │ │ EP01-05: 相识与相恋... │ │ │ +│ │ │ EP06-15: 宫中斗争... │ │ │ +│ │ │ [编辑大纲] │ │ │ +│ │ └─────────────────────────────────────────────────┘ │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ [返回] [保存配置] [🚀 开始创作] │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +#### 2.3.3 项目管理功能 + +| 功能 | 说明 | +|-----|-----| +| **创建项目** | 选择 Agent、配置 Skills、设定全局上下文 | +| **查看进度** | 实时监控各集的创作进度 | +| **全局设定管理** | 编辑世界观、人物小传、场景设定 | +| **记忆系统查看** | 查看事件时间线、伏笔追踪 | +| **人工审核** | 审核标记的问题,修改或重新生成 | +| **导出结果** | 导出单集或全集剧本 | + +--- + +## 三、一致性保障机制 + +### 3.1 全局上下文管理 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 全局设定 | 古装剧《宫廷风云》 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 📖 世界观设定 │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 朝代: 架空朝代"天启朝" │ │ +│ │ 地点: 皇城、边关、江南 │ │ +│ │ 特殊规则: 宫中等级森严,丞相与皇帝对立... │ │ +│ │ [编辑] [历史版本] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ 👥 人物小传 (点击展开详情) │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 李云飞 (男主角) │ │ +│ │ - 身份: 边关将军 │ │ +│ │ - 性格: 忠诚、耿直、不懂变通 │ │ +│ │ - 状态: 活跃 / 在边关 / 被皇帝猜忌 (EP01更新) │ │ +│ │ - 关系: 与苏婉儿有婚约 / 与丞相对立 │ │ +│ │ - 说话风格: 直来直去,用词简练 │ │ +│ │ [编辑] [查看状态历史] │ │ +│ │ │ │ +│ │ 苏婉儿 (女主角) │ │ +│ │ - 身份: 丞相之女 │ │ +│ │ - 性格: 聪慧、隐忍、心怀大义 │ │ +│ │ - 状态: 活跃 / 在皇城 / 被迫入宫为妃 (EP01更新) │ │ +│ │ [编辑] [+ 添加人物] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ 🏠 场景设定 │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 养心殿 (皇帝寝宫) - 奢华、压抑 │ │ +│ │ 丞相府 - 庄严、暗藏玄机 │ │ +│ │ 边关大营 - 肃杀、简陋 │ │ +│ │ [+ 添加场景] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ 📜 整体大纲 │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ EP01-05: 相识与相恋,两人被迫分离 │ │ +│ │ EP06-15: 宫中斗争,婉儿周旋,云飞征战 │ │ +│ │ EP16-25: 真相大白,联手对抗丞相 │ │ +│ │ EP26-30: 最终对决,有情人终成眷属 │ │ +│ │ [编辑] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ [保存] [导出设定] [历史版本] │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 3.2 记忆系统 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 剧情记忆系统 (自动维护) │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 📅 事件时间线 (从已完成的集数中自动提取) │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ EP01: │ │ +│ │ • 李云飞与苏婉儿在江南初遇,定下婚约 │ │ +│ │ • 丞相苏文柏被皇帝忌惮,欲将女儿送入宫中 │ │ +│ │ • 皇帝下旨,苏婉儿入选为妃 │ │ +│ │ │ │ +│ │ EP02: │ │ +│ │ • 云飞回京请婚,被拒 │ │ +│ │ • 婉儿入宫,初见皇帝,被冷落 │ │ +│ │ • 云飞带兵离开,二人约定三年后相见 │ │ +│ │ │ │ +│ │ [+ 查看更多集数] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ ⚠️ 待收线问题 │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ • 皇帝忌惮丞相的原因尚未揭示 (EP01 伏笔) │ │ +│ │ - 重要性: 高 │ │ +│ │ - 建议收线集数: EP15-20 │ │ +│ │ │ │ +│ │ • 云飞离京前从神秘人处获得的一封信 (EP02 伏笔) │ │ +│ │ - 重要性: 高 │ │ +│ │ - 建议收线集数: EP20-25 │ │ +│ │ │ │ +│ │ • 婉儿入宫时带的一枚玉佩,似乎有特殊含义 (EP01 伏笔) │ │ +│ │ - 重要性: 中 │ │ +│ │ - 
建议收线集数: EP10-15 │ │ +│ │ │ │ +│ │ [+ 查看全部] [手动添加] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ 🔄 人物状态追踪 │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 李云飞: │ │ +│ │ 边关将军 (初始) → (EP01) 被皇帝猜忌 → (EP02) 愤而离京 │ │ +│ │ [查看完整历史] │ │ +│ │ │ │ +│ │ 苏婉儿: │ │ +│ │ 相府千金 (初始) → (EP01) 被选为妃 → (EP02) 入宫冷落 │ │ +│ │ [查看完整历史] │ │ +│ │ │ │ +│ │ [+ 查看全部人物] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ [AI 智能提取] [手动添加] [导出报告] │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 3.3 可配置审核系统 + +**核心设计:审核也通过 Skill 配置实现,用户可以自定义审核标准和规则** + +审核不是硬编码的,而是通过配置"审核 Skill"来实现的。用户可以选择不同的审核维度、设置严格度、自定义审核规则。 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 审核配置界面 (用户友好) │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 📋 宫廷风云 > 审核配置 │ +│ │ +│ ┌─ 审核维度 ─────────────────────────────────────────────┐ │ +│ │ 当前使用的审核 Skills: │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ ✅ 人物一致性审核 [配置▼] [移除] │ │ │ +│ │ │ ✅ 场景设定审核 [配置▼] [移除] │ │ │ +│ │ │ ✅ 剧情逻辑审核 [配置▼] [移除] │ │ │ +│ │ │ ✅ 对话质量审核 [配置▼] [移除] │ │ │ +│ │ │ ✅ 风格一致性审核 [配置▼] [移除] │ │ │ +│ │ │ ✅ 伏笔追踪审核 [配置▼] [移除] │ │ │ +│ │ │ │ │ │ +│ │ │ [+ 添加其他审核 Skill] │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 审核严格度 ───────────────────────────────────────────┐ │ +│ │ 总体严格度: ━━━━●━━━ (中等) │ │ +│ │ │ │ +│ │ 各维度独立设置: │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 人物一致性: ━━━●━━━━ (宽松) │ │ │ +│ │ │ 场景设定: ━━━━●━━━ (中等) │ │ │ +│ │ │ 剧情逻辑: ━━━━━●━━ (严格) │ │ │ +│ │ │ 对话质量: ━━━━●━━━ (中等) │ │ │ +│ │ │ 风格一致: ━━━━●━━━ (中等) │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 自定义审核规则 ───────────────────────────────────────┐ │ +│ │ [+ 添加自定义规则] │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 🔴 规则 1: 避免现代词汇 │ │ │ +│ │ │ 触发条件: 检测到"OK"、"拜拜"等现代词 │ │ │ +│ │ │ 严重程度: 中等 │ │ │ +│ │ │ [编辑] [删除] │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 🟡 规则 2: 高潮集必须有情感爆发 │ │ │ +│ │ │ 触发条件: 标记为"高潮"的集数 │ │ │ +│ │ │ 检查内容: 情感描写密度 │ │ │ +│ │ │ 严重程度: 高 │ │ │ +│ │ │ [编辑] [删除] │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 审核预设 ─────────────────────────────────────────────┐ │ +│ │ 快速应用预设配置: │ │ +│ │ [宽松模式] [标准模式] [严格模式] [自定义模式] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ [保存配置] [重置为默认] [导出配置] │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +#### 3.3.1 审核维度详细配置 + +**人物一致性审核配置:** + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 人物一致性审核 > 配置 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 检查项目 (可多选): │ +│ ☑ 性格是否符合人物小传 │ +│ ☑ 说话风格是否一致 │ +│ ☑ 身份地位是否矛盾 │ +│ ☑ 人物关系是否合理 │ +│ ☐ 人物外貌描述是否一致 │ +│ │ +│ ──────────────────────────────────────────────────────────── │ +│ │ +│ 严格度设置: │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 宽松 ━━━●━━━━ 严格 │ │ +│ │ │ │ +│ │ 宽松: 只检查明显矛盾 │ │ +│ │ 中等: 检查性格和说话风格的主要变化 │ │ +│ │ 严格: 检查所有细节,包括微小的不一致 │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ ──────────────────────────────────────────────────────────── │ +│ │ +│ 特殊人物设置: │ +│ 
┌─────────────────────────────────────────────────────────┐ │ +│ │ 李云飞: │ │ +│ │ ☑ 需要保持耿直直率的性格 │ │ +│ │ ☑ 检查说话风格 (简洁、直接) │ │ +│ │ ☐ 检查外貌描述一致性 │ │ +│ │ 备注: 边关将军,说话不要太文绉绉 │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 苏婉儿: │ │ +│ │ ☑ 需要保持聪慧隐忍的性格 │ │ +│ │ ☑ 检查说话风格 (含蓄、委婉) │ │ +│ │ ☐ 检查外貌描述一致性 │ │ +│ │ 备注: 丞相之女,身份敏感 │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ [+ 添加更多人物] │ +│ │ +│ [保存] [取消] │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +**对话质量审核配置:** + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 对话质量审核 > 配置 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 质量标准: │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 对话自然度: ━━━━━●━━ (较严格) │ │ +│ │ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ │ │ +│ │ │ │ +│ │ 对话目的性: ━━━━●━━━ (中等) │ │ +│ │ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ │ │ +│ │ │ │ +│ │ 对话密度: ━━━●━━━━ (宽松) │ │ +│ │ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ │ │ +│ │ │ │ +│ │ 动作描写完整性: ━━━━●━━━ (中等) │ │ +│ │ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ ──────────────────────────────────────────────────────────── │ +│ │ +│ 禁止项 (检测到即标记为问题): │ +│ ☑ 现代词汇 (OK、拜拜、随便等) │ +│ ☑ 过长的人物独白 (超过 5 句) │ +│ ☐ 重复的对话内容 │ +│ ☐ 缺少说话人标签 │ +│ │ +│ ──────────────────────────────────────────────────────────── │ +│ │ +│ 风格要求: │ +│ ☑ 对话要符合人物性格 │ +│ ☑ 每句对话都有目的性 │ +│ ☐ 适度使用成语典故 │ +│ ☐ 包含情感暗示和潜台词 │ +│ │ +│ [保存] [取消] │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +#### 3.3.2 审核结果可视化 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 📊 EP03 审核结果 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─ 总体评分 ─────────────────────────────────────────────┐ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 78 / 100 ⚠️ 低于阈值 │ │ │ +│ │ │ ████████████░░░░ │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ 各维度评分: │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 人物一致性: 90分 ████████████████████░░ │ │ │ +│ │ │ 场景设定: 85分 ████████████████████░░░ │ │ │ +│ │ │ 剧情逻辑: 95分 ███████████████████████ │ │ │ +│ │ │ 对话质量: 65分 ████████████░░░░░░░░░░ ← 问题 │ │ │ +│ │ │ 风格一致: 80分 ████████████████████░░░ │ │ │ +│ │ │ 伏笔追踪: 70分 ████████████████░░░░░░░ ← 问题 │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 问题汇总 (按严重程度排序) ───────────────────────────┐ │ +│ │ │ │ +│ │ 🔴 严重问题 (1) - 需要处理 │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 对话质量 > 现代词汇 │ │ │ +│ │ │ │ │ │ +│ │ │ 位置: EP03 场景 5 │ │ │ +│ │ │ 问题: 检测到"OK"、"拜拜"等现代词汇 │ │ │ +│ │ │ │ │ │ +│ │ │ 原文: │ │ │ +│ │ │ 李云飞: 这事就这样了,OK? 
│ │ │ +│ │ │ 苏婉儿: 好的,拜拜。 │ │ │ +│ │ │ │ │ │ +│ │ │ 建议: 修改为古风表达 │ │ │ +│ │ │ • "OK" → "可好" / "就这样定了" │ │ │ +│ │ │ • "拜拜" → "告辞" / "这便告辞" │ │ │ +│ │ │ │ │ │ +│ │ │ [跳转到该场景] [自动修复] [忽略] │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ 🟡 一般问题 (2) - 建议处理 │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 伏笔追踪 > 未跟进重要伏笔 │ │ │ +│ │ │ 位置: EP03 整体 │ │ │ +│ │ │ 问题: EP01 设定的"李云飞身世之谜"未提及 │ │ │ +│ │ │ 建议: 在后续 2-3 集内要有暗示或进展 │ │ │ +│ │ │ [添加到提醒] [忽略] │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 对话质量 > 对话密度过高 │ │ │ +│ │ │ 位置: EP03 场景 2 │ │ │ +│ │ │ 问题: 连续 15 句对话,缺乏动作和环境描写 │ │ │ +│ │ │ 建议: 适当增加动作描写和场景切换 │ │ │ +│ │ │ [跳转到该场景] [忽略] │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ 🟢 轻微问题 (3) [展开/收起] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 快速操作 ─────────────────────────────────────────────┐ │ +│ │ [重新生成 EP03] [接受所有轻微问题] [导出审核报告] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +#### 3.3.3 审核与分批次模式的配合 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ ✅ 批次 1 审核完成 - 等待你的决策 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 本批次审核结果: │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ EP01 ✅ 92分 通过 无问题 │ │ +│ │ EP02 ✅ 89分 通过 2个轻微问题 │ │ +│ │ EP03 ⚠️ 78分 需处理 1个严重问题 + 2个一般问题 │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 决策选项 ───────────────────────────────────────────┐ │ +│ │ • [查看 EP03 详情并修改] (推荐) │ │ +│ │ • [自动修复 EP03 的问题] (AI 尝试修复) │ │ +│ │ • [重新生成 EP03] │ │ +│ │ • [接受本批次全部] (标记 EP03 为需后续修改) │ │ +│ │ • [调整审核配置后重新审核] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ──────────────────────────────────────────────────────────── │ +│ │ +│ 💡 提示: │ +│ • 如果对审核标准不满意,可以调整审核配置后重新审核 │ +│ • 审核配置可以保存为预设,下次直接使用 │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +#### 3.3.4 审核预设模板 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 审核预设模板 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 快速应用预设: │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 📝 草稿模式 (宽松) │ │ +│ │ • 严格度: 低 │ │ +│ │ • 只检查明显矛盾 │ │ +│ │ • 适合: 快速出稿,后期精修 │ │ +│ │ [应用预设] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 📋 标准模式 (推荐) │ │ +│ │ • 严格度: 中等 │ │ +│ │ • 检查主要维度 │ │ +│ │ • 适合: 大多数场景 │ │ +│ │ [应用预设] [当前使用中] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 🔍 严格模式 │ │ +│ │ • 严格度: 高 │ │ +│ │ • 检查所有细节 │ │ +│ │ • 适合: 精品内容,质量要求高 │ │ +│ │ [应用预设] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ ⚙️ 自定义模式 (你创建的预设) │ │ +│ │ • 你的定制配置... 
│ │ +│ │ [编辑] [应用] [删除] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ [+ 创建新预设] │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +#### 3.3.5 审核数据结构 + +```typescript +// 审核配置 +interface ReviewConfig { + // 使用的审核 Skills + enabledReviewSkills: string[]; + + // 总体严格度 + overallStrictness: 'loose' | 'medium' | 'strict'; + + // 各维度独立设置 + dimensionSettings: { + characterConsistency: { + enabled: boolean; + strictness: number; // 0-1 + customRules?: CustomRule[]; + }; + sceneConsistency: { enabled: boolean; strictness: number }; + plotLogic: { enabled: boolean; strictness: number }; + dialogueQuality: { enabled: boolean; strictness: number }; + styleConsistency: { enabled: boolean; strictness: number }; + foreshadowing: { enabled: boolean; strictness: number }; + }; + + // 自定义规则 + customRules: CustomRule[]; +} + +// 自定义审核规则 +interface CustomRule { + id: string; + name: string; + description: string; + triggerCondition: string; // 触发条件描述 + severity: 'low' | 'medium' | 'high'; + enabled: boolean; +} + +// 审核结果 +interface ReviewResult { + episodeId: string; + episodeNumber: number; + + // 总体评分 + overallScore: number; + passed: boolean; + + // 各维度评分 + dimensionScores: { + characterConsistency: number; + sceneConsistency: number; + plotLogic: number; + dialogueQuality: number; + styleConsistency: number; + foreshadowing: number; + }; + + // 问题列表 + issues: ReviewIssue[]; +} + +// 审核问题 +interface ReviewIssue { + id: string; + type: string; + severity: 'low' | 'medium' | 'high'; + + dimension: string; // 所属维度 + + location: { + episode: number; + scene: number; + line?: number; + }; + + description: string; + originalText?: string; + + suggestion: string; + autoFixAvailable?: boolean; +} +``` + +### 3.4 一致性审核 Skill (默认内置) + +```markdown +# Skill: consistency-checker + +## 功能 +检查新一集内容与全局设定和历史剧情的一致性 + +## 输入 +- **global_context**: 全局上下文(世界设定、人物小传、场景设定) +- **memory**: 记忆系统内容(事件时间线、人物状态、伏笔追踪) +- **current_episode**: 当前集数的内容 +- **episode_history**: 前面所有集数的摘要 + +## 检查项 +1. **人物设定一致性** + - 人物性格是否符合小传 + - 说话风格是否一致 + - 身份地位是否矛盾 + +2. **场景设定一致性** + - 场景描述是否与设定一致 + - 场景转换是否合理 + +3. **剧情逻辑一致性** + - 是否与前面剧情矛盾 + - 时间顺序是否合理 + - 因果关系是否成立 + +4. **伏笔管理** + - 新增伏笔是否记录 + - 旧伏笔是否跟进 + - 是否有矛盾之处 + +## 输出 +```json +{ + "passed": true/false, + "score": 85, + "issues": [ + { + "type": "character_inconsistency", + "description": "EP03中李云飞说话变得文绉绉,与设定不符", + "severity": "high", + "location": "EP03 场景5", + "suggestion": "建议修改对话,保持直率风格" + } + ], + "new_foreshadowing": [ + { + "episode": 3, + "scene": 2, + "description": "婉儿提到'母亲留下的锦囊'", + "importance": "high" + } + ], + "resolved_threads": [ + { + "thread": "皇帝忌惮丞相的原因", + "resolved_in": "EP03", + "resolution": "丞相曾掌握先帝遗诏" + } + ] +} +``` + +## 执行步骤 +1. 读取全局上下文和记忆系统 +2. 分析当前集数内容 +3. 逐一检查一致性 +4. 如发现问题,给出具体修改建议 +5. 
更新记忆系统(新增伏笔、收线问题、人物状态变化) +``` + +--- + +## 四、全自动创作模式 + +### 4.1 运行模式选择 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 选择运行模式 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ ⭐ 推荐模式:分批次执行 (最实用) │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 一次生成 3-5 集,你审核后继续 │ │ +│ │ • 批次大小: [3] 集 (可调整 1-10) │ │ +│ │ • 每集生成后你查看 → 调整 → 继续下一批 │ │ +│ │ • 可随时调整每集参数(时长、风格、重点) │ │ +│ │ • 发现问题及时修正,避免全盘重来 │ │ +│ │ │ │ +│ │ 适合: 大多数场景,平衡效率和控制 │ │ +│ │ │ │ +│ │ ▼ 批次配置 │ │ +│ │ • 本批集数: EP01-EP03 │ │ +│ │ • EP01 时长: [3] 分钟 风格: [默认▼] │ │ +│ │ • EP02 时长: [3] 分钟 风格: [默认▼] │ │ +│ │ • EP03 时长: [5] 分钟 风格: [高潮▼] ← 高潮集加长 │ │ +│ │ │ │ +│ │ [开始生成本批次] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ 🚀 全自动模式 (适合快速出稿) │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ Agent 自主完成全部 30 集 │ │ +│ │ • 每集自动创作 → 自动审核 → 自动迭代 │ │ +│ │ • 遇到质量问题自动重试(最多3次) │ │ +│ │ • 完成后通知你审核结果 │ │ +│ │ • 你只需要最后查看/修改/导出 │ │ +│ │ │ │ +│ │ 适合: 想要快速出稿,后续再精修 │ │ +│ │ │ │ +│ │ ⚠️ 风险: 一次性生成30集,后期修改工作量大 │ │ +│ │ │ │ +│ │ ▼ 高级设置 │ │ +│ │ • 最大重试次数: [3] 次 │ │ +│ │ • 质量阈值: [85%] │ │ +│ │ • 并发集数: [1] 集 │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ 🤝 逐步审核模式 (完全控制) │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 每个关键节点等待你的确认 │ │ +│ │ • 大纲生成 → 你确认 → 继续对白 │ │ +│ │ • 每集完成 → 你审核 → 下一集 │ │ +│ │ • 可随时调整方向 │ │ +│ │ │ │ +│ │ 适合: 对创意有特定要求,想要把控每个环节 │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 4.2 分批次执行模式 (推荐) + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 📋 分批次执行 - 宫廷风云 (30集) │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 当前进度: 第 1 批次 / 共 10 批 │ +│ ████████░░░░░░░░░░░░░░░░░ 10% (3/30 集) │ +│ │ +│ ┌─ 当前批次配置 ───────────────────────────────────────┐ │ +│ │ 批次 1: EP01 - EP03 │ │ +│ │ ┌─────────────────────────────────────────────────┐ │ │ +│ │ │ EP01 - 初识 │ │ │ +│ │ │ • 时长: [3] 分钟 │ │ │ +│ │ │ • 重点: ☑ 人物介绍 ☑ 建立关系 ☐ 悬念设置 │ │ │ +│ │ │ • 备注: 开篇要吸引人 │ │ │ +│ │ └─────────────────────────────────────────────────┘ │ │ +│ │ ┌─────────────────────────────────────────────────┐ │ │ +│ │ │ EP02 - 冲突 │ │ │ +│ │ │ • 时长: [3] 分钟 │ │ │ +│ │ │ • 重点: ☑ 矛盾显现 ☑ 情感升温 ☐ 伏笔埋设 │ │ │ +│ │ └─────────────────────────────────────────────────┘ │ │ +│ │ ┌─────────────────────────────────────────────────┐ │ │ +│ │ │ EP03 - 危机 (高潮集) │ │ │ +│ │ │ • 时长: [5] 分钟 ⚠️ 加长 │ │ │ +│ │ │ • 重点: ☑ 危机爆发 ☑ 转折点 ☑ 情感高潮 │ │ │ +│ │ │ • 备注: 这集是第一个高潮,需要更多细节 │ │ │ +│ │ └─────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ [开始生成本批次] [重新配置批次] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 已完成的批次 ───────────────────────────────────────┐ │ +│ │ (当前无已完成批次) │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +**本批次生成中:** + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 🔄 批次 1 生成中 - EP01: 初识 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 本批次进度: ████████░░░░░░░ 66% (2/3 集) │ +│ │ +│ EP01 ✅ 已完成 EP02 ✅ 已完成 EP03 🔄 生成中 │ +│ │ +│ ┌─ EP03 实时预览 ──────────────────────────────────────┐ │ +│ │ 📜 场景 3: 花园相会 (当前生成) │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 【场景】御花园,傍晚 │ │ │ +│ │ │ 【人物】李云飞、苏婉儿 │ │ │ +│ │ │ │ │ │ +│ │ │ 李云飞:(拱手)苏姑娘,此地不宜久留。 │ │ │ +│ │ │ 苏婉儿:(轻叹)将军所言极是... 
│ │ │ +│ │ │ [正在实时生成中...] │ │ │ +│ │ │ │ │ │ +│ │ │ [暂停本批次] [跳过当前场景] │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +**本批次完成,等待审核:** + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ ✅ 批次 1 生成完成 - 等待你的审核 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 本批次统计 │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ EP01 ✅ 质量分数: 92分 时长: 3分10秒 │ │ +│ │ EP02 ✅ 质量分数: 89分 时长: 3分05秒 │ │ +│ │ EP03 ⚠️ 质量分数: 78分 时长: 4分50秒 → [查看] │ │ +│ │ │ │ +│ │ 平均质量: 86.3分 │ │ +│ │ ⚠️ 需要关注: 1个 │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 操作选项 ───────────────────────────────────────────┐ │ +│ │ • [查看并修改 EP03] (推荐) │ │ +│ │ • [重新生成 EP03] │ │ +│ │ • [接受本批次全部] │ │ +│ │ • [调整下一批配置后继续] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 下一批预览 ─────────────────────────────────────────┐ │ +│ │ 批次 2: EP04 - EP06 │ │ +│ │ • EP04 时长: [3] 分钟 风格: [默认▼] │ │ +│ │ • EP05 时长: [3] 分钟 风格: [默认▼] │ │ +│ │ • EP06 时长: [4] 分钟 风格: [小高潮▼] │ │ +│ │ │ │ +│ │ [配置下一批] [使用相同配置继续] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 4.4 全自动模式运行监控 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 🚀 全自动创作进行中 - 宫廷风云 (30集) │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 总进度: ████████░░░░░░░░░░░░ 27% (8/30 集) │ +│ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 当前执行: EP09 - 边关夜话 │ │ +│ │ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ │ │ +│ │ 阶段: 🔄 一致性审核中... │ │ +│ │ Agent: 剧集创作 Agent │ │ +│ │ 已用时间: 12 分 30 秒 │ │ +│ │ 预计剩余: 约 45 分钟 │ │ +│ │ │ │ +│ │ [暂停] [停止] [查看实时日志] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ 集数状态 │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ EP01 ✅ EP02 ✅ EP03 ✅ EP04 ✅ EP05 ✅ EP06 ✅ │ │ +│ │ EP07 ✅ EP08 ✅ EP09 🔄 EP10 ⏳ EP11 ⏳ EP12 ⏳ │ │ +│ │ EP13 ⏳ EP14-30 ⏳ │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ 质量统计 │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 平均质量分数: 92.3分 │ │ +│ │ 一次性通过: 7集 重试后通过: 1集 │ │ +│ │ │ │ +│ │ ⚠️ 警告: 0个 ❌ 错误: 0个 │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ [查看全部日志] [导出已完成集数] │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 4.5 自动重试逻辑 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Agent 内部的自动重试决策 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 开始创作 EP03 │ +│ ↓ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ Step 1: 结构分析 → ✅ 通过 │ │ +│ │ Step 2: 大纲生成 → ✅ 通过 │ │ +│ │ Step 3: 对话创作 → ✅ 完成 │ │ +│ │ Step 4: 一致性审核 → ⚠️ 分数 78 (低于阈值 85) │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ 检测到质量问题,开始自动重试 (第1次/共3次) │ +│ ↓ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 根据审核建议重新生成: │ │ +│ │ • 李云飞对话风格略显文绉绉,需要更直率一些 │ │ +│ │ • 场景3的情感转变需要更自然的过渡 │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ Step 3: 对话创作 (重试) → ✅ 完成 │ +│ Step 4: 一致性审核 (重试) → ✅ 分数 91 (通过!) 
│ +│ ↓ │ +│ Step 5: 更新记忆系统 → ✅ 完成 │ +│ ↓ │ +│ EP03 完成,继续 EP04 │ +│ │ +│ ─────────────────────────────────────────────────────────── │ +│ │ +│ 如果重试3次后仍不通过: │ +│ → 标记为"需人工审核" │ +│ → 全自动模式继续下一集 │ +│ → 完成后统一通知用户处理 │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 4.6 完成通知 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 🎉 全自动创作完成! │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 项目: 宫廷风云 (30集) │ +│ 完成时间: 2024-01-15 14:32 │ +│ 总耗时: 3小时 45分钟 │ +│ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 📊 质量报告 │ │ +│ │ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ │ │ +│ │ 平均质量分数: 91.5分 │ │ +│ │ 一次性通过: 26集 │ │ +│ │ 重试1次通过: 3集 │ │ +│ │ 重试2次通过: 1集 (EP15) │ │ +│ │ │ │ +│ │ 一致性检查: ✅ 全部通过 │ │ +│ │ • 人物设定一致性: 100% │ │ +│ │ • 场景设定一致性: 100% │ │ +│ │ • 剧情逻辑性: 98% (EP15有小问题已自动修复) │ │ +│ │ │ │ +│ │ 伏笔追踪: │ │ +│ │ • 新增伏笔: 12个 │ │ +│ │ • 收线伏笔: 8个 (剩余4个将在后续跟进) │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ ⚠️ 需要人工审核 (2处) │ │ +│ │ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ │ │ +│ │ 1. EP15-场景3: 李云飞的情感转变略显突兀 │ │ +│ │ [查看] [重新生成] [接受] │ │ +│ │ │ │ +│ │ 2. EP22-场景5: 两位配角对话内容重复 │ │ +│ │ [查看] [重新生成] [接受] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ [👀 查看全部集数] [📤 导出全集] [📝 继续编辑] │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +--- + +## 五、用户友好的内容展示与交互 + +**核心原则:所有面向用户的内容,都不直接以原始文件格式(Markdown、JSON 等)呈现** + +平台的设计理念是"所见即所得",用户不应该看到任何技术文件格式。所有内容都通过精心设计的 UI 组件来呈现。 + +### 5.1 Skill 配置界面 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Skill 配置界面的用户友好设计 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ ❌ 错误做法:直接展示原始 Markdown │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ # 古风对话大师 │ │ +│ │ [一大段 Markdown 文本,用户需要理解格式才能修改] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ ✅ 正确做法:参数化、可视化表单界面 │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 🎭 古风对话大师 [配置] │ │ +│ │ │ │ +│ │ ┌─ 风格参数 ─────────────────────────────────────────┐ │ │ +│ │ │ 文言程度: ━━━●━━━ (0.7) │ │ │ +│ │ │ 对话密度: ━━━━━●━ (0.6) │ │ │ +│ │ │ 情感浓度: ☐含蓄 ☑适中 ☐外放 ☐浓烈 │ │ │ +│ │ └────────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ ┌─ 创作重点 ─────────────────────────────────────────┐ │ │ +│ │ │ ☑ 人物性格通过对话体现 │ │ │ +│ │ │ ☑ 每句对话都有目的性 │ │ │ +│ │ │ ☑ 避免现代词汇和句式 │ │ │ +│ │ └────────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ [保存配置] [高级模式:查看原始Markdown] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +**要点:** +- 滑块、单选、多选、开关等表单组件 +- 参数调整后实时显示效果说明 +- 根据当前配置实时生成示例内容 +- 高级模式可查看原始 Markdown(非默认) + +### 5.2 剧本生成预览 + +**生成过程中的实时预览:** + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 剧本生成过程中的实时预览 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 正在生成: EP01 - 宫廷风云 │ +│ 进度: ████████████░░░░░░░░ 60% │ +│ │ +│ ┌─ 当前阶段 ─────────────────────────────────────────────┐ │ +│ │ ✅ 1. 结构分析 (已完成) │ │ +│ │ ✅ 2. 大纲生成 (已完成) │ │ +│ │ 🔄 3. 对话创作 (进行中...) │ │ +│ │ ⏳ 4. 
一致性审核 (等待中) │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 实时预览 ─────────────────────────────────────────────┐ │ +│ │ 📜 场景 3: 花园相会 │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 【场景】御花园,傍晚 │ │ │ +│ │ │ 【人物】李云飞、苏婉儿 │ │ │ +│ │ │ │ │ │ +│ │ │ 李云飞:(拱手)苏姑娘,此地不宜久留。 │ │ │ +│ │ │ 苏婉儿:(轻叹)将军所言极是... │ │ │ +│ │ │ [正在实时生成中...] │ │ │ +│ │ │ [暂停生成] [跳过当前场景] │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ 💡 提示:生成过程中可以实时查看内容,如有问题可随时暂停调整 │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +**生成完成后的查看界面:** + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 剧本查看与编辑界面 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 📺 宫廷风云 > EP01 - 初识 │ +│ │ +│ ┌─ 导航 ─────────────────────────────────────────────────┐ │ +│ │ [大纲视图] [场景列表] [逐场查看] [人物统计] [导出] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 视图切换 ─────────────────────────────────────────────┐ │ +│ │ 📖 剧本阅读 📊 人物出场统计 🎭 情节曲线 │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 剧本内容 ─────────────────────────────────────────────┐ │ +│ │ ┌─ 场景 1: 宫殿朝堂 ──────────────────────────────┐ │ │ +│ │ │ ▼ 展开/收起 │ │ │ +│ │ │ 【场景】金銮殿,早朝时分 │ │ │ +│ │ │ 【人物】皇帝、李云飞、丞相 │ │ │ +│ │ │ │ │ │ +│ │ │ 皇帝:李爱卿,边关战事如何? │ │ │ +│ │ │ │ │ │ +│ │ │ 李云飞:(跪拜)启禀陛下,匈奴已退兵三十里... │ │ │ +│ │ │ │ │ │ +│ │ │ 丞相:陛下,此事是否另有隐情? │ │ │ +│ │ │ │ │ │ +│ │ │ [编辑本场景] [重新生成] [添加批注] │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ │ ┌─ 场景 2: 宫门外 ────────────────────────────────┐ │ │ +│ │ │ ▶ 点击展开 │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ │ [+ 添加新场景] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 侧边栏: 本集信息 ─────────────────────────────────────┐ │ +│ │ 📊 质量分数: 92分 │ │ +│ │ 📝 场景数量: 5场 │ │ +│ │ 👥 出场人物: 3人 │ │ +│ │ ⏱️ 预计时长: 3分20秒 │ │ +│ │ │ │ +│ │ [继续生成下一集] [导出PDF] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 5.3 剧本导入预览 + +**导入源文件时的解析与预览:** + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 剧本导入与解析界面 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 📥 导入剧本 │ +│ │ +│ ┌─ 第一步:上传文件 ──────────────────────────────────────┐ │ +│ │ 拖拽文件到此处,或点击选择文件 │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 支持格式: .txt .docx .pdf .fdx .celtx │ │ │ +│ │ │ [选择文件] │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 第二步:解析预览 ──────────────────────────────────────┐ │ +│ │ 📊 文件信息 │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 文件大小: 128 KB │ │ │ +│ │ │ 预计场景: 8场 │ │ │ +│ │ │ 预计人物: 6人 │ │ │ +│ │ │ 解析结果: ✅ 成功识别 │ │ │ +│ │ │ ⚠️ 发现2处格式可能需要调整 │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ 👥 识别的人物 │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ ✅ 李云飞 (出现5次) │ │ │ +│ │ │ ✅ 苏婉儿 (出现4次) │ │ │ +│ │ │ ❓ 侍卫甲 (可能是通用角色) │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ [调整识别设置] [重新解析] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 第三步:导入确认 ──────────────────────────────────────┐ │ +│ │ ☑ 自动识别人物 ☑ 自动拆分场景 ☐ 合并连续对话 │ │ +│ │ [返回] [确认导入] │ │ +│ 
└────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 5.4 全局上下文编辑界面 + +**世界观、人物、场景的编辑都采用结构化表单:** + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 全局上下文编辑界面 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 📖 宫廷风云 > 全局设定 > 人物设定 │ +│ │ +│ ┌─ 李云飞 ─────────────────────────────────────────────┐ │ +│ │ 🗡️ 边关将军 │ │ +│ │ │ │ +│ │ 基本信息: │ │ +│ │ 年龄: [28] 性别: [男▼] 身份: [镇北将军▼] │ │ +│ │ │ │ +│ │ 性格特征 (可多选): │ │ +│ │ ☑ 耿直刚正 ☑ 忠诚勇敢 ☐ 沉默寡言 ☐ 粗中有细 │ │ +│ │ [+ 添加自定义特征] │ │ +│ │ │ │ +│ │ 外貌特征: │ │ +│ │ ┌────────────────────────────────────────────────┐ │ │ +│ │ │ 身形高大,剑眉星目,不怒自威... │ │ │ +│ │ │ [富文本编辑器,支持格式化] │ │ │ +│ │ └────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ 说话风格: (滑块) │ │ +│ │ 简洁 ─────●──── 冗长 │ │ +│ │ 直接 ───●────── 委婉 │ │ +│ │ │ │ +│ │ 人物关系: │ │ +│ │ ┌─ 关系网络图 ─────────────────────────────────┐ │ │ +│ │ │ 李云飞 │ │ │ +│ │ │ │ │ │ │ +│ │ │ ┌──┴──┐ │ │ │ +│ │ │ 皇帝 苏婉儿 │ │ │ +│ │ │ (君臣) (爱慕) │ │ │ +│ │ │ [+ 添加关系] │ │ │ +│ │ └────────────────────────────────────────────────┘ │ │ +│ │ [保存] [预览人物卡片] [删除] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 5.5 记忆系统查看界面 + +**事件时间线、伏笔追踪的可视化展示:** + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 记忆系统查看界面 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 🧠 宫廷风云 > 记忆系统 │ +│ │ +│ ┌─ 导航 ─────────────────────────────────────────────────┐ │ +│ │ [📜 事件时间线] [💡 伏笔追踪] [👥 人物状态] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 事件时间线 ───────────────────────────────────────────┐ │ +│ │ ┌─ EP01 ────────────────────────────────────────────┐ │ │ +│ │ │ ▼ 展开3个事件 │ │ │ +│ │ │ • 李云飞边关退兵 │ │ │ +│ │ │ • 李云飞与苏婉儿花园初次相会 │ │ │ +│ │ │ • 苏婉儿被皇帝赐婚给丞相之子 │ │ │ +│ │ └────────────────────────────────────────────────────┘ │ │ +│ │ ┌─ EP02 ────────────────────────────────────────────┐ │ │ +│ │ │ ▶ 点击展开4个事件 │ │ │ +│ │ └────────────────────────────────────────────────────┘ │ │ +│ │ [筛选: ▼全部事件 ▼关键事件 ▼涉及人物:李云飞] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 伏笔追踪 ─────────────────────────────────────────────┐ │ +│ │ 待收线伏笔 (4) │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 💡 李云飞身世之谜 │ │ │ +│ │ │ 首次出现: EP01 最近提及: EP08 │ │ │ +│ │ │ 状态: 🔴 需要尽快收线 │ │ │ +│ │ │ 预计收线: EP12-15 [添加备注] [设置提醒] │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 5.6 质量报告查看界面 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 质量报告可视化界面 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 📊 宫廷风云 > 质量报告 │ +│ │ +│ ┌─ 整体评分 ─────────────────────────────────────────────┐ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 91.5 / 100 │ │ │ +│ │ │ ████████████░░ │ │ │ +│ │ │ 优秀 良好 及格 需改进 │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ │ 一致性: ████████████████░ 96% │ │ +│ │ 创意性: ██████████░░░░░░ 72% │ │ +│ │ 完整度: ██████████████░░ 88% │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 问题汇总 ─────────────────────────────────────────────┐ │ +│ │ 🔴 严重问题 (2) │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ 1. 
EP15-场景3: 人物情感转变突兀 │ │ │ +│ │ │ [跳转到该场景] [重新生成] │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ │ 🟡 一般问题 (5) [展开] 🟢 轻微问题 (8) [展开] │ │ +│ └────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 5.7 设计原则总结 + +| 场景 | 错误做法 | 正确做法 | +|-----|---------|---------| +| **Skill 配置** | 展示原始 Markdown | 参数化表单 + 实时预览 | +| **剧本生成** | 等待完成后查看 | 生成过程中实时流式预览 | +| **剧本查看** | 纯文本展示 | 结构化视图(场景卡片、分镜视图) | +| **剧本导入** | 直接解析使用 | 解析预览 + 确认环节 | +| **全局设定** | 文本文件编辑 | 结构化表单(富文本、滑块、选择器) | +| **记忆系统** | JSON 数据展示 | 时间线、关系图、伏笔卡片 | +| **质量报告** | 文本列表 | 可视化图表 + 交互式过滤 | + +--- + +## 六、技术架构 + +### 6.1 整体架构图 + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ 用户界面层 │ +│ ┌────────────┐ ┌────────────┐ ┌────────────┐ ┌────────────┐ │ +│ │Skill管理中心│ │Agent管理 │ │项目管理器 │ │ Skill市场 │ │ +│ └────────────┘ └────────────┘ └────────────┘ └────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ 编排引擎层 │ +│ ┌──────────────────────────────────────────────────────────────────────┐ │ +│ │ Agent 执行引擎 (固定框架) │ │ +│ │ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ │ +│ │ │ 工作流引擎 │ │ 质量控制器 │ │ 重试管理器 │ │ │ +│ │ └─────────────┘ └─────────────┘ └─────────────┘ │ │ +│ └──────────────────────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ Skill 管理层 │ +│ ┌────────────┐ ┌────────────┐ ┌────────────┐ ┌────────────┐ │ +│ │ Skill 加载 │ │参数解析器 │ │提示词渲染 │ │工具调用器 │ │ +│ └────────────┘ └────────────┘ └────────────┘ └────────────┘ │ +│ ↓ ↓ ↓ ↓ │ +│ Skill内容融入提示词,指导Agent行为 (类似Claude Code) │ +└─────────────────────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ 上下文管理层 │ +│ ┌──────────────────────────────────────────────────────────────────────┐ │ +│ │ 全局上下文 + 记忆系统 │ │ +│ │ • 世界设定 • 人物小传 • 场景设定 • 事件时间线 • 伏笔追踪 │ │ +│ └──────────────────────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ 模型接口层 │ +│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ +│ │ Claude API │ │ OpenAI API │ │ Gemini API │ │ 自定义模型 │ │ +│ └──────────────┘ └──────────────┘ └──────────────┘ └──────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ 数据存储层 │ +│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ +│ │ Skill 仓库 │ │ 项目数据 │ │ 记忆系统 │ │ 用户配置 │ │ +│ └──────────────┘ └──────────────┘ └──────────────┘ └──────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +### 6.2 核心数据结构 + +```typescript +// ============= Skill ============= +interface Skill { + id: string; + name: string; + version: string; + author: string; + type: 'builtin' | 'user'; // 内置或用户创建 + + // 核心内容 - 直接影响 Agent 行为 + behaviorGuide: string; // 行为指导文档(类似Claude Code Skills) + config: SkillConfig; // 可配置参数 + template?: string; // 提示词模板 + + // 工具定义(仅内置 Skill 支持) + tools?: SkillTool[]; // 该 Skill 提供的工具 + + // 
元数据 + category: string; + tags: string[]; + visibility: 'private' | 'public'; +} + +// Skill 工具定义(用于内置 Skills) +interface SkillTool { + name: string; + description: string; + script: string; // 脚本文件名 + parameters: Record<string, { type: string; description: string }>; +} + +interface SkillConfig { + // 风格预设 + preset?: string; + + // 可调节参数 + parameters: { + [key: string]: { + type: 'select' | 'slider' | 'text' | 'boolean'; + value: any; + options?: any[]; + min?: number; + max?: number; + }; + }; + + // 权重配置 + weights?: { + [dimension: string]: number; + }; +} + +// ============= Agent (平台维护) ============= +interface Agent { + id: string; + name: string; + description: string; + category: string; + + // 固定的工作流 + workflow: WorkflowStage[]; + + // 引用的 Skills (用户可配置替换) + skillBindings: { + [stageId: string]: string; // stageId -> skillId + }; + + // 决策规则 + decisionRules: DecisionRule[]; +} + +// ============= Series Project (剧集项目) ============= +interface SeriesProject { + id: string; + name: string; + type: 'series'; + + // 使用的 Agent + agentId: string; + + // 运行模式 + mode: 'auto' | 'semi-auto'; + autoRetryConfig?: { + maxRetries: number; + qualityThreshold: number; + }; + + // 全局上下文 + globalContext: { + worldSetting: string; // 世界观设定 + characterProfiles: { // 人物小传 + [characterId: string]: { + name: string; + description: string; + personality: string; + speechStyle: string; + currentState: string; + relationships: { [otherId: string]: string }; + appearance?: string; + }; + }; + sceneSettings: { // 场景设定 + [sceneId: string]: { + name: string; + description: string; + atmosphere: string; + }; + }; + overallOutline: string; // 整体大纲 + styleGuide: string; // 风格指南 + }; + + // 记忆系统 + memory: { + eventTimeline: Array<{ // 事件时间线 + episode: number; + event: string; + timestamp?: string; + }>; + pendingThreads: Array<{ // 待收线问题 + id: string; + description: string; + introducedAt: number; + importance: 'high' | 'medium' | 'low'; + resolved?: boolean; + resolvedAt?: number; + }>; + foreshadowing: Array<{ // 伏笔追踪 + episode: number; + scene: number; + description: string; + importance: 'high' | 'medium' | 'low'; + resolved?: boolean; + }>; + characterStates: { // 人物状态历史 + [characterId: string]: Array<{ + episode: number; + state: string; + change: string; + }>; + }; + }; + + // 子项目 (集数) + episodes: { + [episodeId: string]: { + number: number; + title?: string; + status: 'pending' | 'writing' | 'review' | 'completed' | 'needs-review'; + content?: string; + summary?: string; + qualityScore?: number; + retryCount?: number; + issues?: Issue[]; + createdAt: Date; + completedAt?: Date; + }; + }; + + // Skill 配置覆盖 + skillSettings: { + [skillId: string]: { + parameterOverrides?: SkillConfig['parameters']; + enabled: boolean; + }; + }; + + createdAt: Date; + updatedAt: Date; +} + +// ============= 问题 ============= +interface Issue { + type: 'character_inconsistency' | 'scene_inconsistency' | 'plot_hole' | 'dialogue_style'; + description: string; + severity: 'low' | 'medium' | 'high'; + location: { + episode: number; + scene: number; + }; + suggestion: string; +} +``` + +### 6.3 项目文件目录结构 + +``` +workspace/ +├── skills/ # 用户 Skills +│ ├── builtin/ # 内置 Skills (平台维护) +│ │ ├── skill-creator/ # - SKILL.md +│ │ │ └── scripts/ # - validate_skill.py, generate_template.py +│ │ └── agent-creator/ # - SKILL.md +│ │ └── scripts/ # - validate_agent.py, generate_workflow.py +│ │ +│ └── user/ # 用户创建的 Skills +│ ├── dialogue-writer-ancient/ # - SKILL.md (无 scripts 目录) +│ ├── consistency-checker/ +│ └── ... 
+│
+├── agents/                          # Agent 定义 (代码驱动)
+│   ├── series-creation.ts           # 剧集创作 Agent 工作流代码
+│   ├── storyboard-design.ts         # 分镜设计 Agent 工作流代码
+│   └── short-story.ts               # 短篇小说 Agent 工作流代码
+│
+├── projects/                        # 项目目录
+│   └── <project-id>/                # 具体项目
+│       ├── .project.json            # 项目配置文件
+│       ├── global-context/          # 全局上下文
+│       │   ├── world-setting.md
+│       │   ├── characters/
+│       │   │   ├── main-character.md
+│       │   │   └── ...
+│       │   └── outline.md
+│       ├── memory/                  # 记忆系统 (自动维护)
+│       │   ├── timeline.json
+│       │   ├── foreshadowing.json
+│       │   └── character-states.json
+│       └── episodes/                # 各集内容
+│           ├── EP01/
+│           │   ├── structure.json
+│           │   ├── outline.json
+│           │   ├── content.md
+│           │   └── review.json
+│           ├── EP02/
+│           └── ...
+│
+├── .creative-studio/                # 平台配置
+│   ├── config.json                  # 全局配置
+│   └── marketplace/                 # Skill 市场 (可选)
+│
+└── cache/                           # 缓存目录
+    ├── llm-responses/
+    └── skill-templates/
+```
+
+### 6.4 Scripts 执行系统
+
+#### 6.4.1 设计原则
+
+```
+┌─────────────────────────────────────────────────────────────────┐
+│                  Scripts 执行系统的分层设计                      │
+├─────────────────────────────────────────────────────────────────┤
+│                                                                 │
+│  内置 Skills (如 skill-creator)                                 │
+│  ┌─────────────────────────────────────────────────────────┐   │
+│  │ • 可以定义 Scripts                                      │   │
+│  │ • Scripts 用于验证、生成模板等平台功能                   │   │
+│  │ • 由平台维护,确保安全性                                 │   │
+│  └─────────────────────────────────────────────────────────┘   │
+│                            ↓                                    │
+│  Scripts 执行引擎                                               │
+│  ┌─────────────────────────────────────────────────────────┐   │
+│  │ • 支持 Python、Node.js、Shell 脚本                      │   │
+│  │ • 沙箱隔离执行                                          │   │
+│  │ • 超时控制、资源限制                                     │   │
+│  └─────────────────────────────────────────────────────────┘   │
+│                            ↓                                    │
+│  用户 Skills                                                    │
+│  ┌─────────────────────────────────────────────────────────┐   │
+│  │ • 只需提供行为指导                                       │   │
+│  │ • 不能定义 Scripts                                       │   │
+│  │ • 简化用户使用                                           │   │
+│  └─────────────────────────────────────────────────────────┘   │
+│                                                                 │
+└─────────────────────────────────────────────────────────────────┘
+```
+
+#### 6.4.2 执行引擎实现
+
+```typescript
+// Scripts 执行引擎
+import * as fs from 'fs';
+import * as path from 'path';
+import { spawn } from 'child_process';
+
+class SkillScriptExecutor {
+
+  // 执行 Skill 中的脚本
+  async executeScript(
+    skillId: string,
+    scriptName: string,
+    args: any
+  ): Promise<any> {
+
+    // 安全检查:只允许内置 Skill 执行脚本
+    const skill = await loadSkill(skillId);
+    if (skill.type !== 'builtin') {
+      throw new Error('Only builtin skills can execute scripts');
+    }
+
+    const scriptPath = `/skills/${skillId}/scripts/${scriptName}`;
+
+    // 检查脚本是否存在
+    if (!await this.scriptExists(scriptPath)) {
+      throw new Error(`Script not found: ${scriptPath}`);
+    }
+
+    // 根据扩展名选择执行方式
+    const ext = path.extname(scriptName);
+
+    switch (ext) {
+      case '.py':
+        return await this.executePython(scriptPath, args);
+      case '.js':
+        return await this.executeNode(scriptPath, args);
+      case '.sh':
+        return await this.executeBash(scriptPath, args);
+      default:
+        throw new Error(`Unsupported script type: ${ext}`);
+    }
+  }
+
+  private async executePython(scriptPath: string, args: any): Promise<any> {
+    return new Promise((resolve, reject) => {
+      // 先启动子进程(变量命名为 child,避免遮蔽全局 process)
+      const child = spawn('python', [scriptPath, JSON.stringify(args)]);
+
+      // 超时控制:30 秒后强制结束子进程
+      const timeout = setTimeout(() => {
+        child.kill();
+        reject(new Error('Script execution timeout'));
+      }, 30000);
+
+      let stdout = '';
+      let stderr = '';
+
+      child.stdout.on('data', (data) => {
+        stdout += data.toString();
+      });
+
+      child.stderr.on('data', (data) => {
+        stderr += data.toString();
+      });
+
+      child.on('close', (code) => {
+        clearTimeout(timeout);
+        if (code === 0) {
+          try {
+            resolve(JSON.parse(stdout));
+          } catch {
+            resolve(stdout);
+          }
+        } else {
+          reject(new Error(`Script failed: ${stderr}`));
+        }
+      });
+    });
+  }
+
+  // 检查脚本文件是否存在
+  private async scriptExists(scriptPath: string): Promise<boolean> {
+    try {
+      await fs.promises.access(scriptPath);
+      return true;
+    } catch {
+      return false;
+    }
+  }
+
+  // Node.js、Bash 类似实现,示意如下:
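+
+  // executeNode / executeBash 的一种示意实现:与 executePython 共用相同的
+  // 超时与输出收集逻辑,仅启动命令不同。runProcess 为本示例引入的辅助方法,
+  // 并非平台既定 API。
+  private async executeNode(scriptPath: string, args: any): Promise<any> {
+    return this.runProcess('node', [scriptPath, JSON.stringify(args)]);
+  }
+
+  private async executeBash(scriptPath: string, args: any): Promise<any> {
+    return this.runProcess('bash', [scriptPath, JSON.stringify(args)]);
+  }
+
+  // 通用子进程执行:spawn + 30 秒超时 + stdout/stderr 收集
+  private runProcess(command: string, cmdArgs: string[]): Promise<any> {
+    return new Promise((resolve, reject) => {
+      const child = spawn(command, cmdArgs);
+
+      const timeout = setTimeout(() => {
+        child.kill();
+        reject(new Error('Script execution timeout'));
+      }, 30000);
+
+      let stdout = '';
+      let stderr = '';
+      child.stdout.on('data', (data) => { stdout += data.toString(); });
+      child.stderr.on('data', (data) => { stderr += data.toString(); });
+
+      child.on('close', (code) => {
+        clearTimeout(timeout);
+        if (code === 0) {
+          // 输出优先按 JSON 解析,解析失败则原样返回文本
+          try { resolve(JSON.parse(stdout)); } catch { resolve(stdout); }
+        } else {
+          reject(new Error(`Script failed: ${stderr}`));
+        }
+      });
+    });
+  }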
+} + +// 工具注册表 +class ToolRegistry { + private tools = new Map<string, Tool>(); + + // 从内置 Skill 加载工具 + async loadToolsFromSkill(skillId: string) { + const skill = await loadSkill(skillId); + + if (skill.type === 'builtin' && skill.tools) { + for (const toolDef of skill.tools) { + this.registerTool({ + name: toolDef.name, + description: toolDef.description, + execute: async (args) => { + return await this.scriptExecutor.executeScript( + skillId, + toolDef.script, + args + ); + } + }); + } + } + } + + registerTool(tool: Tool) { + this.tools.set(tool.name, tool); + } + + getTool(name: string): Tool | undefined { + return this.tools.get(name); + } + + getAllTools(): Tool[] { + return Array.from(this.tools.values()); + } +} +``` + +#### 6.4.3 skill-creator 示例 + +```json +// skills/skill-creator/skill.json +{ + "id": "skill-creator", + "name": "Skill 创建器", + "type": "builtin", + "version": "1.0.0", + "tools": [ + { + "name": "validateSkillStructure", + "description": "验证 Skill 文件结构是否符合规范", + "script": "validate_skill.py", + "parameters": { + "skillPath": { + "type": "string", + "description": "Skill 文件路径" + } + } + }, + { + "name": "generateSkillTemplate", + "description": "生成 Skill 模板文件", + "script": "generate_template.py", + "parameters": { + "name": { + "type": "string", + "description": "Skill 名称" + }, + "category": { + "type": "string", + "description": "Skill 分类" + } + } + } + ] +} +``` + +```python +# skills/skill-creator/scripts/validate_skill.py +import sys +import json +import os +from pathlib import Path + +def validate_skill_structure(skill_path): + """验证 Skill 文件结构""" + errors = [] + warnings = [] + + skill_dir = Path(skill_path) + + # 检查 SKILL.md 是否存在 + skill_md = skill_dir / "SKILL.md" + if not skill_md.exists(): + errors.append("SKILL.md 文件不存在") + else: + # 验证 SKILL.md 格式 + content = skill_md.read_text() + if not content.strip().startswith("# Skill:"): + warnings.append("SKILL.md 应该以 '# Skill:' 开头") + + # 用户 Skill 不应有 scripts 目录 + scripts_dir = skill_dir / "scripts" + if scripts_dir.exists(): + errors.append("用户 Skill 不应包含 scripts 目录") + + return { + "valid": len(errors) == 0, + "errors": errors, + "warnings": warnings + } + +if __name__ == "__main__": + input_data = json.loads(sys.argv[1]) + result = validate_skill_structure(input_data["skillPath"]) + print(json.dumps(result, ensure_ascii=False)) +``` + +#### 6.4.4 用户创建 Skill 的流程 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ 用户创建 Skill (使用 skill-creator) │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 用户点击 "创建 Skill" │ +│ ↓ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ Agent 加载 skill-creator Skill (内置,有 Scripts) │ │ +│ │ │ │ +│ │ Step 1: 引导用户输入基本信息 │ │ +│ │ ┌─────────────────────────────────────────────────┐ │ │ +│ │ │ Skill 名称: [古风对话大师] │ │ │ +│ │ │ 分类: [编剧 > 对话风格] │ │ │ +│ │ │ 描述: [用于创作古风风格的对话...] │ │ │ +│ │ └─────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ Step 2: 调用 generateSkillTemplate (Scripts) │ │ +│ │ ┌─────────────────────────────────────────────────┐ │ │ +│ │ │ 执行: python generate_template.py ... │ │ │ +│ │ │ 输出: 生成 SKILL.md 模板文件 │ │ │ +│ │ └─────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ Step 3: 用户编辑行为指导 │ │ +│ │ ┌─────────────────────────────────────────────────┐ │ │ +│ │ │ # Skill: 古风对话大师 │ │ │ +│ │ │ ## 行为指导 │ │ │ +│ │ │ 你创作对话时必须遵循以下规则: │ │ │ +│ │ │ [用户编辑内容...] 
│ │ │
+│  │ └─────────────────────────────────────────────────┘   │  │
+│  │                                                       │  │
+│  │ Step 4: 调用 validateSkillStructure (Scripts)         │  │
+│  │ ┌─────────────────────────────────────────────────┐   │  │
+│  │ │ 执行: python validate_skill.py ...              │   │  │
+│  │ │ 输出: {"valid": true, "errors": [], ...}        │   │  │
+│  │ └─────────────────────────────────────────────────┘   │  │
+│  │                                                       │  │
+│  │ Step 5: 保存 Skill                                    │  │
+│  │ ┌─────────────────────────────────────────────────┐   │  │
+│  │ │ 保存到: skills/user-skills/dialogue-writer-ancie │   │  │
+│  │ │ 类型: user (无 Scripts)                          │   │  │
+│  │ └─────────────────────────────────────────────────┘   │  │
+│  │                                                       │  │
+│  └───────────────────────────────────────────────────────┘  │
+│                            ↓                                │
+│                    用户 Skill 创建完成                       │
+│                                                             │
+└─────────────────────────────────────────────────────────────────┘
+```
+
+### 6.5 Agent 执行流程
+
+```typescript
+// Agent 执行单集的完整流程
+async function executeEpisode(
+  project: SeriesProject,
+  episodeNumber: number,
+  agent: Agent
+): Promise<EpisodeResult> {
+
+  let attempt = 0;
+  const maxRetries = project.autoRetryConfig?.maxRetries ?? 3;
+  const threshold = project.autoRetryConfig?.qualityThreshold ?? 85;
+
+  // 上一轮审核的修改建议,跨循环保留,重试时注入新加载的上下文
+  let retryContext: { attempt: number; previousResult: any; review: any } | undefined;
+
+  while (attempt < maxRetries) {
+    // Step 1: 加载上下文(重试时带上上一轮的审核建议)
+    const context = await loadContext(project, episodeNumber);
+    if (retryContext) {
+      context.retryContext = retryContext;
+    }
+
+    // Step 2: 执行工作流
+    let currentContext = context;
+    const results: Record<string, any> = {};
+
+    for (const stage of agent.workflow) {
+      // 获取该阶段使用的 Skill:优先使用项目中启用的用户配置,
+      // 否则回退到 Agent 的默认绑定
+      const skillId = project.skillSettings[stage.skillId]?.enabled
+        ? stage.skillId
+        : agent.skillBindings[stage.id];
+
+      const skill = await loadSkill(skillId);
+
+      // 关键:将 Skill 内容融入提示词
+      const prompt = buildPromptWithSkill(
+        stage.task,
+        skill.behaviorGuide,  // ← Skill 的行为指导
+        currentContext,
+        skill.config
+      );
+
+      // 调用 LLM
+      const result = await callLLM(prompt);
+
+      // 应用工具(如果有;对应 6.2 中 Skill 的 tools 字段)
+      if (skill.tools) {
+        const toolResult = await applyTools(result, skill.tools);
+        results[stage.id] = toolResult;
+      } else {
+        results[stage.id] = result;
+      }
+
+      currentContext = { ...currentContext, ...result };
+    }
+
+    // Step 3: 一致性审核
+    const review = await consistencyCheck(
+      project.globalContext,
+      project.memory,
+      results,
+      episodeNumber
+    );
+
+    // Step 4: 质量判断
+    if (review.score >= threshold) {
+      // 通过 → 更新记忆系统
+      await updateMemory(project, episodeNumber, results, review);
+      return {
+        success: true,
+        content: results,
+        qualityScore: review.score
+      };
+    }
+
+    // 不通过 → 重试
+    attempt++;
+
+    if (attempt >= maxRetries) {
+      // 达到最大重试次数
+      return {
+        success: false,
+        content: results,
+        needsReview: true,
+        review: review,
+        qualityScore: review.score
+      };
+    }
+
+    // 根据审核建议重新生成:记录本轮结果与审核意见,
+    // 作为额外上下文在下一轮循环开头注入
+    retryContext = {
+      attempt: attempt,
+      previousResult: results,
+      review: review
+    };
+  }
+
+  // 正常情况下不可达:循环内要么审核通过返回,要么在达到最大重试次数时返回
+  throw new Error('executeEpisode: unexpected fall-through');
+}
+
+// 将 Skill 内容融入提示词 (核心机制)
+function buildPromptWithSkill(
+  task: string,
+  skillBehaviorGuide: string,  // Skill 的行为指导
+  context: any,
+  skillConfig: any
+): string {
+  return `
+你是剧集创作专家。
+
+# 当前任务
+${task}
+
+# 行为指导 (来自 Skill)
+${skillBehaviorGuide}
+
+# 上下文信息
+${JSON.stringify(context)}
+
+# 配置参数
+${JSON.stringify(skillConfig)}
+
+请严格按照行为指导完成任务。
+`;
+}
+```
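+
+上面流程中的 `loadContext` 负责把"全局上下文 + 记忆系统 + 已完成集数摘要"拼装成单集创作的输入,文中未给出其实现。下面是一个最小示意(字段组织方式为假设,`recentEpisodeSummaries` 等名称仅用于说明,非平台正式 API):
+
+```typescript
+// 加载单集创作所需的上下文(示意实现)
+async function loadContext(project: SeriesProject, episodeNumber: number) {
+  // 只取最近若干集的摘要,避免把全部历史塞进提示词
+  const recentEpisodeSummaries = Object.values(project.episodes)
+    .filter(ep => ep.status === 'completed' && ep.number < episodeNumber)
+    .sort((a, b) => a.number - b.number)
+    .slice(-5)
+    .map(ep => ({ number: ep.number, summary: ep.summary ?? '' }));
+
+  return {
+    episodeNumber,
+    // 项目级共享设定:世界观、人物小传、场景、大纲、风格
+    globalContext: project.globalContext,
+    // 记忆系统:事件时间线、未收线的伏笔、人物状态历史
+    memory: {
+      eventTimeline: project.memory.eventTimeline,
+      pendingThreads: project.memory.pendingThreads.filter(t => !t.resolved),
+      characterStates: project.memory.characterStates
+    },
+    recentEpisodeSummaries
+  };
+}
+```
+
+这样组织的好处是提示词体积可控:只携带未收线的伏笔和最近几集的摘要,同时保证下文 6.6 数据流中"加载项目配置"一步所需的信息都集中在一个对象里。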
+┌────────────────────────────────────────────────────────��──────┐ +│ FOR EACH episode: │ +│ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ FOR EACH workflow stage: │ │ +│ │ │ │ +│ │ 1. 获取该阶段的 Skill │ │ +│ │ 2. 将 Skill.behaviorGuide 融入提示词 │ │ +│ │ 3. 调用 LLM → 输出结果 │ │ +│ │ 4. 应用工具 (如果有) │ │ +│ │ 5. 更新上下文 │ │ +│ │ │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ ↓ 所有阶段完成后 │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ 一致性审核 (consistency-checker Skill) │ │ +│ │ │ │ +│ │ quality >= threshold? │ │ +│ │ │ │ │ +│ │ ├── YES → 更新记忆系统 → 下一集 │ │ +│ │ │ │ │ +│ │ └── NO → attempt < maxRetries? │ │ +│ │ │ │ │ +│ │ ├── YES → 重试 (带上审核建议) │ │ +│ │ │ │ │ +│ │ └── NO → 标记需审核 → 下一集 │ │ +│ │ │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +└───────────────────────────────────────────────────────────────┘ + ↓ +┌───────────────────────────────────────────────────────────────┐ +│ 全部完成 → 生成质量报告 → 通知用户 │ +└───────────────────────────────────────────────────────────────┘ +``` + +--- + +## 七、Skill Marketplace + +### 7.1 市场界面 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Skill 市场 │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ 分类: [全部] [对话风格] [结构分析] [一致性审核] [质量评分] │ +│ 排序: [热门] [最新] [评分 ▼] │ +│ │ +│ 🔥 热门 Skills │ +│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │ +│ │ 🗡️ 古风对话大师 │ │ 💕 甜宠氛围营造 │ │ 🔍 悬疑伏笔布局 │ │ +│ │ │ │ │ │ │ │ +│ │ ⭐ 4.9 (2.3k) │ │ ⭐ 4.8 (1.8k) │ │ ⭐ 4.7 (1.2k) │ │ +│ │ │ │ │ │ │ │ +│ │ 对话风格 > 古风 │ │ 对话风格 > 甜宠 │ │ 剧情技巧 > 伏笔 │ │ +│ │ │ │ │ │ │ │ +│ │ [安装] [预览] │ │ [安装] [预览] │ │ [安装] [预览] │ │ +│ └─────────────────┘ └─────────────────┘ └─────────────────┘ │ +│ │ +│ 📚 更多分类 │ +│ • 对话风格 | • 叙事节奏 | • 人物塑造 | • 场景描写 │ +│ • 结构框架 | • 一致性审核 | • 质量评分 | • 格式转换 │ +│ │ +│ 🎨 我的自定义 Skills │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ ✨ 我的第一版古风 Skill (基于"古风对话大师"修改) │ │ +│ │ • 降低了对话密度,增加了环境描写 │ │ +│ │ • 调整了皇帝说话的威严程度 │ │ +│ │ ⭐ 4.5 (12次使用) │ │ +│ │ [编辑] [发布到市场] [删除] │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ [发布我的 Skill] │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 7.2 Skill 分享机制 + +| 功能 | 说明 | +|-----|-----| +| **Fork 修改** | 基于现有 Skill 创建副本,修改后发布为独立 Skill | +| **版本管理** | Skill 支持多版本,用户可选择版本 | +| **评分评论** | 用户可评分、评论,分享使用体验 | +| **使用统计** | 显示 Skill 的使用次数、成功率 | +| **推荐算法** | 基于项目类型推荐合适的 Skills | + +--- + +## 八、技术栈建议 + +### 8.1 后端技术栈 + +| 层级 | 技术选择 | +|-----|---------| +| **语言** | Python | +| **API 框架** | FastAPI | +| **向量数据库** | Chroma | +| **文档数据库** | MongoDB (项目、记忆系统) | +| **文件存储** | 本地 / S3 | +| **消息队列** | Redis (异步任务) | +| **LLM 接口** | LangChain | + +### 8.2 前端技术栈 + +| 层级 | 技术选择 | +|-----|---------| +| **框架** | React / Vue | +| **状态管理** | Zustand / Pinia | +| **UI 组件** | shadcn/ui / Element Plus | +| **编辑器** | Monaco / CodeMirror | +| **Markdown** | Monaco + Remark | + +### 8.3 部署架构 + +``` +┌─────────────────────────────────────────────────────────────┐ +│ 负载均衡 │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ Web 服务 (多实例) │ +│ ┌────────────┐ ┌────────────┐ ┌────────────┐ │ +│ │ Instance 1 │ │ Instance 2 │ │ Instance 3 │ │ +│ └────────────┘ └────────────┘ └────────────┘ │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ 消息队列 (Redis) │ 
+└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ Worker 服务 (异步任务) │ +│ ┌────────────┐ ┌────────────┐ │ +│ │ LLM Worker │ │ Skill Worker│ │ +│ └────────────┘ └────────────┘ │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +## 九、开发路线图 + +### Phase 1: MVP (最小可行产品) +- [ ] Skill 基础管理系统(创建、编辑、配置) +- [ ] Agent 固定框架(剧集创作 Agent) +- [ ] Series Project 数据结构 +- [ ] 全局上下文管理 +- [ ] 基础 UI(三大管理中心) + +### Phase 2: 核心功能 +- [ ] 记忆系统(事件时间线、伏笔追踪) +- [ ] 一致性审核 Skill +- [ ] 全自动创作模式 +- [ ] 自动重试机制 +- [ ] 单集执行完整流程 + +### Phase 3: 高级功能 +- [ ] Skill Marketplace +- [ ] Skill Fork 和分享 +- [ ] 风格预设系统 +- [ ] 质量报告生成 +- [ ] 半自动模式 + +### Phase 4: 生态扩展 +- [ ] 自定义模型接入 +- [ ] 更多 Agent 类型(分镜、小说等) +- [ ] 团队协作 +- [ ] API 开放平台 + +--- + +## 十、与原文档的对比 + +| 方面 | 原设计 | 新设计 (Skill-centric) | +|-----|-------|----------------------| +| **交互方式** | 问答式 | 点击式配置 | +| **Agent** | 用户创建和维护 | 平台提供固定模板 | +| **Skill** | 主要是提示词文件 | 可配置的行为指导单元 | +| **项目结构** | 单一项目 | Series Project + Episodes | +| **一致性保障** | 无 | 全局上下文 + 记忆系统 | +| **运行模式** | 手动逐步执行 | 全自动 + 半自动 | +| **用户操作** | 编排 Agent 工作流 | 配置 Skill 参数 | +| **多集创作** | 无机制支持 | 专门的记忆系统 | +| **Skill 工作方式** | 简单引用 | 融入提示词指导行为 | + +--- + +## 十一、核心设计原则总结 + +### 11.1 "Agent 固定,Skill 可配" + +``` +平台维护 Agent 框架 用户配置 Skill 内容 + ↓ ↓ + 稳定的执行层 灵活的能力层 +``` + +**优势**: +- 平台稳定性高(Agent 很少改动) +- 用户学习成本低(只需配置 Skill) +- 问题容易定位(框架固定,问题在 Skill) +- 生态可繁荣(Skill 可分享) + +### 11.2 "Skill 指导行为" + +``` +Skill.behaviorGuide → 融入提示词 → LLM 根据指导输出 +``` + +**本质**:与 Claude Code Skills 相同的工作方式 + +**价值**:用户写的 Skill 真正能影响 Agent 的创作行为 + +### 11.3 "全局上下文 + 记忆系统" + +``` +Series Project + ├── 全局上下文 (所有集数共享) + └── 记忆系统 (自动维护一致性) + ├── 事件时间线 + ├── 伏笔追踪 + └── 人物状态 +``` + +**解决的问题**:多集创作的一致性问题 + +### 11.4 "全自动 + 人工审核" + +``` +全自动创作 → 自动重试 → 质量报告 → 人工审核 +``` + +**用户体验**:设置好后,坐等完成,最后审核导出 + +--- + +*版本: 2.0.0* +*更新日期: 2024* +*主要变更: 重构为 Skill-centric 架构,增加记忆系统和全自动模式* diff --git a/docs/API.md b/docs/API.md new file mode 100644 index 0000000..0d3faad --- /dev/null +++ b/docs/API.md @@ -0,0 +1,734 @@ +# Creative Studio API 文档 + +## 概述 + +Creative Studio 提供 RESTful API 接口,支持项目管理、Skill 管理、记忆系统、审核系统等功能。 + +**Base URL**: `http://localhost:8000/api/v1` + +**认证方式**: Bearer Token (即将支持) + +**数据格式**: JSON + +--- + +## 目录 + +- [Skill 管理 API](#skill-管理-api) +- [项目管理 API](#项目管理-api) +- [记忆系统 API](#记忆系统-api) +- [审核系统 API](#审核系统-api) +- [错误处理](#错误处理) +- [使用示例](#使用示例) + +--- + +## Skill 管理 API + +### 1. 列出所有 Skills + +```http +GET /api/v1/skills +``` + +**查询参数**: + +| 参数 | 类型 | 必填 | 说明 | +|-----|------|-----|------| +| skill_type | string | 否 | 筛选类型 (builtin/user) | +| category | string | 否 | 筛选分类 | + +**响应示例**: + +```json +[ + { + "id": "dialogue-writer-ancient", + "name": "古风对话大师", + "version": "2.1.0", + "type": "builtin", + "category": "编剧 > 对话风格", + "behavior_guide": "你是专业的古风剧集编剧...", + "tags": ["古风", "对话", "编剧"], + "created_at": "2024-01-15T10:00:00Z" + } +] +``` + +--- + +### 2. 获取 Skill 详情 + +```http +GET /api/v1/skills/{skill_id} +``` + +**路径参数**: + +| 参数 | 类型 | 说明 | +|-----|------|------| +| skill_id | string | Skill ID | + +**响应示例**: + +```json +{ + "id": "dialogue-writer-ancient", + "name": "古风对话大师", + "version": "2.1.0", + "author": "Creative Studio Platform", + "type": "builtin", + "behavior_guide": "你是专业的古风剧集编剧...", + "config": { + "preset": null, + "parameters": {}, + "weights": null + }, + "category": "编剧 > 对话风格", + "tags": ["古风", "对话", "编剧"] +} +``` + +--- + +### 3. 
创建新 Skill + +```http +POST /api/v1/skills +``` + +**请求体**: + +```json +{ + "id": "my-custom-skill", + "name": "我的自定义 Skill", + "content": "# Skill: 我的自定义 Skill\n\n## 行为指导\n你是...", + "category": "自定义", + "tags": ["测试"] +} +``` + +**响应**: 返回创建的 Skill 对象 + +--- + +### 4. 测试 Skill + +```http +POST /api/v1/skills/{skill_id}/test +``` + +**请求体**: + +```json +{ + "test_input": "请帮我创作一段古风对话", + "context": { + "scene": "御花园", + "characters": ["李云飞", "苏婉儿"] + }, + "temperature": 0.7 +} +``` + +**响应示例**: + +```json +{ + "skill_id": "dialogue-writer-ancient", + "skill_name": "古风对话大师", + "response": "【场景】御花园,傍晚\n【人物】李云飞、苏婉儿\n\n李云飞:(拱手)苏姑娘...", + "usage": { + "prompt_tokens": 150, + "completion_tokens": 300, + "total_tokens": 450 + } +} +``` + +--- + +### 5. 更新 Skill + +```http +PUT /api/v1/skills/{skill_id} +``` + +**请求体**: + +```json +{ + "name": "更新后的名称", + "content": "更新后的内容..." +} +``` + +--- + +### 6. 删除 Skill + +```http +DELETE /api/v1/skills/{skill_id} +``` + +**响应**: 204 No Content + +--- + +### 7. 重新加载 Skill + +```http +POST /api/v1/skills/{skill_id}/reload +``` + +用于清除缓存并重新加载 Skill。 + +--- + +### 8. 列出所有分类 + +```http +GET /api/v1/skills/categories/list +``` + +**响应示例**: + +```json +["编剧 > 对话风格", "审核 > 一致性", "通用"] +``` + +--- + +## 项目管理 API + +### 1. 列出所有项目 + +```http +GET /api/v1/projects +``` + +**查询参数**: + +| 参数 | 类型 | 必填 | 说明 | +|-----|------|-----|------| +| status | string | 否 | 筛选状态 | +| agent_id | string | 否 | 筛选 Agent 类型 | + +**响应示例**: + +```json +[ + { + "id": "proj-123", + "name": "我的古风剧集", + "description": "一部古装爱情剧", + "agent_id": "series-creation-agent", + "status": "active", + "created_at": "2024-01-15T10:00:00Z", + "episode_count": 10, + "completed_episodes": 3 + } +] +``` + +--- + +### 2. 创建项目 + +```http +POST /api/v1/projects +``` + +**请求体**: + +```json +{ + "name": "我的古风剧集", + "description": "一部古装爱情剧", + "agent_id": "series-creation-agent", + "global_context": { + "world_setting": "古代架空世界...", + "character_profiles": [...], + "scene_settings": [...], + "overall_outline": "..." + } +} +``` + +--- + +### 3. 获取项目详情 + +```http +GET /api/v1/projects/{project_id} +``` + +**响应示例**: + +```json +{ + "id": "proj-123", + "name": "我的古风剧集", + "description": "一部古装爱情剧", + "agent_id": "series-creation-agent", + "status": "active", + "global_context": {...}, + "memory": {...}, + "episodes": [...] +} +``` + +--- + +### 4. 更新项目 + +```http +PUT /api/v1/projects/{project_id} +``` + +--- + +### 5. 删除项目 + +```http +DELETE /api/v1/projects/{project_id} +``` + +--- + +### 6. 执行单集创作 + +```http +POST /api/v1/projects/{project_id}/execute +``` + +**请求体**: + +```json +{ + "episode_number": 4, + "config": { + "duration_minutes": 45, + "focus_areas": ["情感发展", "剧情推进"] + } +} +``` + +**响应示例**: + +```json +{ + "episode_id": "ep-proj-123-4", + "episode_number": 4, + "status": "in_progress", + "stages": { + "structure": "completed", + "outline": "in_progress", + "dialogue": "pending", + "review": "pending", + "memory": "pending" + } +} +``` + +--- + +### 7. 批量执行剧集创作 + +```http +POST /api/v1/projects/{project_id}/execute-batch +``` + +**请求体**: + +```json +{ + "start_episode": 4, + "end_episode": 6, + "batch_config": { + "quality_threshold": 85, + "max_retries": 2 + } +} +``` + +--- + +### 8. 获取剧集列表 + +```http +GET /api/v1/projects/{project_id}/episodes +``` + +--- + +### 9. 获取单集详情 + +```http +GET /api/v1/projects/{project_id}/episodes/{episode_number} +``` + +--- + +## 记忆系统 API + +### 1. 
获取项目记忆 + +```http +GET /api/v1/projects/{project_id}/memory +``` + +**响应示例**: + +```json +{ + "eventTimeline": [...], + "pendingThreads": [...], + "characterStates": {...}, + "foreshadowing": [...], + "relationships": {...} +} +``` + +--- + +### 2. 获取事件时间线 + +```http +GET /api/v1/projects/{project_id}/memory/timeline +``` + +**查询参数**: + +| 参数 | 类型 | 必填 | 说明 | +|-----|------|-----|------| +| character | string | 否 | 筛选角色 | +| episode | int | 否 | 筛选集数 | +| importance | string | 否 | 筛选重要性 (low/medium/high) | + +--- + +### 3. 获取待收线问题 + +```http +GET /api/v1/projects/{project_id}/memory/threads +``` + +--- + +### 4. 添加待收线问题 + +```http +POST /api/v1/projects/{project_id}/memory/threads +``` + +**请求体**: + +```json +{ + "description": "需要解释为什么主角会失去记忆", + "importance": "high", + "reminder_episode": 5 +} +``` + +--- + +### 5. 更新待收线问题 + +```http +PUT /api/v1/projects/{project_id}/memory/threads/{thread_id} +``` + +--- + +### 6. 标记问题已解决 + +```http +POST /api/v1/projects/{project_id}/memory/resolve-thread +``` + +**请求体**: + +```json +{ + "thread_id": "thread-123", + "resolution_episode": 5, + "resolution_description": "在第五集中揭示..." +} +``` + +--- + +### 7. 获取人物状态 + +```http +GET /api/v1/projects/{project_id}/memory/characters +``` + +--- + +## 审核系统 API + +### 1. 获取审核配置 + +```http +GET /api/v1/projects/{project_id}/review-config +``` + +**响应示例**: + +```json +{ + "enabled_review_skills": ["consistency_checker", "dialogue_quality_checker"], + "overall_strictness": "medium", + "dimension_settings": {...}, + "custom_rules": [...] +} +``` + +--- + +### 2. 更新审核配置 + +```http +PUT /api/v1/projects/{project_id}/review-config +``` + +**请求体**: + +```json +{ + "enabled_review_skills": ["consistency_checker"], + "overall_strictness": "strict", + "dimension_settings": { + "character_consistency": { + "enabled": true, + "strictness": 0.8, + "weight": 0.5 + } + } +} +``` + +--- + +### 3. 获取审核配置预设 + +```http +GET /api/v1/projects/{project_id}/review-presets +``` + +--- + +### 4. 执行剧集审核 + +```http +POST /api/v1/projects/{project_id}/episodes/{episode_number}/review +``` + +**响应示例**: + +```json +{ + "episode_id": "ep-proj-123-4", + "overall_score": 85.5, + "passed": true, + "dimension_scores": { + "character_consistency": 88, + "plot_coherence": 82, + "dialogue_quality": 90 + }, + "issues": [ + { + "id": "issue-1", + "type": "character_inconsistency", + "dimension": "character_consistency", + "severity": "medium", + "location": { + "episode": 4, + "scene": 2, + "line": 15 + }, + "description": "角色行为与设定不符", + "suggestion": "建议修改为...", + "auto_fix_available": true + } + ], + "new_foreshadowing": [...], + "resolved_threads": [...] +} +``` + +--- + +### 5. 获取可用的审核维度 + +```http +GET /api/v1/review/dimensions +``` + +--- + +### 6. 添加自定义规则 + +```http +POST /api/v1/review/custom-rules +``` + +**请求体**: + +```json +{ + "project_id": "proj-123", + "rule": { + "name": "对话自然性检查", + "description": "检查对话是否过于生硬", + "trigger_condition": "对话包含超过3个连续的形容词", + "severity": "medium" + } +} +``` + +--- + +### 7. 
删除自定义规则 + +```http +DELETE /api/v1/review/custom-rules/{rule_id} +``` + +--- + +## 错误处理 + +所有 API 在出错时返回以下格式: + +```json +{ + "detail": "错误描述信息" +} +``` + +**常见 HTTP 状态码**: + +| 状态码 | 说明 | +|-------|------| +| 200 | 成功 | +| 201 | 创建成功 | +| 204 | 成功(无内容) | +| 400 | 请求参数错误 | +| 404 | 资源不存在 | +| 422 | 数据验证失败 | +| 500 | 服务器内部错误 | + +--- + +## 使用示例 + +### Python 示例 + +```python +import requests + +BASE_URL = "http://localhost:8000/api/v1" + +# 列出所有 Skills +response = requests.get(f"{BASE_URL}/skills") +skills = response.json() +print(skills) + +# 测试 Skill +response = requests.post( + f"{BASE_URL}/skills/dialogue-writer-ancient/test", + json={ + "test_input": "请帮我创作一段古风对话", + "temperature": 0.7 + } +) +result = response.json() +print(result["response"]) + +# 创建项目 +response = requests.post( + f"{BASE_URL}/projects", + json={ + "name": "我的古风剧集", + "agent_id": "series-creation-agent", + "global_context": {...} + } +) +project = response.json() +print(project) +``` + +### JavaScript 示例 + +```javascript +const BASE_URL = "http://localhost:8000/api/v1"; + +// 列出所有 Skills +const response = await fetch(`${BASE_URL}/skills`); +const skills = await response.json(); +console.log(skills); + +// 测试 Skill +const result = await fetch(`${BASE_URL}/skills/dialogue-writer-ancient/test`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + test_input: "请帮我创作一段古风对话", + temperature: 0.7 + }) +}); +const data = await result.json(); +console.log(data.response); + +// 创建项目 +const project = await fetch(`${BASE_URL}/projects`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + name: "我的古风剧集", + agent_id: "series-creation-agent", + global_context: {...} + }) +}); +const projectData = await project.json(); +console.log(projectData); +``` + +--- + +## 附录 + +### Skill 类型说明 + +- **builtin**: 平台内置的 Skill,用户不可修改 +- **user**: 用户自定义的 Skill,可以编辑和删除 + +### 项目状态说明 + +- **draft**: 草稿状态 +- **active**: 活跃项目 +- **completed**: 已完成 +- **archived**: 已归档 + +### 剧集状态说明 + +- **pending**: 待创作 +- **in_progress**: 创作中 +- **completed**: 已完成 +- **needs-review**: 需要审核 +- **failed**: 创作失败 + +### 审核问题严重程度说明 + +- **low**: 低优先级问题 +- **medium**: 中等优先级问题 +- **high**: 高优先级问题 diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md new file mode 100644 index 0000000..5f46484 --- /dev/null +++ b/docs/ARCHITECTURE.md @@ -0,0 +1,502 @@ +# Creative Studio - 项目架构说明 + +## 整体架构 + +``` +┌─────────────────────────────────────────────────────────────┐ +│ 前端 (React) │ +│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ +│ │ Skill管理 │ │ 项目管理 │ │ 记忆系统 │ │ 审核系统 │ │ +│ └──────────┘ └──────────┘ └──────────┘ └──────────┘ │ +└─────────────────────────────────────────────────────────────┘ + ↓ HTTP/WebSocket +┌─────────────────────────────────────────────────────────────┐ +│ 后端 (FastAPI) │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ API 层 │ │ +│ │ /api/v1/skills /api/v1/projects /api/v1/memory │ │ +│ └──────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ 核心层 │ │ +│ │ ┌────────────┐ ┌────────────┐ ┌────────────┐ │ │ +│ │ │ GLM Client │ │Skill Mgr │ │ Agent引擎 │ │ │ +│ │ └────────────┘ └────────────┘ └────────────┘ │ │ +│ │ ┌────────────┐ ┌────────────┐ │ │ +│ │ │Memory Mgr │ │Review Mgr │ │ │ +│ │ └────────────┘ └────────────┘ │ │ +│ └──────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ + ↓ 
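+```
+
+在展开外部服务一层之前,先用一段 Python 草图示意一次请求如何自上而下穿过各层(路由取自本文的 API 设计;`skill_manager` 与 `glm_client` 假设为应用启动时创建的单例,仅作示意,并非既定导出):
+
+```python
+# 示意:API 层 → 核心层 → 外部服务(GLM-4.7)的一次调用链
+from fastapi import FastAPI
+
+app = FastAPI()
+
+@app.post("/api/v1/skills/{skill_id}/test")
+async def test_skill(skill_id: str, body: dict):
+    skill = await skill_manager.load_skill(skill_id)   # 核心层:Skill Manager
+    text = await glm_client.chat_with_skill(           # 核心层:GLM Client
+        skill_behavior=skill.behavior_guide,           # Skill 的行为指导融入提示词
+        user_input=body["test_input"],
+        context=body.get("context", {}),
+        temperature=body.get("temperature", 0.7),
+    )                                                  # GLM Client 内部调用外部的 GLM-4.7
+    return {"skill_id": skill_id, "response": text}
+```
+
+核心层之下是外部服务:
+
+```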
+┌─────────────────────────────────────────────────────────────┐ +│ 外部服务 │ +│ ┌────────────┐ ┌────────────┐ ┌────────────┐ │ +│ │ GLM-4.7 │ │ MongoDB │ │ Redis │ │ +│ └────────────┘ └────────────┘ └────────────┘ │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +## 核心设计原则 + +1. **"Agent 固定,Skill 可配"** - 平台维护 Agent 框架,用户配置 Skills +2. **"Skill 指导行为"** - Skill 行为指导融入提示词,指导 LLM 行为 +3. **"全局上下文 + 记忆系统"** - 确保多集内容一致性 +4. **"全自动 + 人工审核"** - 自动创作与人工审核相结合 + +--- + +## 核心模块说明 + +### 1. GLM Client (GLM 客户端) + +**位置**: `backend/app/core/llm/glm_client.py` + +**功能**: +- 封装智谱 AI GLM-4.7 API +- 将 Skill 的行为指导融入提示词 +- 支持同步和异步调用 +- 错误处理和重试机制 + +**核心方法**: +```python +async def chat_with_skill( + skill_behavior: str, # Skill 的行为指导 + user_input: str, # 用户输入 + context: Dict, # 上下文信息 + temperature: float # 温度参数 +) -> str +``` + +--- + +### 2. Skill Manager (Skill 管理器) + +**位置**: `backend/app/core/skills/skill_manager.py` + +**功能**: +- 加载和解析 Skill 文件 +- 提取行为指导内容 +- 缓存管理 +- Skill 验证 + +**核心方法**: +```python +async def load_skill(skill_id: str) -> Skill +async def list_skills(skill_type: str, category: str) -> List[Skill] +async def create_user_skill(skill_data: SkillCreate) -> Skill +``` + +--- + +### 3. Skill 存储结构 + +``` +skills_storage/ +├── builtin_skills/ # 内置 Skills(平台维护) +│ ├── consistency_checker/ +│ │ └── SKILL.md +│ └── dialogue_writer_ancient/ +│ └── SKILL.md +│ +└── user_skills/ # 用户 Skills(用户创建) + └── {skill_id}/ + └── SKILL.md +``` + +--- + +### 4. Memory Manager (记忆管理器) + +**位置**: `backend/app/core/memory/memory_manager.py` + +**功能**: +- 事件时间线管理 +- 伏笔追踪 +- 人物状态变化记录 +- 关系网络维护 + +**核心方法**: +```python +async def extract_events_from_episode( + episode_content: str, + episode_number: int +) -> List[TimelineEvent] + +async def detect_foreshadowing( + episode_content: str, + episode_number: int +) -> List[ForeshadowingEvent] + +async def update_character_states( + episode_content: str, + episode_number: int, + characters: List[str] +) -> Dict[str, CharacterStateChange] +``` + +--- + +### 5. Review Manager (审核管理器) + +**位置**: `backend/app/core/review/review_manager.py` + +**功能**: +- 多维度质量审核 +- 自定义规则执行 +- 问题定位和修复建议 +- 审核结果聚合 + +**核心方法**: +```python +async def review_episode( + episode: Episode, + project: SeriesProject, + config: ReviewConfig +) -> ReviewResult +``` + +--- + +### 6. Agent 执行引擎 + +**位置**: `backend/app/core/agents/series_creation_agent.py` + +**功能**: +- 固定的工作流框架 +- 五阶段执行:结构分析 → 大纲生成 → 对话创作 → 一致性审核 → 记忆更新 +- 质量控制和自动重试 + +**执行流程**: +``` +开始 + ↓ +1. 结构分析 (Structure Analysis) + ↓ +2. 大纲生成 (Outline Generation) + ↓ +3. 对话创作 (Dialogue Creation) + ↓ +4. 一致性审核 (Consistency Review) + ↓ (如果未通过,重试) +5. 记忆更新 (Memory Update) + ↓ +完成 +``` + +--- + +## 数据流设计 + +### Skill 行为指导融入流程 + +``` +用户请求 + ↓ +加载 Skill (Skill Manager) + ↓ +提取行为指导 (behavior_guide) + ↓ +构建提示词 (GLM Client) + ↓ +发送到 GLM-4.7 + ↓ +返回结果 +``` + +### 提示词构建示例 + +``` +系统提示词: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +你是剧集创作专家。 + +以下是你的行为指导(来自配置的 Skill): +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +[Skill 的 behavior_guide 内容] +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +当前上下文: +- 场景: 御花园 +- 人物: 李云飞、苏婉儿 +- 历史: EP01中两人初遇... 
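+- 待收线: 为什么主角会失去记忆(示例条目,来自记忆系统的 pendingThreads)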
+ +用户输入: +请创作 EP02 场景 3 的对话 +``` + +--- + +## API 路由结构 + +``` +/api/v1/ +├── /skills # Skill 管理 +│ ├── GET / # 列出所有 Skills +│ ├── GET /{id} # 获取 Skill 详情 +│ ├── POST / # 创建新 Skill +│ ├── PUT /{id} # 更新 Skill +│ ├── DELETE /{id} # 删除 Skill +│ ├── POST /{id}/test # 测试 Skill +│ └── POST /{id}/reload # 重新加载 Skill +│ +├── /projects # 项目管理 +│ ├── GET / # 列出项目 +│ ├── POST / # 创建项目 +│ ├── GET /{id} # 获取项目详情 +│ ├── PUT /{id} # 更新项目 +│ ├── DELETE /{id} # 删除项目 +│ ├── POST /{id}/execute # 执行剧集创作 +│ └── POST /{id}/execute-batch # 批量执行 +│ +├── /memory # 记忆系统 +│ ├── GET /projects/{id}/memory # 获取记忆 +│ ├── GET /projects/{id}/memory/timeline # 获取时间线 +│ ├── GET /projects/{id}/memory/threads # 获取待收线问题 +│ ├── POST /projects/{id}/memory/threads # 添加待收线问题 +│ └── POST /projects/{id}/memory/resolve-thread # 标记已解决 +│ +└── /review # 审核系统 + ├── GET /projects/{id}/review-config # 获取审核配置 + ├── PUT /projects/{id}/review-config # 更新审核配置 + ├── POST /projects/{id}/episodes/{ep_num}/review # 执行审核 + └── GET /review/dimensions # 获取可用维度 +``` + +--- + +## 前端架构 + +### 组件结构 + +``` +src/ +├── pages/ # 页面组件 +│ ├── Dashboard.tsx # 仪表板 +│ ├── Skills/ +│ │ ├── SkillList.tsx # Skill 列表 +│ │ ├── SkillDetail.tsx # Skill 详情 +│ │ └── SkillEditor.tsx # Skill 编辑器 +│ ├── Projects/ +│ │ ├── ProjectList.tsx # 项目列表 +│ │ ├── ProjectCreate.tsx # 创建项目 +│ │ └── ProjectDetail.tsx # 项目详情 +│ ├── Memory/ +│ │ └── MemorySystem.tsx # 记忆系统 +│ └── Review/ +│ ├── ReviewConfig.tsx # 审核配置 +│ └── ReviewResults.tsx # 审核结果 +│ +├── components/ # 通用组件 +│ ├── Layout/ +│ │ ├── Header.tsx +│ │ ├── Footer.tsx +│ │ └── Sidebar.tsx +│ ├── SkillCard.tsx +│ ├── EpisodeCard.tsx +│ └── QualityScore.tsx +│ +├── stores/ # Zustand 状态管理 +│ ├── skillStore.ts +│ ├── projectStore.ts +│ ├── memoryStore.ts +│ └── reviewStore.ts +│ +├── services/ # API 服务 +│ ├── api.ts # Axios 配置 +│ ├── skillService.ts +│ ├── projectService.ts +│ ├── memoryService.ts +│ └── reviewService.ts +│ +├── hooks/ # 自定义 Hooks +│ ├── useSkills.ts +│ ├── useProjects.ts +│ └── useEpisodeExecution.ts +│ +└── types/ # TypeScript 类型 + ├── skill.ts + ├── project.ts + ├── memory.ts + └── review.ts +``` + +### 状态管理 + +使用 Zustand 进行轻量级状态管理: + +```typescript +// stores/skillStore.ts +interface SkillStore { + skills: Skill[]; + loading: boolean; + error: string | null; + fetchSkills: () => Promise<void>; + createSkill: (skill: Partial<Skill>) => Promise<void>; +} + +export const useSkillStore = create<SkillStore>((set, get) => ({ + skills: [], + loading: false, + error: null, + fetchSkills: async () => { /* ... */ }, + createSkill: async (skill) => { /* ... */ } +})); +``` + +--- + +## 数据模型 + +### 核心数据模型 + +```python +# Skill +class Skill(BaseModel): + id: str + name: str + version: str + type: Literal["builtin", "user"] + category: str + behavior_guide: str + tags: List[str] + config: SkillConfig + +# Project +class SeriesProject(BaseModel): + id: str + name: str + description: str + agent_id: str + status: str + global_context: GlobalContext + memory: Memory + episodes: List[Episode] + +# Memory +class Memory(BaseModel): + eventTimeline: List[TimelineEvent] + pendingThreads: List[PendingThread] + characterStates: Dict[str, List[CharacterStateChange]] + foreshadowing: List[ForeshadowingEvent] + relationships: Dict[str, Dict[str, str]] + +# Review Result +class ReviewResult(BaseModel): + episode_id: str + overall_score: float + passed: bool + dimension_scores: Dict[str, float] + issues: List[ReviewIssue] +``` + +--- + +## 扩展指南 + +### 添加新的内置 Skill + +1. 在 `backend/skills_storage/builtin_skills/` 下创建目录 +2. 
创建 `SKILL.md` 文件 +3. 按照模板编写内容 +4. 重启后端服务自动加载 + +### 添加新的 API 端点 + +1. 在 `backend/app/api/v1/` 下创建路由文件 +2. 定义路由和处理器 +3. 在 `backend/app/main.py` 中注册路由 + +### 添加新的前端页面 + +1. 在 `frontend/src/pages/` 下创建组件 +2. 在 `frontend/src/App.tsx` 中添加路由 +3. 配置导航菜单 + +--- + +## 性能优化建议 + +### 后端 + +1. **Skill 缓存**: 使用内存缓存避免频繁文件读取 +2. **异步处理**: 使用 Celery 处理耗时任务 +3. **连接池**: 复用 MongoDB 和 Redis 连接 +4. **响应压缩**: 启用 Gzip 压缩 + +### 前端 + +1. **代码分割**: 使用 React.lazy 和 Suspense +2. **状态管理**: 使用 Zustand 避免不必要的重渲染 +3. **请求缓存**: 使用 SWR 或 React Query +4. **图片优化**: 使用 WebP 格式和懒加载 + +--- + +## 安全考虑 + +1. **API Key 保护**: 使用环境变量,不提交到 Git +2. **输入验证**: 使用 Pydantic 验证所有输入 +3. **CORS 配置**: 限制允许的源 +4. **错误处理**: 不暴露敏感信息 + +--- + +## 部署架构 + +``` +┌─────────────────────────────────────────────────────────┐ +│ Nginx 反向代理 │ +│ (静态文件 + API 转发 + SSL) │ +└─────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────┐ +│ FastAPI 应用 (多实例 + Gunicorn) │ +└─────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────┐ +│ Celery Worker (异步任务 + LLM 调用) │ +└─────────────────────────────────────────────────────────┘ + +┌─────────────┐ ┌─────────────┐ ┌─────────────┐ +│ MongoDB │ │ Redis │ │ MinIO/S3 │ +│ (数据存储) │ │ (任务队列) │ │ (文件存储) │ +└─────────────┘ └─────────────┘ └─────────────┘ +``` + +--- + +## 技术栈总览 + +### 后端 + +- **Python 3.11+** - 编程语言 +- **FastAPI** - 高性能异步 Web 框架 +- **MongoDB** - 文档数据库 +- **Redis** - 缓存和消息队列 +- **zai-sdk** - 智谱 AI SDK +- **Pydantic** - 数据验证 +- **Uvicorn** - ASGI 服务器 + +### 前端 + +- **React 18** - UI 框架 +- **TypeScript** - 类型安全 +- **Vite** - 构建工具 +- **Ant Design 5** - UI 组件库 +- **Zustand** - 状态管理 +- **React Router** - 客户端路由 +- **Axios** - HTTP 客户端 +- **Recharts** - 数据可视化 + +--- + +## 扩展阅读 + +- [FastAPI 文档](https://fastapi.tiangolo.com/) +- [React 文档](https://react.dev/) +- [Ant Design 文档](https://ant.design/) +- [智谱 AI 文档](https://docs.bigmodel.cn/) +- [Zustand 文档](https://zustand-demo.pmnd.rs/) +- [Vite 文档](https://vitejs.dev/) diff --git a/frontend/.gitignore b/frontend/.gitignore new file mode 100644 index 0000000..a547bf3 --- /dev/null +++ b/frontend/.gitignore @@ -0,0 +1,24 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? 
diff --git a/frontend/README.md b/frontend/README.md new file mode 100644 index 0000000..01d1526 --- /dev/null +++ b/frontend/README.md @@ -0,0 +1,156 @@ +# Creative Studio Frontend + +基于 React 18 + TypeScript + Vite 的前端应用。 + +## 快速开始 + +详细的快速启动指南请参考项目根目录的 [README.md](../README.md)。 + +### 安装依赖 + +```bash +npm install +``` + +### 配置环境变量 + +创建 `.env.local` 文件: + +```env +VITE_API_BASE_URL=http://localhost:8000 +``` + +### 启动开发服务器 + +```bash +npm run dev +``` + +应用将运行在 `http://localhost:5173` + +### 构建生产版本 + +```bash +npm run build +``` + +## 技术栈 + +- React 18 +- TypeScript +- Vite +- Ant Design 5 +- Zustand (状态管理) +- React Router +- Axios +- Tailwind CSS + +## 主要功能 + +- Skill 管理界面 +- 项目管理界面 +- 记忆系统可视化 +- 审核系统配置和结果展示 +- 执行监控 + +## 开发指南 + +完整的开发指南请参考 [DEVELOPMENT_GUIDE.md](../DEVELOPMENT_GUIDE.md)。 + +## 项目结构 + +``` +frontend/ +├── src/ +│ ├── pages/ # 页面组件 +│ │ ├── HomePage.tsx # 首页 +│ │ ├── SkillManagement.tsx # Skill 管理 +│ │ ├── AgentManagement.tsx # Agent 管理 +│ │ ├── ProjectList.tsx # 项目列表 +│ │ ├── ProjectCreate.tsx # 创建项目 +│ │ ├── ProjectDetail.tsx # 项目详情 +│ │ ├── MemorySystem.tsx # 记忆系统 +│ │ ├── ReviewConfig.tsx # 审核配置 +│ │ └── ReviewResults.tsx # 审核结果 +│ │ +│ ├── components/ # 通用组件 +│ ├── stores/ # Zustand 状态管理 +│ ├── services/ # API 服务 +│ ├── hooks/ # 自定义 Hooks +│ ├── types/ # TypeScript 类型 +│ └── utils/ # 工具函数 +│ +├── index.html +├── package.json +├── vite.config.ts +└── tsconfig.json +``` + +## 可用脚本 + +```bash +# 启动开发服务器 +npm run dev + +# 构建生产版本 +npm run build + +# 预览生产构建 +npm run preview + +# 运行测试 +npm run test + +# 代码检查 +npm run lint +``` + +## 环境变量 + +| 变量名 | 说明 | 默认值 | +|-------|------|-------| +| `VITE_API_BASE_URL` | 后端 API 地址 | `http://localhost:8000` | + +## 常见问题 + +### 如何添加新页面? + +1. 在 `src/pages/` 下创建页面组件 +2. 在 `src/App.tsx` 中添加路由 +3. 在导航菜单中添加链接 + +### 如何使用 Ant Design 组件? + +```tsx +import { Button, Table } from 'antd'; + +export function MyComponent() { + return ( + <div> + <Button type="primary">点击</Button> + </div> + ); +} +``` + +### 如何进行状态管理? + +使用 Zustand: + +```typescript +import { create } from 'zustand'; + +interface MyStore { + count: number; + increment: () => void; +} + +export const useMyStore = create<MyStore>((set) => ({ + count: 0, + increment: () => set((state) => ({ count: state.count + 1 })) +})); +``` + +## 许可证 + +MIT License diff --git a/frontend/index.html b/frontend/index.html new file mode 100644 index 0000000..7fc5f34 --- /dev/null +++ b/frontend/index.html @@ -0,0 +1,13 @@ +<!doctype html> +<html lang="zh-CN"> + <head> + <meta charset="UTF-8" /> + <link rel="icon" type="image/svg+xml" href="/vite.svg" /> + <meta name="viewport" content="width=device-width, initial-scale=1.0" /> + <title>Creative Studio - AI 剧集创作平台 + + +
                              + + + diff --git a/frontend/package-lock.json b/frontend/package-lock.json new file mode 100644 index 0000000..aec7225 --- /dev/null +++ b/frontend/package-lock.json @@ -0,0 +1,7741 @@ +{ + "name": "creative-studio-frontend", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "creative-studio-frontend", + "version": "1.0.0", + "dependencies": { + "@ant-design/icons": "^5.2.6", + "@monaco-editor/react": "^4.6.0", + "antd": "^5.12.0", + "axios": "^1.6.2", + "dayjs": "^1.11.10", + "monaco-editor": "^0.45.0", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-markdown": "^9.0.1", + "react-router-dom": "^6.20.0", + "react-syntax-highlighter": "^15.5.0", + "recharts": "^2.10.3", + "remark-gfm": "^4.0.0", + "socket.io-client": "^4.6.0", + "zustand": "^4.4.7" + }, + "devDependencies": { + "@types/react": "^18.2.43", + "@types/react-dom": "^18.2.17", + "@typescript-eslint/eslint-plugin": "^6.14.0", + "@typescript-eslint/parser": "^6.14.0", + "@vitejs/plugin-react": "^4.2.1", + "autoprefixer": "^10.4.16", + "eslint": "^8.55.0", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.4.5", + "postcss": "^8.4.32", + "tailwindcss": "^3.3.6", + "typescript": "^5.2.2", + "vite": "^5.0.8" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@ant-design/colors": { + "version": "7.2.1", + "resolved": "https://registry.npmjs.org/@ant-design/colors/-/colors-7.2.1.tgz", + "integrity": "sha512-lCHDcEzieu4GA3n8ELeZ5VQ8pKQAWcGGLRTQ50aQM2iqPpq2evTxER84jfdPvsPAtEcZ7m44NI45edFMo8oOYQ==", + "license": "MIT", + "dependencies": { + "@ant-design/fast-color": "^2.0.6" + } + }, + "node_modules/@ant-design/cssinjs": { + "version": "1.24.0", + "resolved": "https://registry.npmjs.org/@ant-design/cssinjs/-/cssinjs-1.24.0.tgz", + "integrity": "sha512-K4cYrJBsgvL+IoozUXYjbT6LHHNt+19a9zkvpBPxLjFHas1UpPM2A5MlhROb0BT8N8WoavM5VsP9MeSeNK/3mg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "@emotion/hash": "^0.8.0", + "@emotion/unitless": "^0.7.5", + "classnames": "^2.3.1", + "csstype": "^3.1.3", + "rc-util": "^5.35.0", + "stylis": "^4.3.4" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/@ant-design/cssinjs-utils": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@ant-design/cssinjs-utils/-/cssinjs-utils-1.1.3.tgz", + "integrity": "sha512-nOoQMLW1l+xR1Co8NFVYiP8pZp3VjIIzqV6D6ShYF2ljtdwWJn5WSsH+7kvCktXL/yhEtWURKOfH5Xz/gzlwsg==", + "license": "MIT", + "dependencies": { + "@ant-design/cssinjs": "^1.21.0", + "@babel/runtime": "^7.23.2", + "rc-util": "^5.38.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@ant-design/fast-color": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@ant-design/fast-color/-/fast-color-2.0.6.tgz", + "integrity": "sha512-y2217gk4NqL35giHl72o6Zzqji9O7vHh9YmhUVkPtAOpoTCH4uWxo/pr4VE8t0+ChEPs0qo4eJRC5Q1eXWo3vA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.24.7" + }, + "engines": { + "node": ">=8.x" + } + }, + "node_modules/@ant-design/icons": { + 
"version": "5.6.1", + "resolved": "https://registry.npmjs.org/@ant-design/icons/-/icons-5.6.1.tgz", + "integrity": "sha512-0/xS39c91WjPAZOWsvi1//zjx6kAp4kxWwctR6kuU6p133w8RU0D2dSCvZC19uQyharg/sAvYxGYWl01BbZZfg==", + "license": "MIT", + "dependencies": { + "@ant-design/colors": "^7.0.0", + "@ant-design/icons-svg": "^4.4.0", + "@babel/runtime": "^7.24.8", + "classnames": "^2.2.6", + "rc-util": "^5.31.1" + }, + "engines": { + "node": ">=8" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/@ant-design/icons-svg": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/@ant-design/icons-svg/-/icons-svg-4.4.2.tgz", + "integrity": "sha512-vHbT+zJEVzllwP+CM+ul7reTEfBR0vgxFe7+lREAsAA7YGsYpboiq2sQNeQeRvh09GfQgs/GyFEvZpJ9cLXpXA==", + "license": "MIT" + }, + "node_modules/@ant-design/react-slick": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@ant-design/react-slick/-/react-slick-1.1.2.tgz", + "integrity": "sha512-EzlvzE6xQUBrZuuhSAFTdsr4P2bBBHGZwKFemEfq8gIGyIQCxalYfZW/T2ORbtQx5rU69o+WycP3exY/7T1hGA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.4", + "classnames": "^2.2.5", + "json2mq": "^0.2.0", + "resize-observer-polyfill": "^1.5.1", + "throttle-debounce": "^5.0.0" + }, + "peerDependencies": { + "react": ">=16.9.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.28.6.tgz", + "integrity": "sha512-JYgintcMjRiCvS8mMECzaEn+m3PfoQiyqukOMCCVQtoJGYJw8j/8LBJEiqkHLkfwCcs74E3pbAUFNg7d9VNJ+Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.6.tgz", + "integrity": "sha512-2lfu57JtzctfIrcGMz992hyLlByuzgIk58+hhGCxjKZ3rWI82NnVLjXcaTqkI2NvlcvOskZaiZ5kjUALo3Lpxg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.6.tgz", + "integrity": "sha512-H3mcG6ZDLTlYfaSNi0iOKkigqMFvkTKlGUYlD8GW7nNOYRrevuA46iTypPyv+06V3fEmvvazfntkBU34L0azAw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/generator": "^7.28.6", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.28.6", + "resolved": 
"https://registry.npmjs.org/@babel/generator/-/generator-7.28.6.tgz", + "integrity": "sha512-lOoVRwADj8hjf7al89tvQ2a1lf53Z+7tiXMgpZJL3maQPDxh0DgLMN62B2MKUOFcoodBHLMbDM6WAbKgNy5Suw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + 
"node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.6.tgz", + "integrity": "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.6.tgz", + "integrity": "sha512-TeR9zWR18BvbfPmGbLampPMW+uW1NZnJlRuuHso8i87QZNq2JRF9i6RgxRqtEq+wQGsS19NNTWr2duhnE49mfQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.6" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.6.tgz", + "integrity": "sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.28.6", + "resolved": 
"https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.6.tgz", + "integrity": "sha512-fgWX62k02qtjqdSNTAGxmKYY/7FSL9WAS1o2Hu5+I5m9T0yxZzr4cnrfXQ/MX0rIifthCSs6FKTlzYbJcPtMNg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/generator": "^7.28.6", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.6", + "@babel/template": "^7.28.6", + "@babel/types": "^7.28.6", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.6.tgz", + "integrity": "sha512-0ZrskXVEHSWIqZM/sQZ4EV3jZJXRkio/WCxaqKZP1g//CEWEPSfeZFcms4XeKBCHU0ZKnIkdJeU/kF+eRp5lBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@emotion/hash": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.8.0.tgz", + "integrity": "sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow==", + "license": "MIT" + }, + "node_modules/@emotion/unitless": { + "version": "0.7.5", + "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.7.5.tgz", + "integrity": "sha512-OWORNpfjMsSSUBVrRBVGECkhWcULOAJz9ZW8uK9qgxD+87M7jHRcvh/A96XXNhXTLmKcoYSQtBEX7lHMO7YRwg==", + "license": "MIT" + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + 
"node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": 
"sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + 
"node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/js": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", + "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", + "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", + "deprecated": "Use @eslint/config-array instead", + "dev": true, + "license": 
"Apache-2.0", + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.3", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use @eslint/object-schema instead", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": 
"sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@monaco-editor/loader": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/@monaco-editor/loader/-/loader-1.7.0.tgz", + "integrity": "sha512-gIwR1HrJrrx+vfyOhYmCZ0/JcWqG5kbfG7+d3f/C1LXk2EvzAbHSg3MQ5lO2sMlo9izoAZ04shohfKLVT6crVA==", + "license": "MIT", + "dependencies": { + "state-local": "^1.0.6" + } + }, + "node_modules/@monaco-editor/react": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@monaco-editor/react/-/react-4.7.0.tgz", + "integrity": "sha512-cyzXQCtO47ydzxpQtCGSQGOC8Gk3ZUeBXFAxD+CWXYFo5OqZyZUonFl0DwUlTyAfRHntBfw2p3w4s9R6oe1eCA==", + "license": "MIT", + "dependencies": { + "@monaco-editor/loader": "^1.5.0" + }, + "peerDependencies": { + "monaco-editor": ">= 0.25.0 < 1", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@rc-component/async-validator": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@rc-component/async-validator/-/async-validator-5.1.0.tgz", + "integrity": "sha512-n4HcR5siNUXRX23nDizbZBQPO0ZM/5oTtmKZ6/eqL0L2bo747cklFdZGRN2f+c9qWGICwDzrhW0H7tE9PptdcA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.24.4" + }, + "engines": { + "node": ">=14.x" + } + }, + "node_modules/@rc-component/color-picker": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@rc-component/color-picker/-/color-picker-2.0.1.tgz", + "integrity": "sha512-WcZYwAThV/b2GISQ8F+7650r5ZZJ043E57aVBFkQ+kSY4C6wdofXgB0hBx+GPGpIU0Z81eETNoDUJMr7oy/P8Q==", + "license": "MIT", + "dependencies": { + "@ant-design/fast-color": "^2.0.6", + "@babel/runtime": "^7.23.6", + "classnames": "^2.2.6", + "rc-util": "^5.38.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/context": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@rc-component/context/-/context-1.4.0.tgz", + "integrity": "sha512-kFcNxg9oLRMoL3qki0OMxK+7g5mypjgaaJp/pkOis/6rVxma9nJBF/8kCIuTYHUQNr0ii7MxqE33wirPZLJQ2w==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "rc-util": "^5.27.0" 
+ }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/mini-decimal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@rc-component/mini-decimal/-/mini-decimal-1.1.0.tgz", + "integrity": "sha512-jS4E7T9Li2GuYwI6PyiVXmxTiM6b07rlD9Ge8uGZSCz3WlzcG5ZK7g5bbuKNeZ9pgUuPK/5guV781ujdVpm4HQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0" + }, + "engines": { + "node": ">=8.x" + } + }, + "node_modules/@rc-component/mutate-observer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@rc-component/mutate-observer/-/mutate-observer-1.1.0.tgz", + "integrity": "sha512-QjrOsDXQusNwGZPf4/qRQasg7UFEj06XiCJ8iuiq/Io7CrHrgVi6Uuetw60WAMG1799v+aM8kyc+1L/GBbHSlw==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/portal": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@rc-component/portal/-/portal-1.1.2.tgz", + "integrity": "sha512-6f813C0IsasTZms08kfA8kPAGxbbkYToa8ALaiDIGGECU4i9hj8Plgbx0sNJDrey3EtHO30hmdaxtT0138xZcg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/qrcode": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@rc-component/qrcode/-/qrcode-1.1.1.tgz", + "integrity": "sha512-LfLGNymzKdUPjXUbRP+xOhIWY4jQ+YMj5MmWAcgcAq1Ij8XP7tRmAXqyuv96XvLUBE/5cA8hLFl9eO1JQMujrA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.24.7" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/tour": { + "version": "1.15.1", + "resolved": "https://registry.npmjs.org/@rc-component/tour/-/tour-1.15.1.tgz", + "integrity": "sha512-Tr2t7J1DKZUpfJuDZWHxyxWpfmj8EZrqSgyMZ+BCdvKZ6r1UDsfU46M/iWAAFBy961Ssfom2kv5f3UcjIL2CmQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "@rc-component/portal": "^1.0.0-9", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/trigger": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@rc-component/trigger/-/trigger-2.3.1.tgz", + "integrity": "sha512-ORENF39PeXTzM+gQEshuk460Z8N4+6DkjpxlpE7Q3gYy1iBpLrx0FOJz3h62ryrJZ/3zCAUIkT1Pb/8hHWpb3A==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.23.2", + "@rc-component/portal": "^1.1.0", + "classnames": "^2.3.2", + "rc-motion": "^2.0.0", + "rc-resize-observer": "^1.3.1", + "rc-util": "^5.44.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@remix-run/router": { + "version": "1.23.2", + "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.23.2.tgz", + "integrity": "sha512-Ic6m2U/rMjTkhERIa/0ZtXJP17QUi2CbWE7cqx4J58M8aA3QTfW+2UlQ4psvTX9IO1RfNVhK3pcpdjej7L+t2w==", + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-beta.27", + 
"resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz", + "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.56.0.tgz", + "integrity": "sha512-LNKIPA5k8PF1+jAFomGe3qN3bbIgJe/IlpDBwuVjrDKrJhVWywgnJvflMt/zkbVNLFtF1+94SljYQS6e99klnw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.56.0.tgz", + "integrity": "sha512-lfbVUbelYqXlYiU/HApNMJzT1E87UPGvzveGg2h0ktUNlOCxKlWuJ9jtfvs1sKHdwU4fzY7Pl8sAl49/XaEk6Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.56.0.tgz", + "integrity": "sha512-EgxD1ocWfhoD6xSOeEEwyE7tDvwTgZc8Bss7wCWe+uc7wO8G34HHCUH+Q6cHqJubxIAnQzAsyUsClt0yFLu06w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.56.0.tgz", + "integrity": "sha512-1vXe1vcMOssb/hOF8iv52A7feWW2xnu+c8BV4t1F//m9QVLTfNVpEdja5ia762j/UEJe2Z1jAmEqZAK42tVW3g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.56.0.tgz", + "integrity": "sha512-bof7fbIlvqsyv/DtaXSck4VYQ9lPtoWNFCB/JY4snlFuJREXfZnm+Ej6yaCHfQvofJDXLDMTVxWscVSuQvVWUQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.56.0.tgz", + "integrity": "sha512-KNa6lYHloW+7lTEkYGa37fpvPq+NKG/EHKM8+G/g9WDU7ls4sMqbVRV78J6LdNuVaeeK5WB9/9VAFbKxcbXKYg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.56.0.tgz", + "integrity": "sha512-E8jKK87uOvLrrLN28jnAAAChNq5LeCd2mGgZF+fGF5D507WlG/Noct3lP/QzQ6MrqJ5BCKNwI9ipADB6jyiq2A==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.56.0.tgz", + "integrity": "sha512-jQosa5FMYF5Z6prEpTCCmzCXz6eKr/tCBssSmQGEeozA9tkRUty/5Vx06ibaOP9RCrW1Pvb8yp3gvZhHwTDsJw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { 
+ "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.56.0.tgz", + "integrity": "sha512-uQVoKkrC1KGEV6udrdVahASIsaF8h7iLG0U0W+Xn14ucFwi6uS539PsAr24IEF9/FoDtzMeeJXJIBo5RkbNWvQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.56.0.tgz", + "integrity": "sha512-vLZ1yJKLxhQLFKTs42RwTwa6zkGln+bnXc8ueFGMYmBTLfNu58sl5/eXyxRa2RarTkJbXl8TKPgfS6V5ijNqEA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.56.0.tgz", + "integrity": "sha512-FWfHOCub564kSE3xJQLLIC/hbKqHSVxy8vY75/YHHzWvbJL7aYJkdgwD/xGfUlL5UV2SB7otapLrcCj2xnF1dg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.56.0.tgz", + "integrity": "sha512-z1EkujxIh7nbrKL1lmIpqFTc/sr0u8Uk0zK/qIEFldbt6EDKWFk/pxFq3gYj4Bjn3aa9eEhYRlL3H8ZbPT1xvA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.56.0.tgz", + "integrity": "sha512-iNFTluqgdoQC7AIE8Q34R3AuPrJGJirj5wMUErxj22deOcY7XwZRaqYmB6ZKFHoVGqRcRd0mqO+845jAibKCkw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.56.0.tgz", + "integrity": "sha512-MtMeFVlD2LIKjp2sE2xM2slq3Zxf9zwVuw0jemsxvh1QOpHSsSzfNOTH9uYW9i1MXFxUSMmLpeVeUzoNOKBaWg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.56.0.tgz", + "integrity": "sha512-in+v6wiHdzzVhYKXIk5U74dEZHdKN9KH0Q4ANHOTvyXPG41bajYRsy7a8TPKbYPl34hU7PP7hMVHRvv/5aCSew==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.56.0.tgz", + "integrity": "sha512-yni2raKHB8m9NQpI9fPVwN754mn6dHQSbDTwxdr9SE0ks38DTjLMMBjrwvB5+mXrX+C0npX0CVeCUcvvvD8CNQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.56.0.tgz", + "integrity": 
"sha512-zhLLJx9nQPu7wezbxt2ut+CI4YlXi68ndEve16tPc/iwoylWS9B3FxpLS2PkmfYgDQtosah07Mj9E0khc3Y+vQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.56.0.tgz", + "integrity": "sha512-MVC6UDp16ZSH7x4rtuJPAEoE1RwS8N4oK9DLHy3FTEdFoUTCFVzMfJl/BVJ330C+hx8FfprA5Wqx4FhZXkj2Kw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.56.0.tgz", + "integrity": "sha512-ZhGH1eA4Qv0lxaV00azCIS1ChedK0V32952Md3FtnxSqZTBTd6tgil4nZT5cU8B+SIw3PFYkvyR4FKo2oyZIHA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.56.0.tgz", + "integrity": "sha512-O16XcmyDeFI9879pEcmtWvD/2nyxR9mF7Gs44lf1vGGx8Vg2DRNx11aVXBEqOQhWb92WN4z7fW/q4+2NYzCbBA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.56.0.tgz", + "integrity": "sha512-LhN/Reh+7F3RCgQIRbgw8ZMwUwyqJM+8pXNT6IIJAqm2IdKkzpCh/V9EdgOMBKuebIrzswqy4ATlrDgiOwbRcQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.56.0.tgz", + "integrity": "sha512-kbFsOObXp3LBULg1d3JIUQMa9Kv4UitDmpS+k0tinPBz3watcUiV2/LUDMMucA6pZO3WGE27P7DsfaN54l9ing==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.56.0.tgz", + "integrity": "sha512-vSSgny54D6P4vf2izbtFm/TcWYedw7f8eBrOiGGecyHyQB9q4Kqentjaj8hToe+995nob/Wv48pDqL5a62EWtg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.56.0.tgz", + "integrity": "sha512-FeCnkPCTHQJFbiGG49KjV5YGW/8b9rrXAM2Mz2kiIoktq2qsJxRD5giEMEOD2lPdgs72upzefaUvS+nc8E3UzQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.56.0.tgz", + "integrity": "sha512-H8AE9Ur/t0+1VXujj90w0HrSOuv0Nq9r1vSZF2t5km20NTfosQsGGUXDaKdQZzwuLts7IyL1fYT4hM95TI9c4g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@socket.io/component-emitter": { + "version": "3.1.2", + "resolved": 
"https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.2.tgz", + "integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==", + "license": "MIT" + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/d3-array": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz", + "integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==", + "license": "MIT" + }, + "node_modules/@types/d3-color": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", + "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==", + "license": "MIT" + }, + "node_modules/@types/d3-ease": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz", + "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==", + "license": "MIT" + }, + "node_modules/@types/d3-interpolate": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", + "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", + "license": "MIT", + "dependencies": { + "@types/d3-color": "*" + } + }, + "node_modules/@types/d3-path": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz", + "integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==", + "license": "MIT" + }, + "node_modules/@types/d3-scale": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz", + "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==", + 
"license": "MIT", + "dependencies": { + "@types/d3-time": "*" + } + }, + "node_modules/@types/d3-shape": { + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.8.tgz", + "integrity": "sha512-lae0iWfcDeR7qt7rA88BNiqdvPS5pFVPpo5OfjElwNaT2yyekbM0C9vK+yqBqEmHr6lDkRnYNoTBYlAgJa7a4w==", + "license": "MIT", + "dependencies": { + "@types/d3-path": "*" + } + }, + "node_modules/@types/d3-time": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz", + "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==", + "license": "MIT" + }, + "node_modules/@types/d3-timer": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz", + "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", + "license": "MIT" + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "license": "MIT", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "license": "MIT" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "license": "MIT", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==", + "license": "MIT" + }, + "node_modules/@types/prop-types": { + "version": "15.7.15", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.15.tgz", + "integrity": "sha512-F6bEyamV9jKGAFBEmlQnesRPGOQqS2+Uwi0Em15xenOxHaf2hv6L8YCVn3rPdPJOiJfPiCnLIRyvwVaqMY3MIw==", + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "18.3.27", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.27.tgz", + "integrity": 
"sha512-cisd7gxkzjBKU2GgdYrTdtQx1SORymWyaAFhaxQPK9bYO9ot3Y5OikQRvY0VYQtvwjeQnizCINJAenh/V7MK2w==", + "license": "MIT", + "peer": true, + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.3.7", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.7.tgz", + "integrity": "sha512-MEe3UeoENYVFXzoXEWsvcpg6ZvlrFNlOQ7EOsvhI3CfAXwzPfO8Qwuxd40nepsYKqyyVQnTdEfv68q91yLcKrQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^18.0.0" + } + }, + "node_modules/@types/semver": { + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-FmgJfu+MOcQ370SD0ev7EI8TlCAfKYU+B4m5T3yXc1CiRN94g/SZPtsCkk506aUDtlMnFZvasDwHHUcZUEaYuA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz", + "integrity": "sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.5.1", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/type-utils": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "graphemer": "^1.4.0", + "ignore": "^5.2.4", + "natural-compare": "^1.4.0", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha", + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz", + "integrity": "sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "dependencies": { + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz", + "integrity": "sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + 
"funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", + "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", + "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", + "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "9.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", + "integrity": "sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@types/json-schema": "^7.0.12", + "@types/semver": "^7.5.0", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "semver": "^7.5.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz", + "integrity": "sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "license": "ISC" + }, + "node_modules/@vitejs/plugin-react": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz", + "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.0", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-beta.27", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.17.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "license": "MIT", + "peer": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/antd": { + "version": "5.29.3", + "resolved": "https://registry.npmjs.org/antd/-/antd-5.29.3.tgz", + "integrity": "sha512-3DdbGCa9tWAJGcCJ6rzR8EJFsv2CtyEbkVabZE14pfgUHfCicWCj0/QzQVLDYg8CPfQk9BH7fHCoTXHTy7MP/A==", + "license": "MIT", + "dependencies": { + "@ant-design/colors": "^7.2.1", + "@ant-design/cssinjs": "^1.23.0", + "@ant-design/cssinjs-utils": "^1.1.3", + "@ant-design/fast-color": "^2.0.6", + "@ant-design/icons": "^5.6.1", + "@ant-design/react-slick": 
"~1.1.2", + "@babel/runtime": "^7.26.0", + "@rc-component/color-picker": "~2.0.1", + "@rc-component/mutate-observer": "^1.1.0", + "@rc-component/qrcode": "~1.1.0", + "@rc-component/tour": "~1.15.1", + "@rc-component/trigger": "^2.3.0", + "classnames": "^2.5.1", + "copy-to-clipboard": "^3.3.3", + "dayjs": "^1.11.11", + "rc-cascader": "~3.34.0", + "rc-checkbox": "~3.5.0", + "rc-collapse": "~3.9.0", + "rc-dialog": "~9.6.0", + "rc-drawer": "~7.3.0", + "rc-dropdown": "~4.2.1", + "rc-field-form": "~2.7.1", + "rc-image": "~7.12.0", + "rc-input": "~1.8.0", + "rc-input-number": "~9.5.0", + "rc-mentions": "~2.20.0", + "rc-menu": "~9.16.1", + "rc-motion": "^2.9.5", + "rc-notification": "~5.6.4", + "rc-pagination": "~5.1.0", + "rc-picker": "~4.11.3", + "rc-progress": "~4.0.0", + "rc-rate": "~2.13.1", + "rc-resize-observer": "^1.4.3", + "rc-segmented": "~2.7.0", + "rc-select": "~14.16.8", + "rc-slider": "~11.1.9", + "rc-steps": "~6.0.1", + "rc-switch": "~4.1.0", + "rc-table": "~7.54.0", + "rc-tabs": "~15.7.0", + "rc-textarea": "~1.10.2", + "rc-tooltip": "~6.4.0", + "rc-tree": "~5.13.1", + "rc-tree-select": "~5.27.0", + "rc-upload": "~4.11.0", + "rc-util": "^5.44.4", + "scroll-into-view-if-needed": "^3.1.0", + "throttle-debounce": "^5.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/ant-design" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true, + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true, + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, + "node_modules/autoprefixer": { + "version": "10.4.23", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.23.tgz", + "integrity": "sha512-YYTXSFulfwytnjAPlw8QHncHJmlvFKtczb8InXaAx9Q0LbfDnfEYDE55omerIJKihhmU61Ft+cAOSzQVaBUmeA==", + "dev": true, + "funding": [ + { + "type": 
"opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.28.1", + "caniuse-lite": "^1.0.30001760", + "fraction.js": "^5.3.4", + "picocolors": "^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/axios": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.2.tgz", + "integrity": "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==", + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.4", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.9.18", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.18.tgz", + "integrity": "sha512-e23vBV1ZLfjb9apvfPk4rHVu2ry6RIr2Wfs+O324okSidrX7pTAnEJPCh/O5BtRlr7QtZI7ktOP3vsqr7Z5XoA==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": 
"https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001766", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001766.tgz", + "integrity": "sha512-4C0lfJ0/YPjJQHagaE9x2Elb69CIqEPZeG0anQt9SIvIoOH4a4uaRl73IavyO+0qZh6MDLH//DrXThEYKHkmYA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": 
"sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/classnames": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.5.1.tgz", + "integrity": "sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==", + "license": "MIT" + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + 
"dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/compute-scroll-into-view": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-3.1.1.tgz", + "integrity": "sha512-VRhuHOLoKYOy4UbilLbUzbYg93XLjv2PncJC50EuTWPA3gaja1UjBsUP/D/9/juV3vQFr6XBEzn9KCAHdUvOHw==", + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/copy-to-clipboard": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/copy-to-clipboard/-/copy-to-clipboard-3.3.3.tgz", + "integrity": "sha512-2KV8NhB5JqC3ky0r9PMCAZKbUHSwtEo4CwCs0KXgruG43gX5PMqDEBbVU4OUzw2MuAWUfsuFmWvEKG5QRfSnJA==", + "license": "MIT", + "dependencies": { + "toggle-selection": "^1.0.6" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" + }, + "node_modules/d3-array": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz", + "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", + "license": "ISC", + "dependencies": { + "internmap": "1 - 2" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-color": { + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-format": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.2.tgz", + "integrity": "sha512-AJDdYOdnyRDV5b6ArilzCPPwc1ejkHcoyFarqlPqT7zRYjhavcT3uSrqcMvsgh2CgoPbK3RCwyHaVyxYcP2Arg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-path": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz", + "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-scale": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz", + "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", + "license": "ISC", + "dependencies": { + "d3-array": "2.10.0 - 3", + "d3-format": "1 - 3", + "d3-interpolate": "1.2.0 - 3", + "d3-time": "2.1.1 - 3", + "d3-time-format": "2 - 4" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-shape": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz", + "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==", + "license": "ISC", + "dependencies": { + "d3-path": "^3.1.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz", + "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", + "license": "ISC", + "dependencies": { + "d3-array": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time-format": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz", + "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", + "license": "ISC", + "dependencies": { + "d3-time": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/dayjs": { + "version": "1.11.19", + "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.19.tgz", + "integrity": 
"sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==", + "license": "MIT", + "peer": true + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decimal.js-light": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/decimal.js-light/-/decimal.js-light-2.5.1.tgz", + "integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==", + "license": "MIT" + }, + "node_modules/decode-named-character-reference": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.3.0.tgz", + "integrity": "sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==", + "license": "MIT", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "dev": true, + "license": "MIT" + }, + "node_modules/doctrine": { + "version": 
"3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/dom-helpers": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz", + "integrity": "sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.8.7", + "csstype": "^3.0.2" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.278", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.278.tgz", + "integrity": "sha512-dQ0tM1svDRQOwxnXxm+twlGTjr9Upvt8UFWAgmLsxEzFQxhbti4VwxmMjsDxVC51Zo84swW7FVCXEV+VAkhuPw==", + "dev": true, + "license": "ISC" + }, + "node_modules/engine.io-client": { + "version": "6.6.4", + "resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-6.6.4.tgz", + "integrity": "sha512-+kjUJnZGwzewFDw951CDWcwj35vMNf2fcj7xQWOctq1F2i1jkDdVvdFG9kM/BEChymCH36KgjnW0NsL58JYRxw==", + "license": "MIT", + "dependencies": { + "@socket.io/component-emitter": "~3.1.0", + "debug": "~4.4.1", + "engine.io-parser": "~5.2.1", + "ws": "~8.18.3", + "xmlhttprequest-ssl": "~2.1.1" + } + }, + "node_modules/engine.io-parser": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.2.3.tgz", + "integrity": "sha512-HqD3yTBfnBxIrbnM1DoD6Pcq8NECnh8d4As1Qgh0z5Gg3jRRIqijury0CL3ghu/edArpUYiYqQiDUQBIs4np3Q==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + 
"get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esbuild": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", + "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", + "deprecated": "This version is no longer supported. 
Please see https://eslint.org/version-support for other options.", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.1", + "@humanwhocodes/config-array": "^0.13.0", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.2.tgz", + "integrity": "sha512-QzliNJq4GinDBcD8gPB5v0wh6g8q3SUi6EFF0x8N/BL9PoVs0atuGc47ozMRyOWAKdwaZ5OnbOEa3WR+dSGKuQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/eslint-plugin-react-refresh": { + "version": "0.4.26", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.26.tgz", + "integrity": "sha512-1RETEylht2O6FM/MvgnyvT+8K21wLqDNg4qD51Zj3guhjt433XbnnkVttHMyaVyAFD03QSV4LPS5iE3VQmO7XQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "eslint": ">=8.40" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + "license": "MIT" + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": 
"sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-equals": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/fast-equals/-/fast-equals-5.4.0.tgz", + "integrity": "sha512-jt2DW/aNFNwke7AUd+Z+e6pz39KO5rzdbbFCg2sGafS4mk13MI7Z8O5z9cADNn5lhGODIgLwug6TZO2ctf7kcw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fastq": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fault": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/fault/-/fault-1.0.4.tgz", + "integrity": "sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==", + "license": "MIT", + "dependencies": { + "format": "^0.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": 
"https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/format": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", + "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/fraction.js": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", + "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/globals": { + "version": "13.24.0", + "resolved": 
"https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true, + "license": "MIT" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz", + "integrity": "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.6", + "resolved": 
"https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz", + "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz", + "integrity": "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==", + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0", + "comma-separated-tokens": "^1.0.0", + "hast-util-parse-selector": "^2.0.0", + "property-information": "^5.0.0", + "space-separated-tokens": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript/node_modules/@types/hast": { + "version": "2.3.10", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.10.tgz", + "integrity": "sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^2" + } + }, + "node_modules/hastscript/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "license": "MIT" + }, + "node_modules/hastscript/node_modules/comma-separated-tokens": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz", + "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/hastscript/node_modules/property-information": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", + "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==", + "license": "MIT", + "dependencies": { + "xtend": "^4.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/hastscript/node_modules/space-separated-tokens": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz", + 
"integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/highlight.js": { + "version": "10.7.3", + "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz", + "integrity": "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==", + "license": "BSD-3-Clause", + "engines": { + "node": "*" + } + }, + "node_modules/highlightjs-vue": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/highlightjs-vue/-/highlightjs-vue-1.0.0.tgz", + "integrity": "sha512-PDEfEF102G23vHmPhLyPboFCD+BkMGu+GuJe2d9/eH4FsCwvgBpnc9n0pGE+ffKdph38s6foEZiEjdgHdzp+IA==", + "license": "CC0-1.0" + }, + "node_modules/html-url-attributes": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.1.tgz", + "integrity": "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/inline-style-parser": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz", + "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==", + "license": "MIT" + }, + "node_modules/internmap": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", + "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "license": "MIT", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": 
"https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "dev": true, + "license": "MIT", + "peer": true, + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": 
"MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/json2mq": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/json2mq/-/json2mq-0.2.0.tgz", + "integrity": "sha512-SzoRg7ux5DWTII9J2qkrZrqV1gt+rTaoufMxEzXbS26Uid0NwaJd123HcoB80TgubEppxxIGdNxCx50fEoEWQA==", + "license": "MIT", + "dependencies": { + "string-convert": "^0.2.0" + } + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.23", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz", + "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==", + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": 
"https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lowlight": { + "version": "1.20.0", + "resolved": "https://registry.npmjs.org/lowlight/-/lowlight-1.20.0.tgz", + "integrity": "sha512-8Ktj+prEb1RoCPkEOrPMYUN/nCggB7qAWe3a7OpMjWQkh3l2RD5wKRQ+o8Q8YuI9RG/xs95waaI/E6ym/7NsTw==", + "license": "MIT", + "dependencies": { + "fault": "^1.0.0", + "highlight.js": "~10.7.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/markdown-table": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", + "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz", + "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz", + "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz", + "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "license": "MIT", + 
"dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz", + "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.1", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz", + "integrity": 
"sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", + "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromark": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", + "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + 
"type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + 
"micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", + "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": 
"2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + 
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/minimatch": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", + "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/monaco-editor": { + "version": "0.45.0", + "resolved": "https://registry.npmjs.org/monaco-editor/-/monaco-editor-0.45.0.tgz", + "integrity": "sha512-mjv1G1ZzfEE3k9HZN0dQ2olMdwIfaeAAjFiwNprLfYNRSz7ctv9XuCT7gPtBGrMUeV1/iZzYKj17Khu1hxoHOA==", + "license": "MIT", + "peer": true + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": 
"sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-entities": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz", + "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "license": "MIT" + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + 
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + 
"read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-js": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.1.0.tgz", + "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz", + "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.1.1" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "jiti": ">=1.21.0", + "postcss": ">=8.0.9", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + }, + "postcss": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prismjs": { + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.30.0.tgz", + "integrity": 
"sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/prop-types/node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "license": "MIT" + }, + "node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/rc-cascader": { + "version": "3.34.0", + "resolved": "https://registry.npmjs.org/rc-cascader/-/rc-cascader-3.34.0.tgz", + "integrity": "sha512-KpXypcvju9ptjW9FaN2NFcA2QH9E9LHKq169Y0eWtH4e/wHQ5Wh5qZakAgvb8EKZ736WZ3B0zLLOBsrsja5Dag==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.25.7", + "classnames": "^2.3.1", + "rc-select": "~14.16.2", + "rc-tree": "~5.13.0", + "rc-util": "^5.43.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-checkbox": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/rc-checkbox/-/rc-checkbox-3.5.0.tgz", + "integrity": "sha512-aOAQc3E98HteIIsSqm6Xk2FPKIER6+5vyEFMZfo73TqM+VVAIqOkHoPjgKLqSNtVLWScoaM7vY2ZrGEheI79yg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.3.2", + "rc-util": "^5.25.2" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-collapse": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/rc-collapse/-/rc-collapse-3.9.0.tgz", + "integrity": "sha512-swDdz4QZ4dFTo4RAUMLL50qP0EY62N2kvmk2We5xYdRwcRn8WcYtuetCJpwpaCbUfUt5+huLpVxhvmnK+PHrkA==", 
+ "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-motion": "^2.3.4", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-dialog": { + "version": "9.6.0", + "resolved": "https://registry.npmjs.org/rc-dialog/-/rc-dialog-9.6.0.tgz", + "integrity": "sha512-ApoVi9Z8PaCQg6FsUzS8yvBEQy0ZL2PkuvAgrmohPkN3okps5WZ5WQWPc1RNuiOKaAYv8B97ACdsFU5LizzCqg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/portal": "^1.0.0-8", + "classnames": "^2.2.6", + "rc-motion": "^2.3.0", + "rc-util": "^5.21.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-drawer": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/rc-drawer/-/rc-drawer-7.3.0.tgz", + "integrity": "sha512-DX6CIgiBWNpJIMGFO8BAISFkxiuKitoizooj4BDyee8/SnBn0zwO2FHrNDpqqepj0E/TFTDpmEBCyFuTgC7MOg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.23.9", + "@rc-component/portal": "^1.1.1", + "classnames": "^2.2.6", + "rc-motion": "^2.6.1", + "rc-util": "^5.38.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-dropdown": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/rc-dropdown/-/rc-dropdown-4.2.1.tgz", + "integrity": "sha512-YDAlXsPv3I1n42dv1JpdM7wJ+gSUBfeyPK59ZpBD9jQhK9jVuxpjj3NmWQHOBceA1zEPVX84T2wbdb2SD0UjmA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.2.6", + "rc-util": "^5.44.1" + }, + "peerDependencies": { + "react": ">=16.11.0", + "react-dom": ">=16.11.0" + } + }, + "node_modules/rc-field-form": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/rc-field-form/-/rc-field-form-2.7.1.tgz", + "integrity": "sha512-vKeSifSJ6HoLaAB+B8aq/Qgm8a3dyxROzCtKNCsBQgiverpc4kWDQihoUwzUj+zNWJOykwSY4dNX3QrGwtVb9A==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "@rc-component/async-validator": "^5.0.3", + "rc-util": "^5.32.2" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-image": { + "version": "7.12.0", + "resolved": "https://registry.npmjs.org/rc-image/-/rc-image-7.12.0.tgz", + "integrity": "sha512-cZ3HTyyckPnNnUb9/DRqduqzLfrQRyi+CdHjdqgsyDpI3Ln5UX1kXnAhPBSJj9pVRzwRFgqkN7p9b6HBDjmu/Q==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.2", + "@rc-component/portal": "^1.0.2", + "classnames": "^2.2.6", + "rc-dialog": "~9.6.0", + "rc-motion": "^2.6.2", + "rc-util": "^5.34.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-input": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/rc-input/-/rc-input-1.8.0.tgz", + "integrity": "sha512-KXvaTbX+7ha8a/k+eg6SYRVERK0NddX8QX7a7AnRvUa/rEH0CNMlpcBzBkhI0wp2C8C4HlMoYl8TImSN+fuHKA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-util": "^5.18.1" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/rc-input-number": { + "version": "9.5.0", + "resolved": "https://registry.npmjs.org/rc-input-number/-/rc-input-number-9.5.0.tgz", + "integrity": "sha512-bKaEvB5tHebUURAEXw35LDcnRZLq3x1k7GxfAqBMzmpHkDGzjAtnUL8y4y5N15rIFIg5IJgwr211jInl3cipag==", + "license": "MIT", + "dependencies": { + 
"@babel/runtime": "^7.10.1", + "@rc-component/mini-decimal": "^1.0.1", + "classnames": "^2.2.5", + "rc-input": "~1.8.0", + "rc-util": "^5.40.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-mentions": { + "version": "2.20.0", + "resolved": "https://registry.npmjs.org/rc-mentions/-/rc-mentions-2.20.0.tgz", + "integrity": "sha512-w8HCMZEh3f0nR8ZEd466ATqmXFCMGMN5UFCzEUL0bM/nGw/wOS2GgRzKBcm19K++jDyuWCOJOdgcKGXU3fXfbQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.22.5", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.2.6", + "rc-input": "~1.8.0", + "rc-menu": "~9.16.0", + "rc-textarea": "~1.10.0", + "rc-util": "^5.34.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-menu": { + "version": "9.16.1", + "resolved": "https://registry.npmjs.org/rc-menu/-/rc-menu-9.16.1.tgz", + "integrity": "sha512-ghHx6/6Dvp+fw8CJhDUHFHDJ84hJE3BXNCzSgLdmNiFErWSOaZNsihDAsKq9ByTALo/xkNIwtDFGIl6r+RPXBg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/trigger": "^2.0.0", + "classnames": "2.x", + "rc-motion": "^2.4.3", + "rc-overflow": "^1.3.1", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-motion": { + "version": "2.9.5", + "resolved": "https://registry.npmjs.org/rc-motion/-/rc-motion-2.9.5.tgz", + "integrity": "sha512-w+XTUrfh7ArbYEd2582uDrEhmBHwK1ZENJiSJVb7uRxdE7qJSYjbO2eksRXmndqyKqKoYPc9ClpPh5242mV1vA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-util": "^5.44.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-notification": { + "version": "5.6.4", + "resolved": "https://registry.npmjs.org/rc-notification/-/rc-notification-5.6.4.tgz", + "integrity": "sha512-KcS4O6B4qzM3KH7lkwOB7ooLPZ4b6J+VMmQgT51VZCeEcmghdeR4IrMcFq0LG+RPdnbe/ArT086tGM8Snimgiw==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-motion": "^2.9.0", + "rc-util": "^5.20.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-overflow": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/rc-overflow/-/rc-overflow-1.5.0.tgz", + "integrity": "sha512-Lm/v9h0LymeUYJf0x39OveU52InkdRXqnn2aYXfWmo8WdOonIKB2kfau+GF0fWq6jPgtdO9yMqveGcK6aIhJmg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.37.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-pagination": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/rc-pagination/-/rc-pagination-5.1.0.tgz", + "integrity": "sha512-8416Yip/+eclTFdHXLKTxZvn70duYVGTvUUWbckCCZoIl3jagqke3GLsFrMs0bsQBikiYpZLD9206Ej4SOdOXQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.3.2", + "rc-util": "^5.38.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-picker": { + "version": "4.11.3", + "resolved": "https://registry.npmjs.org/rc-picker/-/rc-picker-4.11.3.tgz", + "integrity": "sha512-MJ5teb7FlNE0NFHTncxXQ62Y5lytq6sh5nUw0iH8OkHL/TjARSEvSHpr940pWgjGANpjCwyMdvsEV55l5tYNSg==", + "license": "MIT", + "dependencies": { + 
"@babel/runtime": "^7.24.7", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.2.1", + "rc-overflow": "^1.3.2", + "rc-resize-observer": "^1.4.0", + "rc-util": "^5.43.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "date-fns": ">= 2.x", + "dayjs": ">= 1.x", + "luxon": ">= 3.x", + "moment": ">= 2.x", + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + }, + "peerDependenciesMeta": { + "date-fns": { + "optional": true + }, + "dayjs": { + "optional": true + }, + "luxon": { + "optional": true + }, + "moment": { + "optional": true + } + } + }, + "node_modules/rc-progress": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/rc-progress/-/rc-progress-4.0.0.tgz", + "integrity": "sha512-oofVMMafOCokIUIBnZLNcOZFsABaUw8PPrf1/y0ZBvKZNpOiu5h4AO9vv11Sw0p4Hb3D0yGWuEattcQGtNJ/aw==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.6", + "rc-util": "^5.16.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-rate": { + "version": "2.13.1", + "resolved": "https://registry.npmjs.org/rc-rate/-/rc-rate-2.13.1.tgz", + "integrity": "sha512-QUhQ9ivQ8Gy7mtMZPAjLbxBt5y9GRp65VcUyGUMF3N3fhiftivPHdpuDIaWIMOTEprAjZPC08bls1dQB+I1F2Q==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.5", + "rc-util": "^5.0.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-resize-observer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/rc-resize-observer/-/rc-resize-observer-1.4.3.tgz", + "integrity": "sha512-YZLjUbyIWox8E9i9C3Tm7ia+W7euPItNWSPX5sCcQTYbnwDb5uNpnLHQCG1f22oZWUhLw4Mv2tFmeWe68CDQRQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.20.7", + "classnames": "^2.2.1", + "rc-util": "^5.44.1", + "resize-observer-polyfill": "^1.5.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-segmented": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/rc-segmented/-/rc-segmented-2.7.1.tgz", + "integrity": "sha512-izj1Nw/Dw2Vb7EVr+D/E9lUTkBe+kKC+SAFSU9zqr7WV2W5Ktaa9Gc7cB2jTqgk8GROJayltaec+DBlYKc6d+g==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-motion": "^2.4.4", + "rc-util": "^5.17.0" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/rc-select": { + "version": "14.16.8", + "resolved": "https://registry.npmjs.org/rc-select/-/rc-select-14.16.8.tgz", + "integrity": "sha512-NOV5BZa1wZrsdkKaiK7LHRuo5ZjZYMDxPP6/1+09+FB4KoNi8jcG1ZqLE3AVCxEsYMBe65OBx71wFoHRTP3LRg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/trigger": "^2.1.1", + "classnames": "2.x", + "rc-motion": "^2.0.1", + "rc-overflow": "^1.3.1", + "rc-util": "^5.16.1", + "rc-virtual-list": "^3.5.2" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/rc-slider": { + "version": "11.1.9", + "resolved": "https://registry.npmjs.org/rc-slider/-/rc-slider-11.1.9.tgz", + "integrity": "sha512-h8IknhzSh3FEM9u8ivkskh+Ef4Yo4JRIY2nj7MrH6GQmrwV6mcpJf5/4KgH5JaVI1H3E52yCdpOlVyGZIeph5A==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.5", + "rc-util": "^5.36.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": 
">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-steps": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/rc-steps/-/rc-steps-6.0.1.tgz", + "integrity": "sha512-lKHL+Sny0SeHkQKKDJlAjV5oZ8DwCdS2hFhAkIjuQt1/pB81M0cA0ErVFdHq9+jmPmFw1vJB2F5NBzFXLJxV+g==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.16.7", + "classnames": "^2.2.3", + "rc-util": "^5.16.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-switch": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/rc-switch/-/rc-switch-4.1.0.tgz", + "integrity": "sha512-TI8ufP2Az9oEbvyCeVE4+90PDSljGyuwix3fV58p7HV2o4wBnVToEyomJRVyTaZeqNPAp+vqeo4Wnj5u0ZZQBg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.21.0", + "classnames": "^2.2.1", + "rc-util": "^5.30.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-table": { + "version": "7.54.0", + "resolved": "https://registry.npmjs.org/rc-table/-/rc-table-7.54.0.tgz", + "integrity": "sha512-/wDTkki6wBTjwylwAGjpLKYklKo9YgjZwAU77+7ME5mBoS32Q4nAwoqhA2lSge6fobLW3Tap6uc5xfwaL2p0Sw==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/context": "^1.4.0", + "classnames": "^2.2.5", + "rc-resize-observer": "^1.1.0", + "rc-util": "^5.44.3", + "rc-virtual-list": "^3.14.2" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-tabs": { + "version": "15.7.0", + "resolved": "https://registry.npmjs.org/rc-tabs/-/rc-tabs-15.7.0.tgz", + "integrity": "sha512-ZepiE+6fmozYdWf/9gVp7k56PKHB1YYoDsKeQA1CBlJ/POIhjkcYiv0AGP0w2Jhzftd3AVvZP/K+V+Lpi2ankA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.2", + "classnames": "2.x", + "rc-dropdown": "~4.2.0", + "rc-menu": "~9.16.0", + "rc-motion": "^2.6.2", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.34.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-textarea": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/rc-textarea/-/rc-textarea-1.10.2.tgz", + "integrity": "sha512-HfaeXiaSlpiSp0I/pvWpecFEHpVysZ9tpDLNkxQbMvMz6gsr7aVZ7FpWP9kt4t7DB+jJXesYS0us1uPZnlRnwQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.1", + "rc-input": "~1.8.0", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-tooltip": { + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/rc-tooltip/-/rc-tooltip-6.4.0.tgz", + "integrity": "sha512-kqyivim5cp8I5RkHmpsp1Nn/Wk+1oeloMv9c7LXNgDxUpGm+RbXJGL+OPvDlcRnx9DBeOe4wyOIl4OKUERyH1g==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.2", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.3.1", + "rc-util": "^5.44.3" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-tree": { + "version": "5.13.1", + "resolved": "https://registry.npmjs.org/rc-tree/-/rc-tree-5.13.1.tgz", + "integrity": "sha512-FNhIefhftobCdUJshO7M8uZTA9F4OPGVXqGfZkkD/5soDeOhwO06T/aKTrg0WD8gRg/pyfq+ql3aMymLHCTC4A==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-motion": "^2.0.1", + "rc-util": "^5.16.1", + 
"rc-virtual-list": "^3.5.1" + }, + "engines": { + "node": ">=10.x" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/rc-tree-select": { + "version": "5.27.0", + "resolved": "https://registry.npmjs.org/rc-tree-select/-/rc-tree-select-5.27.0.tgz", + "integrity": "sha512-2qTBTzwIT7LRI1o7zLyrCzmo5tQanmyGbSaGTIf7sYimCklAToVVfpMC6OAldSKolcnjorBYPNSKQqJmN3TCww==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.25.7", + "classnames": "2.x", + "rc-select": "~14.16.2", + "rc-tree": "~5.13.0", + "rc-util": "^5.43.0" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/rc-upload": { + "version": "4.11.0", + "resolved": "https://registry.npmjs.org/rc-upload/-/rc-upload-4.11.0.tgz", + "integrity": "sha512-ZUyT//2JAehfHzjWowqROcwYJKnZkIUGWaTE/VogVrepSl7AFNbQf4+zGfX4zl9Vrj/Jm8scLO0R6UlPDKK4wA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "classnames": "^2.2.5", + "rc-util": "^5.2.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-util": { + "version": "5.44.4", + "resolved": "https://registry.npmjs.org/rc-util/-/rc-util-5.44.4.tgz", + "integrity": "sha512-resueRJzmHG9Q6rI/DfK6Kdv9/Lfls05vzMs1Sk3M2P+3cJa+MakaZyWY8IPfehVuhPJFKrIY1IK4GqbiaiY5w==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "react-is": "^18.2.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-virtual-list": { + "version": "3.19.2", + "resolved": "https://registry.npmjs.org/rc-virtual-list/-/rc-virtual-list-3.19.2.tgz", + "integrity": "sha512-Ys6NcjwGkuwkeaWBDqfI3xWuZ7rDiQXlH1o2zLfFzATfEgXcqpk8CkgMfbJD81McqjcJVez25a3kPxCR807evA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.20.0", + "classnames": "^2.2.6", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.36.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "license": "MIT" + }, + "node_modules/react-markdown": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-9.1.0.tgz", + "integrity": "sha512-xaijuJB0kzGiUdG7nc2MOMDUDBWPyGAjZtUrow9XxUeua8IqeP+VlIfAZ3bphpcLTnSZXz6z9jcVC/TCwbfgdw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "html-url-attributes": "^3.0.0", + 
"mdast-util-to-hast": "^13.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "unified": "^11.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=18", + "react": ">=18" + } + }, + "node_modules/react-refresh": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz", + "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-router": { + "version": "6.30.3", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.30.3.tgz", + "integrity": "sha512-XRnlbKMTmktBkjCLE8/XcZFlnHvr2Ltdr1eJX4idL55/9BbORzyZEaIkBFDhFGCEWBBItsVrDxwx3gnisMitdw==", + "license": "MIT", + "dependencies": { + "@remix-run/router": "1.23.2" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": ">=16.8" + } + }, + "node_modules/react-router-dom": { + "version": "6.30.3", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.30.3.tgz", + "integrity": "sha512-pxPcv1AczD4vso7G4Z3TKcvlxK7g7TNt3/FNGMhfqyntocvYKj+GCatfigGDjbLozC4baguJ0ReCigoDJXb0ag==", + "license": "MIT", + "dependencies": { + "@remix-run/router": "1.23.2", + "react-router": "6.30.3" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": ">=16.8", + "react-dom": ">=16.8" + } + }, + "node_modules/react-smooth": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/react-smooth/-/react-smooth-4.0.4.tgz", + "integrity": "sha512-gnGKTpYwqL0Iii09gHobNolvX4Kiq4PKx6eWBCYYix+8cdw+cGo3do906l1NBPKkSWx1DghC1dlWG9L2uGd61Q==", + "license": "MIT", + "dependencies": { + "fast-equals": "^5.0.1", + "prop-types": "^15.8.1", + "react-transition-group": "^4.4.5" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/react-syntax-highlighter": { + "version": "15.6.6", + "resolved": "https://registry.npmjs.org/react-syntax-highlighter/-/react-syntax-highlighter-15.6.6.tgz", + "integrity": "sha512-DgXrc+AZF47+HvAPEmn7Ua/1p10jNoVZVI/LoPiYdtY+OM+/nG5yefLHKJwdKqY1adMuHFbeyBaG9j64ML7vTw==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.3.1", + "highlight.js": "^10.4.1", + "highlightjs-vue": "^1.0.0", + "lowlight": "^1.17.0", + "prismjs": "^1.30.0", + "refractor": "^3.6.0" + }, + "peerDependencies": { + "react": ">= 0.14.0" + } + }, + "node_modules/react-transition-group": { + "version": "4.4.5", + "resolved": "https://registry.npmjs.org/react-transition-group/-/react-transition-group-4.4.5.tgz", + "integrity": "sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g==", + "license": "BSD-3-Clause", + "dependencies": { + "@babel/runtime": "^7.5.5", + "dom-helpers": "^5.0.1", + "loose-envify": "^1.4.0", + "prop-types": "^15.6.2" + }, + "peerDependencies": { + "react": ">=16.6.0", + "react-dom": ">=16.6.0" + } + }, + "node_modules/read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + 
"node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/recharts": { + "version": "2.15.4", + "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.15.4.tgz", + "integrity": "sha512-UT/q6fwS3c1dHbXv2uFgYJ9BMFHu3fwnd7AYZaEQhXuYQ4hgsxLvsUXzGdKeZrW5xopzDCvuA2N41WJ88I7zIw==", + "license": "MIT", + "dependencies": { + "clsx": "^2.0.0", + "eventemitter3": "^4.0.1", + "lodash": "^4.17.21", + "react-is": "^18.3.1", + "react-smooth": "^4.0.4", + "recharts-scale": "^0.4.4", + "tiny-invariant": "^1.3.1", + "victory-vendor": "^36.6.8" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "react": "^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/recharts-scale": { + "version": "0.4.5", + "resolved": "https://registry.npmjs.org/recharts-scale/-/recharts-scale-0.4.5.tgz", + "integrity": "sha512-kivNFO+0OcUNu7jQquLXAxz1FIwZj8nrj+YkOKc5694NbjCvcT6aSZiIzNzd2Kul4o4rTto8QVR9lMNtxD4G1w==", + "license": "MIT", + "dependencies": { + "decimal.js-light": "^2.4.1" + } + }, + "node_modules/refractor": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/refractor/-/refractor-3.6.0.tgz", + "integrity": "sha512-MY9W41IOWxxk31o+YvFCNyNzdkc9M20NoZK5vq6jkv4I/uh2zkWcfudj0Q1fovjUQJrNewS9NMzeTtqPf+n5EA==", + "license": "MIT", + "dependencies": { + "hastscript": "^6.0.0", + "parse-entities": "^2.0.0", + "prismjs": "~1.27.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/character-entities": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", + "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/character-entities-legacy": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", + "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/character-reference-invalid": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", + "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/is-alphabetical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", + "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/is-alphanumerical": { + 
"version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", + "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", + "license": "MIT", + "dependencies": { + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/is-decimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", + "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/is-hexadecimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", + "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/parse-entities": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", + "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", + "license": "MIT", + "dependencies": { + "character-entities": "^1.0.0", + "character-entities-legacy": "^1.0.0", + "character-reference-invalid": "^1.0.0", + "is-alphanumerical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-hexadecimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/prismjs": { + "version": "1.27.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.27.0.tgz", + "integrity": "sha512-t13BGPUlFDR7wRB5kQDG4jjl7XeuH6jbJGt11JHPL96qwsEHNX2+68tFXqc1/k+/jALsbSWJKUOT/hcYAZ5LkA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/remark-gfm": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", + "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.2", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz", + "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + 
"@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/resize-observer-polyfill": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz", + "integrity": "sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==", + "license": "MIT" + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rollup": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.56.0.tgz", + "integrity": "sha512-9FwVqlgUHzbXtDg9RCMgodF3Ua4Na6Gau+Sdt9vyCN4RhHfVKX2DCHy3BjMLTDd47ITDhYAnTwGulWTblJSDLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.56.0", + "@rollup/rollup-android-arm64": "4.56.0", + "@rollup/rollup-darwin-arm64": "4.56.0", + "@rollup/rollup-darwin-x64": "4.56.0", + "@rollup/rollup-freebsd-arm64": "4.56.0", + "@rollup/rollup-freebsd-x64": "4.56.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.56.0", + "@rollup/rollup-linux-arm-musleabihf": "4.56.0", + 
"@rollup/rollup-linux-arm64-gnu": "4.56.0", + "@rollup/rollup-linux-arm64-musl": "4.56.0", + "@rollup/rollup-linux-loong64-gnu": "4.56.0", + "@rollup/rollup-linux-loong64-musl": "4.56.0", + "@rollup/rollup-linux-ppc64-gnu": "4.56.0", + "@rollup/rollup-linux-ppc64-musl": "4.56.0", + "@rollup/rollup-linux-riscv64-gnu": "4.56.0", + "@rollup/rollup-linux-riscv64-musl": "4.56.0", + "@rollup/rollup-linux-s390x-gnu": "4.56.0", + "@rollup/rollup-linux-x64-gnu": "4.56.0", + "@rollup/rollup-linux-x64-musl": "4.56.0", + "@rollup/rollup-openbsd-x64": "4.56.0", + "@rollup/rollup-openharmony-arm64": "4.56.0", + "@rollup/rollup-win32-arm64-msvc": "4.56.0", + "@rollup/rollup-win32-ia32-msvc": "4.56.0", + "@rollup/rollup-win32-x64-gnu": "4.56.0", + "@rollup/rollup-win32-x64-msvc": "4.56.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/scroll-into-view-if-needed": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/scroll-into-view-if-needed/-/scroll-into-view-if-needed-3.1.0.tgz", + "integrity": "sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ==", + "license": "MIT", + "dependencies": { + "compute-scroll-into-view": "^3.0.2" + } + }, + "node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/socket.io-client": { + "version": "4.8.3", + "resolved": 
"https://registry.npmjs.org/socket.io-client/-/socket.io-client-4.8.3.tgz", + "integrity": "sha512-uP0bpjWrjQmUt5DTHq9RuoCBdFJF10cdX9X+a368j/Ft0wmaVgxlrjvK3kjvgCODOMMOz9lcaRzxmso0bTWZ/g==", + "license": "MIT", + "dependencies": { + "@socket.io/component-emitter": "~3.1.0", + "debug": "~4.4.1", + "engine.io-client": "~6.6.1", + "socket.io-parser": "~4.2.4" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/socket.io-parser": { + "version": "4.2.5", + "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.5.tgz", + "integrity": "sha512-bPMmpy/5WWKHea5Y/jYAP6k74A+hvmRCQaJuJB6I/ML5JZq/KfNieUVo/3Mh7SAqn7TyFdIo6wqYHInG1MU1bQ==", + "license": "MIT", + "dependencies": { + "@socket.io/component-emitter": "~3.1.0", + "debug": "~4.4.1" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/state-local": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/state-local/-/state-local-1.0.7.tgz", + "integrity": "sha512-HTEHMNieakEnoe33shBYcZ7NX83ACUjCu8c40iOGEZsngj9zRnkqS9j1pqQPXwobB0ZcVTk27REb7COQ0UR59w==", + "license": "MIT" + }, + "node_modules/string-convert": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/string-convert/-/string-convert-0.2.1.tgz", + "integrity": "sha512-u/1tdPl4yQnPBjnVrmdLo9gtuLvELKsAoRapekWggdiQNvvvum+jYF329d84NAa660KQw7pB2n36KrIKVoXa3A==", + "license": "MIT" + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "license": "MIT", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-to-js": { + "version": "1.1.21", + "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz", + "integrity": 
"sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==", + "license": "MIT", + "dependencies": { + "style-to-object": "1.0.14" + } + }, + "node_modules/style-to-object": { + "version": "1.0.14", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz", + "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==", + "license": "MIT", + "dependencies": { + "inline-style-parser": "0.2.7" + } + }, + "node_modules/stylis": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz", + "integrity": "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==", + "license": "MIT" + }, + "node_modules/sucrase": { + "version": "3.35.1", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz", + "integrity": "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "tinyglobby": "^0.2.11", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tailwindcss": { + "version": "3.4.19", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz", + "integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.6.0", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.3.2", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.7", + "lilconfig": "^3.1.3", + "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.1.1", + "postcss": "^8.4.47", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", + "postcss-nested": "^6.2.0", + "postcss-selector-parser": "^6.1.2", + "resolve": "^1.22.8", + "sucrase": "^3.35.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true, + "license": "MIT" + 
}, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/throttle-debounce": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/throttle-debounce/-/throttle-debounce-5.0.2.tgz", + "integrity": "sha512-B71/4oyj61iNH0KeCamLuE2rmKuTO5byTOSVwECM5FA7TiAiAW+UqTKZ9ERueC4qvgSttUhdmq1mXC3kJqGX7A==", + "license": "MIT", + "engines": { + "node": ">=12.22" + } + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toggle-selection": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/toggle-selection/-/toggle-selection-1.0.6.tgz", + "integrity": "sha512-BiZS+C1OS8g/q2RRbJmy59xpyghNBqrr6k5L/uKBGRsTfxmu3ffiRnd8mlGPUVayg8pvfi5urfnu8TU7DVOkLQ==", + "license": "MIT" + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": 
"sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/ts-api-utils": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.4.3.tgz", + "integrity": "sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=16" + }, + "peerDependencies": { + "typescript": ">=4.2.0" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz", + "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": 
"https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.1.0.tgz", + "integrity": "sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz", + "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/use-sync-external-store": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz", + "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": 
"sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz", + "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/victory-vendor": { + "version": "36.9.2", + "resolved": "https://registry.npmjs.org/victory-vendor/-/victory-vendor-36.9.2.tgz", + "integrity": "sha512-PnpQQMuxlwYdocC8fIJqVXvkeViHYzotI+NJrCuav0ZYFoq912ZHBk3mCeuj+5/VpodOjPe1z0Fk2ihgzlXqjQ==", + "license": "MIT AND ISC", + "dependencies": { + "@types/d3-array": "^3.0.3", + "@types/d3-ease": "^3.0.0", + "@types/d3-interpolate": "^3.0.1", + "@types/d3-scale": "^4.0.2", + "@types/d3-shape": "^3.1.0", + "@types/d3-time": "^3.0.0", + "@types/d3-timer": "^3.0.0", + "d3-array": "^3.1.6", + "d3-ease": "^3.0.1", + "d3-interpolate": "^3.0.1", + "d3-scale": "^4.0.2", + "d3-shape": "^3.1.0", + "d3-time": "^3.0.0", + "d3-timer": "^3.0.1" + } + }, + "node_modules/vite": { + "version": "5.4.21", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz", + "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "esbuild": "^0.21.3", + "postcss": "^8.4.43", + "rollup": "^4.20.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": 
true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/ws": { + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xmlhttprequest-ssl": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-2.1.2.tgz", + "integrity": "sha512-TEU+nJVUUnA4CYJFLvK5X9AOeH4KvDvhIfm0vV1GaQRtchnG0hgK5p8hw/xjv8cunWYCsiPCSDzObPyhEwq3KQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "license": "MIT", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zustand": { + "version": "4.5.7", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz", + "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.2.2" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/frontend/package.json b/frontend/package.json new file mode 100644 index 0000000..c8b9933 --- /dev/null +++ b/frontend/package.json @@ -0,0 +1,44 @@ +{ + "name": "creative-studio-frontend", + "version": "1.0.0", + "description": "Creative Studio - AI 剧集创作平台前端", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "preview": "vite preview", + "lint": "eslint . 
--ext ts,tsx --report-unused-disable-directives --max-warnings 0"
+  },
+  "dependencies": {
+    "react": "^18.2.0",
+    "react-dom": "^18.2.0",
+    "react-router-dom": "^6.20.0",
+    "antd": "^5.12.0",
+    "@ant-design/icons": "^5.2.6",
+    "zustand": "^4.4.7",
+    "axios": "^1.6.2",
+    "socket.io-client": "^4.6.0",
+    "dayjs": "^1.11.10",
+    "monaco-editor": "^0.45.0",
+    "@monaco-editor/react": "^4.6.0",
+    "react-markdown": "^9.0.1",
+    "remark-gfm": "^4.0.0",
+    "react-syntax-highlighter": "^15.5.0",
+    "recharts": "^2.10.3"
+  },
+  "devDependencies": {
+    "@types/react": "^18.2.43",
+    "@types/react-dom": "^18.2.17",
+    "@typescript-eslint/eslint-plugin": "^6.14.0",
+    "@typescript-eslint/parser": "^6.14.0",
+    "@vitejs/plugin-react": "^4.2.1",
+    "autoprefixer": "^10.4.16",
+    "eslint": "^8.55.0",
+    "eslint-plugin-react-hooks": "^4.6.0",
+    "eslint-plugin-react-refresh": "^0.4.5",
+    "postcss": "^8.4.32",
+    "tailwindcss": "^3.3.6",
+    "typescript": "^5.2.2",
+    "vite": "^5.0.8"
+  }
+}
diff --git a/frontend/postcss.config.js b/frontend/postcss.config.js
new file mode 100644
index 0000000..2e7af2b
--- /dev/null
+++ b/frontend/postcss.config.js
@@ -0,0 +1,6 @@
+export default {
+  plugins: {
+    tailwindcss: {},
+    autoprefixer: {},
+  },
+}
diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
new file mode 100644
index 0000000..84d3f02
--- /dev/null
+++ b/frontend/src/App.tsx
@@ -0,0 +1,88 @@
+import React from 'react'
+import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom'
+import { ConfigProvider, Layout, Menu, theme } from 'antd'
+import { DashboardOutlined, PlusOutlined, BookOutlined, SettingOutlined } from '@ant-design/icons'
+
+import { ProjectList } from './pages/ProjectList'
+import { ProjectCreateEnhanced } from './pages/ProjectCreateEnhanced'
+import { ProjectDetail } from './pages/ProjectDetail'
+import { ProjectWorkspace } from './pages/ProjectWorkspace'
+import { SkillManagement } from './pages/SkillManagement'
+import { AgentManagement } from './pages/AgentManagement'
+import { ExecutionMonitor } from './pages/ExecutionMonitor'
+import MemorySystem from './pages/MemorySystem'
+import ReviewConfig from './pages/ReviewConfig'
+import ReviewResults from './pages/ReviewResults'
+
+const { Header, Content } = Layout
+
+function App() {
+  const currentPath = window.location.pathname
+
+  const menuItems = [
+    { key: '/projects', icon: <DashboardOutlined />, label: '项目列表' },
+    { key: '/projects/new', icon: <PlusOutlined />, label: '创建项目' },
+    { key: '/skills', icon: <BookOutlined />, label: 'Skills 管理' },
+    { key: '/agents', icon: <SettingOutlined />, label: 'Agents 管理' },
+  ]
+
+  return (
+    <ConfigProvider theme={{ algorithm: theme.defaultAlgorithm }}>
+      <BrowserRouter>
+        <Layout style={{ minHeight: '100vh' }}>
+          <Header style={{ display: 'flex', alignItems: 'center', background: '#fff', padding: '0 24px' }}>
+            <div style={{ fontSize: 18, fontWeight: 600, marginRight: 48 }}>
+              Creative Studio
+            </div>
+            <Menu
+              mode="horizontal"
+              selectedKeys={[currentPath]}
+              items={menuItems}
+              onClick={({ key }) => (window.location.href = key)}
+              style={{ flex: 1, border: 'none' }}
+            />
+          </Header>
+          <Content>
+            <Routes>
+              <Route path="/" element={<Navigate to="/projects" replace />} />
+              <Route path="/projects" element={<ProjectList />} />
+              <Route path="/projects/new" element={<ProjectCreateEnhanced />} />
+              <Route path="/projects/:projectId" element={<ProjectDetail />} />
+              <Route path="/projects/:projectId/workspace" element={<ProjectWorkspace />} />
+              <Route path="/projects/:projectId/monitor" element={<ExecutionMonitor />} />
+              <Route path="/projects/:projectId/memory" element={<MemorySystem />} />
+              <Route path="/projects/:projectId/review-config" element={<ReviewConfig />} />
+              <Route path="/projects/:projectId/review-results" element={<ReviewResults />} />
+              <Route path="/skills" element={<SkillManagement />} />
+              <Route path="/agents" element={<AgentManagement />} />
+            </Routes>
+          </Content>
+        </Layout>
+      </BrowserRouter>
+    </ConfigProvider>
+  )
+}
+
+export default App
diff --git a/frontend/src/components/ContentEditor.tsx b/frontend/src/components/ContentEditor.tsx
new file mode 100644
index 0000000..7277c98
--- /dev/null
+++ b/frontend/src/components/ContentEditor.tsx
@@ -0,0 +1,421 @@
+/**
+ * 剧集内容预览和编辑组件
+ *
+ * 提供:
+ * - 内容预览(Markdown 渲染)
+ * - 在线编辑
+ * - 审核功能(通过/拒绝)
+ * - 导出功能
+ * - 版本历史
+ */
+import { useState, useEffect } from 'react'
+import {
+  Modal,
+  Card,
+  Button,
+  Space,
+  Alert,
+  Input,
+  Tag,
+  Divider,
+  Row,
+  Col,
+  Statistic,
+  Progress,
+  Dropdown,
+  message,
+  Tabs,
+  List,
+  Switch
+} from 'antd'
+import {
+  CheckCircleOutlined,
+  CloseCircleOutlined,
+  DownloadOutlined,
+  SaveOutlined,
+  EyeOutlined,
+  EditOutlined,
+  HistoryOutlined,
+  FileTextOutlined,
+  FullscreenOutlined,
+  FullscreenExitOutlined
+} from '@ant-design/icons'
+import TextArea from 'antd/es/input/TextArea'
+
+import { episodeContentService, EpisodeContent, ContentStatus, ExportFormat } from '@/services/episodeContentService'
+import { MarkdownRenderer } from '@/components/MarkdownRenderer'
+
+const { TabPane } = Tabs
+const { Text } = Input
+
+interface ContentEditorProps {
+  visible: boolean
+  projectId: string
+  episodeNumber?: number
+  content?: EpisodeContent
+  onClose: () => void
+  onSave?: () => void
+}
+
+export const ContentEditor: React.FC<ContentEditorProps> = ({
+  visible,
+  projectId,
+  episodeNumber,
+  content: initialContent,
+  onClose,
+  onSave
+}) => {
+  // 内容状态
+  const [content, setContent] = useState<EpisodeContent | null>(initialContent || null)
+  const [editing, setEditing] = useState(false)
+  const [editValue, setEditValue] = useState('')
+  const [saving, setSaving] = useState(false)
+
+  // 预览模式
+  const [isFullscreen, setIsFullscreen] = useState(false)
+
+  // 加载内容
+  useEffect(() => {
+    if (visible && episodeNumber && !initialContent) {
+      loadContent()
+    } else if (initialContent) {
+      setContent(initialContent)
+      setEditValue(initialContent.content)
+    }
+  }, [visible, episodeNumber, initialContent])
+
+  const loadContent = async () => {
+    if (!episodeNumber) return
+
+    try {
+      const data = await episodeContentService.getContent(projectId, episodeNumber)
+      setContent(data)
+      setEditValue(data.content)
+    } catch (error) {
+      message.error(`加载内容失败: ${(error as Error).message}`)
+    }
+  }
+
+  // 保存编辑
+  const handleSave = async () => {
+    if (!content) return
+
+    setSaving(true)
+    try {
+      await episodeContentService.updateContent(content.id, {
+        content: editValue
+      })
+
+      message.success('保存成功')
+      setEditing(false)
+
+      // 重新加载内容
+      await loadContent()
+
+      if (onSave) {
+        onSave()
+      }
+    } catch (error) {
+      message.error(`保存失败: ${(error as Error).message}`)
+    } finally {
+      setSaving(false)
+    }
+  }
+
+  // 审核内容
+  const handleReview = async (approved: boolean, comment?: string) => {
+    if (!content) return
+
+    try {
+      await episodeContentService.reviewContent(content.id, {
+        approved,
+        comment
+      })
+
+      message.success(approved ? '已通过审核' : '已拒绝')
+
+      // 重新加载内容
+      await loadContent()
+
+      if (onSave) {
+        onSave()
+      }
+    } catch (error) {
+      message.error(`审核失败: ${(error as Error).message}`)
+    }
+  }
+
+  // 导出内容
+  const handleExport = async (format: ExportFormat) => {
+    try {
+      const result = await episodeContentService.exportContents(projectId, {
+        format,
+        episode_numbers: content?.episode_number ? [content.episode_number] : undefined
+      })
+
+      if (result.content) {
+        // 创建下载链接
+        const blob = new Blob([result.content], { type: 'text/plain;charset=utf-8' })
+        const url = URL.createObjectURL(blob)
+        const a = document.createElement('a')
+        a.href = url
+        a.download = result.filename || `episode_${content?.episode_number}.${format}`
+        document.body.appendChild(a)
+        a.click()
+        document.body.removeChild(a)
+        URL.revokeObjectURL(url)
+
+        message.success('导出成功')
+      } else {
+        message.info(result.message || '导出功能待完善')
+      }
+    } catch (error) {
+      message.error(`导出失败: ${(error as Error).message}`)
+    }
+  }
+
+  // 导出菜单
+  const exportMenu = {
+    items: [
+      { key: 'markdown', label: '导出为 Markdown (.md)' },
+      { key: 'txt', label: '导出为文本 (.txt)' },
+      { key: 'pdf', label: '导出为 PDF (.pdf)' },
+      { key: 'docx', label: '导出为 Word (.docx)' }
+    ],
+    onClick: ({ key }: { key: string }) => {
+      handleExport(key as ExportFormat)
+    }
+  }
+
+  // 状态标签颜色
+  const getStatusColor = (status: ContentStatus) => {
+    const colors = {
+      draft: 'default',
+      generating: 'processing',
+      pending_review: 'warning',
+      approved: 'success',
+      rejected: 'error'
+    }
+    return colors[status] || 'default'
+  }
+
+  // 状态标签文字
+  const getStatusText = (status: ContentStatus) => {
+    const texts = {
+      draft: '草稿',
+      generating: '生成中',
+      pending_review: '待审核',
+      approved: '已通过',
+      rejected: '已拒绝'
+    }
+    return texts[status] || status
+  }
+
+  if (!content) {
+    return (
+      <Modal open={visible} onCancel={onClose} footer={null}>
+        <Card loading />
+      </Modal>
+    )
+  }
+
+  return (
+    <Modal
+      title={
+        <Space>
+          第 {content.episode_number} 集 - {content.title || '无标题'}
+          <Tag color={getStatusColor(content.status)}>{getStatusText(content.status)}</Tag>
+        </Space>
+      }
+      open={visible}
+      onCancel={onClose}
+      width={1000}
+      footer={
+        <Space>
+          {content.status === 'pending_review' && (
+            <>
+              <Button danger icon={<CloseCircleOutlined />} onClick={() => handleReview(false)} />
+              <Button type="primary" icon={<CheckCircleOutlined />} onClick={() => handleReview(true)} />
+            </>
+          )}
+          <Dropdown menu={exportMenu}>
+            <Button icon={<DownloadOutlined />} />
+          </Dropdown>
+          <Button
+            icon={isFullscreen ? <FullscreenExitOutlined /> : <FullscreenOutlined />}
+            onClick={() => setIsFullscreen(!isFullscreen)}
+          />
+          <Button icon={<CloseCircleOutlined />} onClick={onClose} />
+        </Space>
+      }
+    >
+      {/* 统计信息 */}
+      <Row gutter={16}>
+        <Col span={8}>
+          <Statistic title="字数" value={content.content.length} prefix={<FileTextOutlined />} />
+        </Col>
+        <Col span={8}>
+          <Statistic
+            title="质量评分"
+            value={content.quality_score}
+            valueStyle={{ color: content.quality_score >= 80 ? '#3f8600' : '#cf1322' }}
+          />
+        </Col>
+        <Col span={8}>
+          <Statistic title="状态" value={getStatusText(content.status)} />
+        </Col>
+      </Row>
+
+      <Divider />
+
+      {/* 审核问题 */}
+      {content.review_issues && content.review_issues.length > 0 && (
+        <Alert
+          message="审核问题"
+          description={
+            <List
+              size="small"
+              dataSource={content.review_issues}
+              renderItem={(issue) => (
+                <List.Item>
+                  <Space>
+                    <Tag color="warning">{issue.dimension}</Tag>
+                    {issue.description}
+                  </Space>
+                </List.Item>
+              )}
+            />
+          }
+          type="warning"
+          showIcon
+        />
+      )}
+
+      {/* 内容预览/编辑 */}
+      <Card
+        title="内容"
+        extra={
+          <Space>
+            {!editing && (
+              <Button icon={<EditOutlined />} onClick={() => setEditing(true)} />
+            )}
+            {editing ? (
+              <>
+                <Button icon={<CloseCircleOutlined />} onClick={() => setEditing(false)} />
+                <Button type="primary" icon={<SaveOutlined />} loading={saving} onClick={handleSave} />
+              </>
+            ) : (
+              <Button icon={<EyeOutlined />} />
+            )}
+          </Space>
+        }
+      >
+        {editing ? (