diff --git a/.env.example b/.env.example new file mode 100644 index 00000000..d52737ce --- /dev/null +++ b/.env.example @@ -0,0 +1,16 @@ +# API Keys for LLM Services +GOOGLE_API_KEY=your_gemini_api_key_here +OPENAI_API_KEY=your_openai_api_key_here + +# API Configuration +USE_OPENAI_API_KEY=True # Set to True to use OpenAI, False to use Azure OpenAI +OPENAI_MODEL_NAME=gpt-4 # Model name for OpenAI + +# Azure OpenAI Configuration (if USE_OPENAI_API_KEY is False) +AZURE_OPENAI_ENDPOINT=https://your-resource-name.openai.azure.com/ +OPENAI_API_VERSION=2023-05-15 +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=your_deployment_name + +# UI Configuration +NEXT_PUBLIC_API_URL=http://localhost:5000 # URL of the API server +NEXT_PUBLIC_DEFAULT_MODEL=Gemini # Default model to use diff --git a/.gitignore b/.gitignore index 8a30d258..0185b414 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,9 @@ +# Virtual Environment +venv/ + +# Node Modules +ui/node_modules/ + ## Ignore Visual Studio temporary files, build results, and ## files generated by popular Visual Studio add-ons. ## diff --git a/.vercelignore b/.vercelignore new file mode 100644 index 00000000..54279388 --- /dev/null +++ b/.vercelignore @@ -0,0 +1,46 @@ +# Ignore files not needed for Vercel deployment + +# Python virtual environments +venv/ +env/ +.env/ + +# Development files +.git/ +.github/ +.gitignore +.vscode/ +.idea/ + +# Large data files +demos/ +data/ + +# Documentation +docs/ +*.md +!README.md +!VERCEL_DEPLOYMENT.md + +# Test files +**/tests/ +**/__pycache__/ +**/*.pyc +**/*.pyo +**/*.pyd + +# Build artifacts +**/dist/ +**/build/ +**/*.egg-info/ + +# Node.js +**/node_modules/ +**/.next/ +**/out/ +**/.cache/ + +# Temporary files +**/tmp/ +**/temp/ +**/.DS_Store diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..082b1943 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "makefile.configureOnOpen": false +} \ No newline at end of file diff --git a/DEPLOYMENT.md b/DEPLOYMENT.md new file mode 100644 index 00000000..560cd41f --- /dev/null +++ b/DEPLOYMENT.md @@ -0,0 +1,232 @@ +# PromptWizard Deployment Guide + +This guide provides instructions for deploying PromptWizard in various environments. + +## Table of Contents + +- [Local Development](#local-development) +- [Docker Deployment](#docker-deployment) +- [Cloud Deployment](#cloud-deployment) + - [Azure](#azure) + - [AWS](#aws) + - [Google Cloud](#google-cloud) +- [Environment Variables](#environment-variables) +- [Security Considerations](#security-considerations) +- [Troubleshooting](#troubleshooting) + +## Local Development + +### Prerequisites + +- Python 3.8+ +- Node.js 18+ +- npm or yarn + +### API Setup + +1. Navigate to the API directory: + ```bash + cd api + ``` + +2. Create a virtual environment: + ```bash + python -m venv venv + ``` + +3. Activate the virtual environment: + - Windows: `venv\Scripts\activate` + - macOS/Linux: `source venv/bin/activate` + +4. Install dependencies: + ```bash + pip install -r requirements.txt + ``` + +5. Start the API server: + ```bash + python app.py + ``` + +The API will be available at http://localhost:5000. + +### UI Setup + +1. Navigate to the UI directory: + ```bash + cd ui + ``` + +2. Install dependencies: + ```bash + npm install + ``` + +3. Create a `.env.local` file with the following content: + ``` + NEXT_PUBLIC_API_URL=http://localhost:5000 + ``` + +4. Start the development server: + ```bash + npm run dev + ``` + +The UI will be available at http://localhost:3000. 
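+
+To confirm the two servers are wired together, you can hit the API's health endpoint (defined in `api/app.py`). A minimal check using Python's `requests`, assuming the default port:
+
+```python
+import requests
+
+# The API exposes a simple health check at /api/health
+resp = requests.get("http://localhost:5000/api/health", timeout=10)
+resp.raise_for_status()
+print(resp.json())  # e.g. {"success": True, "status": "API is running", "version": "1.0.0"}
+```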
+ +## Docker Deployment + +### Prerequisites + +- Docker +- Docker Compose + +### Deployment Steps + +1. Clone the repository: + ```bash + git clone https://github.com/microsoft/PromptWizard.git + cd PromptWizard + ``` + +2. Create a `.env` file in the root directory with your API keys: + ``` + GOOGLE_API_KEY=your_gemini_api_key + OPENAI_API_KEY=your_openai_api_key + ``` + +3. Build and start the containers: + ```bash + docker-compose up -d + ``` + +4. Access the UI at http://localhost:3000 + +### Individual Container Deployment + +If you prefer to deploy the API and UI separately: + +#### API Container + +```bash +cd api +docker build -t promptwizard-api . +docker run -p 5000:5000 -e GOOGLE_API_KEY=your_key -e OPENAI_API_KEY=your_key promptwizard-api +``` + +#### UI Container + +```bash +cd ui +docker build -t promptwizard-ui . +docker run -p 3000:3000 -e NEXT_PUBLIC_API_URL=http://localhost:5000 promptwizard-ui +``` + +## Cloud Deployment + +### Azure + +#### Azure App Service + +1. Create two App Services (one for API, one for UI) +2. Deploy the API: + ```bash + cd api + az webapp up --sku B1 --name promptwizard-api + ``` +3. Deploy the UI: + ```bash + cd ui + az webapp up --sku B1 --name promptwizard-ui + ``` +4. Configure environment variables in the Azure Portal + +#### Azure Container Instances + +1. Build and push Docker images to Azure Container Registry +2. Deploy containers using Azure CLI or Azure Portal +3. Configure networking to allow communication between containers + +### AWS + +#### AWS Elastic Beanstalk + +1. Create two Elastic Beanstalk environments +2. Deploy the API: + ```bash + cd api + eb init && eb create promptwizard-api + ``` +3. Deploy the UI: + ```bash + cd ui + eb init && eb create promptwizard-ui + ``` +4. Configure environment variables in the Elastic Beanstalk console + +#### AWS ECS + +1. Create an ECS cluster +2. Define task definitions for API and UI +3. Create services for each task +4. Configure load balancers and networking + +### Google Cloud + +#### Google Cloud Run + +1. Build and push Docker images to Google Container Registry +2. Deploy the API: + ```bash + cd api + gcloud run deploy promptwizard-api --image gcr.io/your-project/promptwizard-api + ``` +3. Deploy the UI: + ```bash + cd ui + gcloud run deploy promptwizard-ui --image gcr.io/your-project/promptwizard-ui + ``` +4. Configure environment variables in the Cloud Run console + +## Environment Variables + +### API Environment Variables + +- `GOOGLE_API_KEY`: API key for Google Gemini +- `OPENAI_API_KEY`: API key for OpenAI +- `FLASK_ENV`: Set to `production` for production deployment +- `FLASK_APP`: Set to `app.py` + +### UI Environment Variables + +- `NEXT_PUBLIC_API_URL`: URL of the PromptWizard API +- `NEXT_PUBLIC_DEFAULT_MODEL`: Default model to use (Gemini, GPT-4, etc.) + +## Security Considerations + +1. **API Keys**: Never commit API keys to version control. Use environment variables or secrets management. +2. **CORS**: The API has CORS enabled for the frontend. In production, restrict CORS to your frontend domain. +3. **Rate Limiting**: Consider implementing rate limiting to prevent abuse. +4. **Input Validation**: All user input is validated, but be cautious when deploying to production. +5. **HTTPS**: Always use HTTPS in production environments. + +## Troubleshooting + +### Common Issues + +1. **API Connection Error**: + - Check if the API server is running + - Verify the `NEXT_PUBLIC_API_URL` environment variable + - Check network connectivity between UI and API + +2. 
**Model API Errors**: + - Verify API keys are correct + - Check if you have sufficient quota/credits + - Ensure the model is available in your region + +3. **Docker Issues**: + - Run `docker-compose logs` to view container logs + - Check if ports are correctly mapped + - Verify environment variables are set correctly + +For more help, please open an issue on the GitHub repository. diff --git a/README.md b/README.md index 5d622569..22e6da80 100644 --- a/README.md +++ b/README.md @@ -1,210 +1,334 @@ +# ✨ PromptWizard UI ✨ -# PromptWizard 🧙 - -

- - - - - - Blog Post - - - - Project Website +

+ + Based on Microsoft PromptWizard -

+
+
-> **PromptWizard: Task-Aware Prompt Optimization Framework**
-> Eshaan Agarwal, Joykirat Singh, Vivek Dani, Raghav Magazine, Tanuja Ganu, Akshay Nambi
+> ### 🚀 **Welcome to PromptWizard UI!** +> +> This project is a modern web interface built on top of Microsoft's PromptWizard framework. Created in a single coding session, it provides an intuitive way to optimize prompts for various LLM tasks. +> +> ⚠️ **Note:** As this was developed rapidly, some features may not be fully implemented. Currently, the Gemini model integration has been tested and works reliably. Other models have code written but haven't been thoroughly tested. +> +> 🧪 **Quick Testing:** Use the "Test Values" button to auto-fill the form with sample data for image generation prompts and quickly test the functionality. +> +> 🤝 **Contributions Welcome:** Whether it's bug fixes, feature enhancements, or documentation improvements, all contributions are appreciated! This is an open project that welcomes community involvement. -## Overview 🌟 -

Overview of the PromptWizard framework

- +## Overview -PromptWizard is a discrete prompt optimization framework that employs a self-evolving mechanism where the LLM generates, critiques, and refines its own prompts and examples, continuously improving through iterative feedback and synthesis. This self-adaptive approach ensures holistic optimization by evolving both the instructions and in-context learning examples for better task performance. +PromptWizard UI provides a sleek web interface for optimizing prompts using the PromptWizard framework. It allows users to: -Three key components of PromptWizard are te following : +- Enter task descriptions and base instructions +- Configure optimization parameters through an intuitive tabbed interface +- Select models and datasets with visual feedback +- Optimize prompts with a single click +- Export optimized prompts and configurations -- Feedback-driven Refinement: LLM generates, critiques, and refines its own prompts and examples, continuously improving through iterative feedback and synthesis​ -- Critique and Synthesize diverse examples: Generates synthetic examples that are robust, diverse and task-aware. Also it optimizes both prompt and examples in tandem​ -- Self generated Chain of Thought (CoT) steps with combination of positive, negative and synthetic examples +## Quick Demo Video -

Stage 1: Iterative optimization of instructions

- + + PromptWizard Demo Video + +
+ Click to watch a quick demo of PromptWizard UI in action

-

Stage 2: Sequential optimization of instruction and examples

-

- -

+## Test Values Feature + +The "Test Values" button allows you to quickly populate the form with pre-configured values for image generation prompts with Ghibli-style aesthetics. + +
+ Test Values Sequence Diagram + +```mermaid +sequenceDiagram + participant User + participant UI as PromptWizard UI + participant API as Backend API + participant LLM as Language Model (Gemini/GPT) + + Note over User,LLM: Initial Setup Phase + User->>UI: Enter task description + User->>UI: Enter/select base instruction + User->>UI: Set mutation rounds (N) + User->>UI: Set refinement steps (M) + User->>UI: Click "Optimize Prompt" + + UI->>API: Send optimization request + + Note over API,LLM: Optimization Process Begins + + API->>API: Initialize parameters + + loop For each mutation round (1 to N) + Note over API,LLM: Mutation Phase + API->>LLM: Send current prompt for mutation + LLM->>API: Return mutated prompt + + Note over API,LLM: Refinement Phase + loop For each refinement step (1 to M) + API->>LLM: Send mutated prompt for refinement + LLM->>API: Return refined prompt + end + + API->>API: Update current prompt with refined version + end + + API->>UI: Return final optimized prompt + UI->>User: Display optimized prompt +``` +
+ +## Application Flow + +
+ Application Flow + +```mermaid +sequenceDiagram + title PromptWizard Test Values Feature Flow + + actor User + participant UI as PromptWizard UI + participant API as Backend API + participant LLM as Gemini API + + User->>+UI: Clicks "Test Values" button + + rect rgb(240, 240, 255) + Note over UI: Form Auto-Population Phase + UI->>UI: Fill task description with
image generation prompt + UI->>UI: Fill base instruction with
Ghibli-style aesthetics + UI->>UI: Set model to Gemini + UI->>UI: Set refine steps to 2 + UI->>UI: Configure optimization parameters + UI->>UI: Set evaluation criteria + end + + UI-->>-User: Display populated form + + User->>UI: Reviews pre-filled values + User->>+UI: Enters API key + UI->>UI: Validate API key + UI-->>-User: Show validation result + + User->>+UI: Clicks "Optimize Prompt" button + + rect rgb(255, 240, 240) + Note over UI,API: Request Processing Phase + UI->>+API: Send optimization request with
pre-filled values + API->>API: Parse and validate request + API->>API: Prepare optimization parameters + end + + rect rgb(240, 255, 240) + Note over API,LLM: Optimization Phase + API->>+LLM: Send request to Gemini API + LLM->>LLM: Generate prompt variations + LLM->>LLM: Perform mutation rounds + LLM->>LLM: Evaluate variations + LLM->>LLM: Refine best prompts + LLM-->>-API: Return optimized prompt + end + + rect rgb(255, 255, 240) + Note over API,UI: Result Processing Phase + API->>API: Process optimization results + API->>API: Calculate performance metrics + API->>-UI: Return optimization results + end + + UI->>UI: Format results for display + UI-->>-User: Display optimized prompt
and performance metrics + + rect rgb(240, 255, 255) + Note over User,UI: Post-Optimization Actions + User->>+UI: Click "Copy to Clipboard" + UI-->>-User: Prompt copied to clipboard + + alt Export Configuration + User->>+UI: Click "Export Config" + UI->>UI: Generate YAML configuration + UI-->>-User: Download configuration file + end + end + + Note over User: Ready to use optimized prompt
in actual applications +``` +
+ +The optimization process follows these steps: + +1. **User Input**: The user provides task description, base instruction, and configuration +2. **API Processing**: The backend processes the request and prepares for optimization +3. **LLM Interaction**: The system interacts with the selected LLM (Gemini/GPT-4) +4. **Optimization Loop**: Multiple rounds of mutation and refinement occur +5. **Result Generation**: The optimized prompt is generated and returned +6. **UI Display**: Results are displayed to the user with evaluation metrics + +## Project Structure + +- `ui/` - Frontend Next.js application +- `api/` - Backend Flask API + +## Getting Started 🚀 + +### Prerequisites + +- Node.js (v18+) +- Python (v3.8+) +- API keys for LLMs (Gemini API key required for testing) + +### Installation + +1. Install frontend dependencies: + ```bash + cd ui + npm install + ``` + +2. Install backend dependencies: + ```bash + cd api + pip install -r requirements.txt + ``` + +3. Set up environment variables: + - Create a `.env` file in the `api/` directory + - Add your API keys: + ``` + GOOGLE_API_KEY=your_gemini_api_key + OPENAI_API_KEY=your_openai_api_key + ``` + +### Running the Application + +1. Start the backend API: + ```bash + cd api + python app.py + ``` + +2. Start the frontend development server: + ```bash + cd ui + npm run dev + ``` + +3. Open your browser and navigate to `http://localhost:3000` + +### Quick Testing with Test Values + +1. Open the application in your browser +2. Click the "Test Values" button in the top-right corner +3. Review the pre-filled form with sample values for image generation +4. Enter your Gemini API key +5. Click "Optimize Prompt" to test the functionality + +> **Note:** The Test Values feature is designed to work with the Gemini model, which has been thoroughly tested. Other models may require additional configuration. + +## Features + +### Prompt Input +Enter task descriptions and base instructions for optimization. + +### Dataset Selection +Choose from predefined datasets (GSM8k, SVAMP, AQUARAT, BBII) or use custom data. + +### Configuration Options +- **Mutation Rounds**: Number of iterations for prompt mutation +- **Refine Steps**: Number of refinement steps +- **In-context Examples**: Option to use examples during optimization + +### Model Selection +Choose between Gemini, GPT-4, or custom models. + +### Evaluation Metrics +Select criteria for evaluating prompts: +- Accuracy +- Clarity +- Completeness +- Relevance +- Conciseness + +### Export Options +- Download optimized prompts as text files +- Export configurations as YAML files -## Installation ⬇️ +## Deployment 🌐 -Follow these steps to set up the development environment and install the package: +### One-Click Vercel Deployment (Recommended) -1) Clone the repository - ``` - git clone https://github.com/microsoft/PromptWizard - cd PromptWizard - ``` -2) Create and activate a virtual environment - - On Windows - ``` - python -m venv venv - venv\Scripts\activate - ``` - On macOS/Linux: - ``` - python -m venv venv - source venv/bin/activate - ``` -3) Install the package in development mode: - ``` - pip install -e . 
- ``` - - -## Quickstart 🏃 - -There are three main ways to use PromptWizard: -- Scenario 1 : Optimizing prompts without examples -- Scenario 2 : Generating synthetic examples and using them to optimize prompts -- Scenario 3 : Optimizing prompts with training data - -**NOTE** : Refer this [notebook](demos/scenarios/dataset_scenarios_demo.ipynb) to get a detailed understanding of the usage for each of the scenarios. **This serves as a starting point to understand the usage of PromptWizard** - -#### High level overview of using PromptWizard -- Decide your scenario -- Fix the configuration and environmental varibles for API calling - - Use ```promptopt_config.yaml``` to set configurations. For example for GSM8k this [file](demos/gsm8k/configs/promptopt_config.yaml) can be used - - Use ```.env``` to set environmental varibles. For GSM8k this [file](demos/gsm8k/.env) can be used - ``` - USE_OPENAI_API_KEY="XXXX" - # Replace with True/False based on whether or not to use OPENAI API key - - # If the first variable is set to True then fill the following two - OPENAI_API_KEY="XXXX" - OPENAI_MODEL_NAME ="XXXX" - - # If the first variable is set to False then fill the following three - AZURE_OPENAI_ENDPOINT="XXXXX" - # Replace with your Azure OpenAI Endpoint - - OPENAI_API_VERSION="XXXX" - # Replace with the version of your API - - AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="XXXXX" - # Create a deployment for the model and place the deployment name here. - ``` -- Run the code - - To run PromptWizard on your custom dataset please jump [here](#run-on-custom-dataset) - -#### Running PromptWizard with training data (Scenario 3) -- We support [GSM8k](https://huggingface.co/datasets/openai/gsm8k), [SVAMP](https://huggingface.co/datasets/ChilleD/SVAMP), [AQUARAT](https://huggingface.co/datasets/deepmind/aqua_rat) and [Instruction_Induction(BBII)](https://github.com/xqlin98/INSTINCT/tree/main/Induction/experiments/data/instruction_induction/raw) datasets -- Please note that time taken for prompt optimzation is dependent on the dataset. In our experiments for the above mentioned datasets, it took around 20 - 30 minutes on average. - -#### Running on GSM8k (AQUARAT/SVAMP) - -- Please note that this code requires access to LLMs via API calling for which we support AZURE endpoints or OPENAI keys -- Set the AZURE endpoint configurations in [.env](demos/gsm8k/.env) -- Follow the steps in [demo.ipynb](demos/gsm8k/demo.ipynb) to download the data, run the prompt optimization and carry out inference. - -#### Running on BBII - -- BBII has many datasets in it, based on the dataset set the configs [here](demos/bbh/configs/promptopt_config.yaml) -- In configs ```task_description```,```base_instruction``` and ```answer_format``` need to be changed for different datasets in BBII, the rest of the configs remain the same -- A demo is presented in [demo.ipynb](demos/bbh/demo.ipynb) - - - -## Run on Custom Datasets 🗃️ - -### Create Custom Dataset -- Our code expects the dataset to be in ```.jsonl``` file format -- Both the train and test set follow the same format -- Every sample in the ```.jsonl``` should have 2 fields : - 1) ```question``` : It should contain the complete question that is to asked to the LLM - 2) ```answer``` : It should contain the ground truth answer which can be verbose or concise - - -### Run on Custom Dataset - -NOTE : Refer to [demos](demos) folder for examples of folders for four datasets. The ```.ipynb``` in each of the folders shows how to run PromptWizard on that particular dataset. 
A similar procedure can be followed for a new dataset. Below is the explanation of each of the components of the ```.ipynb``` and the dataset specifc folder structure in detail - -#### Steps to be followed for custom datasets - -1) Every new dataset needs to have the following - - ```configs``` folder to store files for defining optimization hyperparameters and setup configs - - ```data``` folder to store ```train.jsonl``` and ```test.jsonl``` as curated [here](#create-custom-dataset) (this is done in the notebooks) - - ```.env``` file for environment varibles to be used for API calling - - ```.py/.ipynb``` script to run the code - -2) Set the hyperparameters like number of mutations, refine steps, in-context examples etc. - - Set the following in [promptopt_config.yaml](demos/gsm8k/configs/promptopt_config.yaml) : - - ```task_description``` : Desciption of the task at hand which will be fed into the prompt - - For GSM8k a description like the following can be used - ``` - You are a mathematics expert. You will be given a mathematics problem which you need to solve - ``` - - ```base_instruction``` : Base instruction in line with the dataset - - A commonly used base instruction could be - ``` - Lets think step by step. - ``` - - ```answer_format``` : Instruction for specifying the answer format - - It is crucial to set the ```answer_format``` properly to ensure correct extraction by ```def extract_final_answer()``` - - Answer format could be : - ``` - At the end, wrap only your final option between and tags - ``` - Then in ```def extract_final_answer()``` we can simply write code to extract string between the tags - - - ```seen_set_size``` : The number of train samples to be used for prompt optimization - - In our experiments we set this to be 25. In general any number between 20-50 would work - - ```few_shot_count``` : The number of in-context examples needed in the prompt - - The value can be set to any positive integer based on the requirement - - For generating zero-shot prompts, set the values to a small number (i.e between 2-5) and after the final prompt is generated the in-context examples can be removed. We suggest using some in-context examples as during the optimization process the instructions in the prompt are refined using in-context examples hence setting it to a small number will give better zero-shot instructions in the prompt - - ```generate_reasoning``` : Whether or not to generate reasoning for the in-context examples - - In our experiments we found it to improve the prompt overall as it provides a step-by-step approach to reach the final answer. However if there is a constraint on the prompt length or number of prompt tokens, it can be turned off to get smaller sized prompts - - ```generate_expert_identity``` and ```generate_intent_keywords``` : Having these helped improve the prompt as they help making the prompt relevant to the task - - Refer ```promptopt_config.yaml``` files in folders present [here](demos) for the descriptions used for AQUARAT, SVAMP and GSM8k. 
For BBII refer [description.py](demos/bbh/description.py) which has the meta instructions for each of the datasets - - Following are the global parameters which can be set based on the availability of the training data - - ```run_without_train_examples``` is a global hyperparameter which can be used when there are no training samples and in-context examples are not required in the final prompt - - ```generate_synthetic_examples``` is a global hyperparameter which can be used when there are no training samples and we want to generate synthetic data for training - - ```use_examples``` is a global hyperparameter which can be used to optimize prompts using training data -3) Create a dataset specific class which inherits ```class DatasetSpecificProcessing``` similar to ```GSM8k(DatasetSpecificProcessing)``` in [demo.ipynb](demos/gsm8k/demo.ipynb) and define the following functions in it - 1) In ```def extract_answer_from_output()``` : This is a dataset specific function, given the ```answer``` from the dataset it should extract and return a concise form of the answer. Note that based on the dataset it can also simply return the ```answer``` as it is like in case of SVAMP and AQUARAT datasets - 2) ```def extract_final_answer()``` : This is a LLM output specific function, given the verbose answer from the LLM it should extract and return the concise final answer - 3) Define ```def access_answer()``` : This function takes an input the LLM output, then does the following: - - Extracts the concise answer using ```def extract_final_answer()``` from the LLM output as defined above - - Evaluates the extracted answer with the ground truth and retuns - - Extracted answer from LLM output - - Boolean value indicating if answer is correct or not - - The evaluation done here is dataset specific, for datasets like GSM8k, SVAMP and AQUARAT which have final answer as an number, we can do a direct match between the numbers generated and the ground truth, while for datasets where the answer is a sentence or paragraph it would be better to do evaluation with llm-as-a-judge, to compare the generated and ground truth paragraph/sentence. An example is available in ```def access_answer()``` in [this](demos/bbh/demo.ipynb) notebook +The application is pre-configured for seamless deployment on Vercel: +1. Push your code to a GitHub repository +2. Connect the repository to Vercel +3. Set up environment variables in the Vercel dashboard: + (refer to .env.example for all env key names) + - `GOOGLE_API_KEY`: Your Gemini API key (required for testing) + - `OPENAI_API_KEY`: Your OpenAI API key (optional) +4. Click "Deploy" + +> **Note:** For detailed Vercel deployment instructions, see our [Vercel Deployment Guide](VERCEL_DEPLOYMENT.md). + +### Other Deployment Options + +- **Docker**: Use our Docker configuration for containerized deployment +- **Cloud Platforms**: Deploy to AWS, Azure, or Google Cloud +- **Traditional Hosting**: Deploy to any platform that supports Node.js and Python + +For more deployment options, see our [Deployment Guide](DEPLOYMENT.md). + +## System Architecture 🏗️ + +The PromptWizard UI system consists of three main components: + +1. **Frontend (Next.js)**: Provides the user interface for configuring and running prompt optimizations +2. **Backend API (Flask)**: Handles requests from the frontend and communicates with the PromptWizard core +3. 
**PromptWizard Core**: Microsoft's optimization engine that performs the actual prompt optimization
+
+### Data Flow Diagram
+
+A request flows from the Next.js UI to the Flask API, which runs the PromptWizard optimization loop against the selected LLM and returns the optimized prompt to the UI.
+
+## Integration with Microsoft PromptWizard 🔄
+
+This UI is built on top of Microsoft's PromptWizard framework, providing a user-friendly interface for prompt optimization. It leverages the powerful core functionality of PromptWizard while making it accessible to users without coding experience.
+
+### What Microsoft PromptWizard Provides:
+- Core prompt optimization algorithms
+- Training and evaluation logic
+- Dataset handling capabilities
+
+### What Our UI Adds:
+- Intuitive tabbed interface
+- Visual configuration of parameters
+- One-click optimization
+- Export and sharing capabilities
+- Quick testing with pre-configured values
+- Simplified deployment options
 
 ## How PromptWizard Works 🔍
 
-- Using the problem description and initial prompt instruction, PW generates variations of the instruction by prompting LLMs to mutate it. Based on performance, the best prompt is selected. PW incorporates a critique component that provides feedback, thus guiding and refining the prompt over multiple iterations. 
+- Using the problem description and initial prompt instruction, PW generates variations of the instruction by prompting LLMs to mutate it. Based on performance, the best prompt is selected. PW incorporates a critique component that provides feedback, thus guiding and refining the prompt over multiple iterations.
 - PW also optimizes in-context examples. PW selects a diverse set of examples from the training data, identifying positive and negative examples based on their performance with
-the modified prompt. Negative examples help inform further prompt refinements. 
+the modified prompt. Negative examples help inform further prompt refinements.
-- Examples and instructions are sequentially optimized, using the critique to generate synthetic examples that address the current prompt’s weaknesses. These examples are integrated to further refine the prompt. 
+- Examples and instructions are sequentially optimized, using the critique to generate synthetic examples that address the current prompt’s weaknesses. These examples are integrated to further refine the prompt.
-- PW generates detailed reasoning chains via Chain-of-Thought (CoT), enriching the prompt’s capacity for problem-solving. 
+- PW generates detailed reasoning chains via Chain-of-Thought (CoT), enriching the prompt’s capacity for problem-solving.
 - PW aligns prompts with human reasoning by integrating task intent and expert personas, enhancing both model performance and interpretability.
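+
+A simplified sketch of this mutate-and-refine loop, assuming a generic `call_llm(prompt) -> str` helper rather than PromptWizard's actual internals:
+
+```python
+def optimize(call_llm, task_description, instruction, mutation_rounds=3, refine_steps=2):
+    """Illustrative only: mutate an instruction, then critique and refine each candidate."""
+    best = instruction
+    for _ in range(mutation_rounds):
+        # Mutation: ask the LLM for a restyled variant of the current best instruction
+        candidate = call_llm(
+            f"Task: {task_description}\nRewrite this instruction in a different style:\n{best}"
+        )
+        for _ in range(refine_steps):
+            # Critique and refine: feed the candidate back for targeted improvement
+            candidate = call_llm(
+                f"Critique and improve this instruction for the task '{task_description}':\n{candidate}"
+            )
+        best = candidate  # carry the refined candidate into the next mutation round
+    return best
+```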
-## Configurations ⚙️ 
+## Configurations ⚙️
 
 Here we define the various hyperparameters used in prompt optimization process found in [promptopt_config.yaml](demos/gsm8k/configs/promptopt_config.yaml)
 
 - ```mutate_refine_iterations```: Number of iterations for conducting mutation of task description followed by refinement of instructions
 - ```mutation_rounds```: Number of rounds of mutation to be performed when generating different styles
-- ```refine_task_eg_iterations```: Number of iterations for refining task description and in context examples 
+- ```refine_task_eg_iterations```: Number of iterations for refining task description and in context examples
 - ```style_variation```: Number of thinking style variations to be used in prompt mutation
 - ```questions_batch_size```: Number of questions to be asked to LLM in a single batch, during training step
 - ```min_correct_count```: Minimum number of batches of questions to correctly answered, for a prompt to be considered as performing good
@@ -213,14 +337,55 @@
 - ```seen_set_size```: Number of samples from trainset to be used for training
 - ```few_shot_count```: Number of in-context examples required in final prompt
 
+## Web UI Features 🖥️
+
+The PromptWizard Web UI provides a user-friendly interface for prompt optimization with the following features:
+
+### Tabbed Interface
+- **Basic Info**: Configure task description, base instruction, answer format, model, and API key
+- **Data Selection**: Choose datasets, configure in-context examples, and preview data
+- **Prompt Configuration**: Select optimization scenarios and configure advanced parameters
+- **Evaluation**: Set evaluation criteria and manage optimization sessions
+
+#### Basic Info Tab
+
+#### Data Selection Tab
+
+#### Prompt Configuration Tab
+
+#### Evaluation Tab
+
+### Advanced Features
+- **Advanced Optimization Parameters**: Fine-tune the optimization process with parameters like mutate refine iterations, refine task examples iterations, and more
+- **Advanced Evaluation Metrics**: Use metrics like Faithfulness, Semantic Similarity, Context Relevancy, and more
+- **Dataset Preview**: Visualize and inspect your dataset before optimization
+- **Multimodal Support**: Optimize prompts for image-based tasks with image uploads
+- **Session Management**: Save and load optimization sessions for later use
+- **Test Values Button**: ✨ Quickly populate the form with pre-configured values for image generation prompts to test functionality
+
+### Results Page
+
+The results page displays:
+- The optimized prompt
+- Performance metrics and evaluation scores
+- Comparison with the original prompt
+- Export options (copy to clipboard, download as text, export configuration)
+
+### Deployment Options
+- **Local Development**: Run the UI and API locally for development
+- **Docker Deployment**: Use Docker for containerized deployment ([see Docker instructions](DEPLOYMENT.md#docker-deployment))
+- **Vercel Deployment**: One-click deployment to Vercel ([see Vercel guide](VERCEL_DEPLOYMENT.md))
+- **Cloud Deployment**: Deploy to platforms like Azure, AWS, or GCP ([see Cloud instructions](DEPLOYMENT.md#cloud-deployment))
+
 ## Best Practices 💡 
-Following are some of best pracitices we followed during are experiments 
+Following are some of the best practices we followed during our experiments:
 - Regarding the parameters in [promptopt_config.yaml](demos/gsm8k/configs/promptopt_config.yaml)
   - We found the best performing
values for ```mutate_refine_iterations```,```mutation_rounds```,```refine_task_eg_iterations``` to be 3 or 5 - Other parameters have been set to their ideal values. ```seen_set_size``` can be increased to 50 and ```few_shot_count``` can be set based on the use case - The prompts generated at the end of the training process are usually very detailed, however user supervision can help tune it further for the task at hand -- Trying both configurations of having synthetic in-context examples or in-context examples from the train set can be tried to find the best prompt based on use case. +- Trying both configurations of having synthetic in-context examples or in-context examples from the train set can be tried to find the best prompt based on use case. +- When using the Web UI, the "Test Values" button provides a good starting point with pre-configured values for image generation prompts ## Results 📈 @@ -231,35 +396,57 @@ thresholds, maintaining the highest p(τ) values, indicating that it consistentl possible accuracy across all tasks

- - The fiqure shows the performance profile curve for the instruction induction tasks. The performance profile curve visualizes how frequently different approaches’ performance is within a given distance of the best performance. In this curve, the x-axis (τ) represents the performance ratio relative to the best-performing method, and the y-axis (p(τ )) reflects the fraction of tasks where a method’s performance is within this ratio. So for a given -method, the curve tells what percentage of the tasks are within τ distance to the best performance. +method, the curve tells what percentage of the tasks are within τ distance to the best performance. +## Contributing 🤝 -## How to contribute: ✋ -This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repositories using our CLA. -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact opencode@microsoft.com with any additional questions or comments. +### We'd Love Your Help! -## Citation 📝 +This project was built in a single coding session, so there's plenty of room for improvement and expansion. Your contributions are not just welcome—they're essential to making this tool better for everyone! + +### Areas Where You Can Help: -If you make use of our work, please cite our paper: +- **Feature Implementation**: Help complete and test features for different LLM models +- **UI Enhancements**: Improve the user interface and experience +- **Documentation**: Enhance the documentation with examples and tutorials +- **Bug Fixes**: Help identify and fix bugs in the codebase +- **Testing**: Contribute to testing different features and models + +### How to Contribute: + +1. **Fork the repository** +2. **Create a feature branch**: `git checkout -b feature/amazing-feature` +3. **Commit your changes**: `git commit -m 'Add some amazing feature'` +4. **Push to the branch**: `git push origin feature/amazing-feature` +5. **Open a Pull Request** + +### Original PromptWizard Contribution Guidelines: + +This project builds on Microsoft's PromptWizard. For contributions to the core framework, please note that most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/). 
+
+## Citation 📝
 
 ```
 @misc{agarwal2024promptwizardtaskawarepromptoptimization,
-      title={PromptWizard: Task-Aware Prompt Optimization Framework}, 
+      title={PromptWizard: Task-Aware Prompt Optimization Framework},
       author={Eshaan Agarwal and Joykirat Singh and Vivek Dani and Raghav Magazine and Tanuja Ganu and Akshay Nambi},
       year={2024},
       eprint={2405.18369},
       archivePrefix={arXiv},
       primaryClass={cs.CL},
-      url={https://arxiv.org/abs/2405.18369}, 
+      url={https://arxiv.org/abs/2405.18369},
 }
 ```
 
-## Responsible AI Considerations 
+## Responsible AI Considerations
 
 For guidelines and best practices related to Responsible AI, please refer to our [Responsible AI Guidelines](RESPONSIBLE_AI.md).
diff --git a/VERCEL_DEPLOYMENT.md b/VERCEL_DEPLOYMENT.md
new file mode 100644
index 00000000..6139d529
--- /dev/null
+++ b/VERCEL_DEPLOYMENT.md
@@ -0,0 +1,130 @@
+# Deploying PromptWizard on Vercel
+
+This guide provides step-by-step instructions for deploying PromptWizard on Vercel.
+
+## Prerequisites
+
+- A [Vercel](https://vercel.com) account
+- A GitHub account (for connecting your repository)
+- API keys for the LLM services you plan to use (Gemini, OpenAI, etc.)
+
+## Deployment Steps
+
+### 1. Fork or Clone the Repository
+
+First, fork or clone the PromptWizard repository to your GitHub account.
+
+### 2. Connect to Vercel
+
+1. Log in to your Vercel account
+2. Click "Add New..." and select "Project"
+3. Import your GitHub repository
+4. Select the PromptWizard repository
+
+### 3. Configure Project Settings
+
+1. **Framework Preset**: Select "Other"
+2. **Root Directory**: Leave as is (should be the root of the repository)
+3. **Build Command**: Leave blank (defined in vercel.json)
+4. **Output Directory**: Leave blank (defined in vercel.json)
+
+### 4. Environment Variables
+
+Add the following environment variables:
+
+| Name | Value | Description |
+|------|-------|-------------|
+| `GOOGLE_API_KEY` | Your Gemini API key | Required for Gemini model |
+| `OPENAI_API_KEY` | Your OpenAI API key | Required for GPT-4 model |
+| `NEXT_PUBLIC_API_URL` | `/api` | API URL for the frontend |
+
+You can add these as plain text or as [Vercel Secrets](https://vercel.com/docs/concepts/projects/environment-variables#securing-environment-variables) for better security.
+
+### 5. Deploy
+
+Click "Deploy" and wait for the deployment to complete. Vercel will automatically build and deploy both the API and UI components based on the configuration in `vercel.json`.
+
+## Vercel Configuration
+
+The `vercel.json` file in the repository root configures the deployment:
+
+```json
+{
+  "version": 2,
+  "builds": [
+    {
+      "src": "api/app.py",
+      "use": "@vercel/python"
+    },
+    {
+      "src": "ui/package.json",
+      "use": "@vercel/next"
+    }
+  ],
+  "routes": [
+    {
+      "src": "/api/(.*)",
+      "dest": "api/app.py"
+    },
+    {
+      "src": "/(.*)",
+      "dest": "ui/$1"
+    }
+  ]
+}
+```
+
+This configuration:
+- Builds the Python API using the Vercel Python runtime
+- Builds the Next.js UI using the Vercel Next.js runtime
+- Routes API requests to the Python backend
+- Routes all other requests to the Next.js frontend
+- Relies on the environment variables you configure in the Vercel dashboard (step 4 above)
+
+## Vercel Serverless Functions Limitations
+
+Vercel serverless functions have some limitations to be aware of:
+
+1. **Execution Time**: Functions have a maximum execution time of 10 seconds on the Hobby plan and 60 seconds on the Pro plan. Prompt optimization can take longer than this.
+
+2. 
**Memory**: Functions are limited to 1GB of memory on the Hobby plan and 3GB on the Pro plan. + +3. **Cold Starts**: Serverless functions may experience cold starts, which can add latency to the first request after a period of inactivity. + +For production use with heavy optimization workloads, consider: +- Upgrading to a Vercel Pro plan +- Using a different deployment option like Docker on a VPS +- Implementing a queue system for long-running tasks + +## Troubleshooting + +### API Connection Issues + +If the UI cannot connect to the API, check: +1. The `NEXT_PUBLIC_API_URL` environment variable is set correctly +2. The API routes in `vercel.json` are correct +3. The API is successfully deployed (check Vercel logs) + +### Long-Running Operations + +If prompt optimization times out: +1. Consider implementing a queue system for long-running tasks +2. Break down the optimization process into smaller steps +3. Use a different deployment option for production workloads + +### API Key Issues + +If you encounter API key errors: +1. Verify the API keys are correctly set in the Vercel environment variables +2. Check that the API keys have the necessary permissions +3. Ensure you have sufficient quota/credits for the LLM services + +## Monitoring and Logs + +Vercel provides logs and monitoring for your deployment: +1. Go to your project in the Vercel dashboard +2. Click on "Deployments" to see all deployments +3. Select a deployment to view its logs +4. Use the "Functions" tab to see serverless function metrics + +For more detailed monitoring, consider integrating with services like Sentry or LogRocket. diff --git a/api/Dockerfile b/api/Dockerfile new file mode 100644 index 00000000..50dbbaac --- /dev/null +++ b/api/Dockerfile @@ -0,0 +1,22 @@ +FROM python:3.9-slim + +WORKDIR /app + +# Copy requirements file +COPY requirements.txt . + +# Install dependencies +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY . . + +# Expose port +EXPOSE 5000 + +# Set environment variables +ENV FLASK_APP=app.py +ENV FLASK_ENV=production + +# Run the application with Gunicorn +CMD ["gunicorn", "--bind", "0.0.0.0:5000", "app:app"] diff --git a/api/README.md b/api/README.md new file mode 100644 index 00000000..6c581fe1 --- /dev/null +++ b/api/README.md @@ -0,0 +1,127 @@ +# PromptWizard API 🚀 + +Backend API for the PromptWizard UI. + +

+ API Architecture +
+ PromptWizard API architecture and components +

+ +## Overview + +This API provides endpoints for optimizing prompts using the PromptWizard framework. It's built with Flask and designed to work with the PromptWizard UI frontend. + +

+ API Sequence Diagram +
+ Sequence diagram showing the interaction between UI, API, and LLM services +

+ +## API Endpoints + +

+ API Endpoints +
+ Overview of PromptWizard API endpoints and their functions +

+ +### `POST /api/optimize_prompt` + +Optimizes a prompt based on the provided parameters. + +

+ Optimize Prompt Flow +
+ The prompt optimization process flow in the API +

+ +**Request Body:** + +```json +{ + "taskDescription": "String - Description of the task", + "baseInstruction": "String - Initial prompt to optimize", + "answerFormat": "String - Desired output format", + "model": "String - Model to use (Gemini, GPT-4, etc.)", + "mutationRounds": "Number - Number of mutation rounds", + "refineSteps": "Number - Number of refinement steps", + "mutateRefineIterations": "Number - Number of iterations for mutation and refinement", + "refineTaskEgIterations": "Number - Number of iterations for refining task examples", + "refineInstruction": "Boolean - Whether to refine instructions after mutation", + "minCorrectCount": "Number - Minimum number of correct answers required", + "maxEvalBatches": "Number - Maximum number of evaluation batches", + "topN": "Number - Number of top prompts to consider", + "questionsBatchSize": "Number - Batch size for questions during training", + "useExamples": "Boolean - Whether to use in-context examples", + "generateSyntheticExamples": "Boolean - Whether to generate synthetic examples", + "generateExpertIdentity": "Boolean - Whether to generate expert identity", + "generateIntentKeywords": "Boolean - Whether to generate intent keywords", + "styleVariation": "Number - Number of style variations to generate", + "fewShotCount": "Number - Number of few-shot examples to include", + "dataset": "String - Dataset to use (GSM8k, SVAMP, etc.)", + "evaluationCriteria": "Array - Basic criteria for evaluation", + "advancedEvaluationMetrics": "Array - Advanced metrics for evaluation", + "enableMultimodal": "Boolean - Whether to enable multimodal support", + "saveSession": "Boolean - Whether to save the optimization session", + "sessionName": "String - Name for the saved session", + "apiKey": "String - API key for the selected model" +} +``` + +**Response:** + +```json +{ + "success": true, + "optimizedPrompt": "String - The optimized prompt" +} +``` + +## Getting Started + +### Prerequisites + +- Python 3.8+ +- API keys for LLMs (Gemini, OpenAI, etc.) + +### Installation + +1. Install dependencies: + ``` + pip install -r requirements.txt + ``` + +2. Set up environment variables: + - Create a `.env` file in the root directory + - Add your API keys: + ``` + GOOGLE_API_KEY=your_gemini_api_key + OPENAI_API_KEY=your_openai_api_key + ``` + +### Running the API + +``` +python app.py +``` + +The API will be available at `http://localhost:5000`. + +## Deployment + +The API is configured for deployment on Vercel using the Vercel Python runtime. 
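+
+## Example Request
+
+A minimal Python client for the endpoint above (the repository's `api/test_api.py` exercises the same flow):
+
+```python
+import requests
+
+payload = {
+    "taskDescription": "Solve grade-school math word problems",
+    "baseInstruction": "Let's think step by step.",
+    "model": "Gemini",
+    "mutationRounds": 2,
+    "refineSteps": 1,
+    "useExamples": False,
+    "dataset": "Custom",
+    "evaluationCriteria": ["Clarity", "Accuracy"],
+}
+
+resp = requests.post("http://localhost:5000/api/optimize_prompt", json=payload, timeout=300)
+resp.raise_for_status()
+print(resp.json()["optimizedPrompt"])
+```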
+ +## Error Handling + +The API returns appropriate HTTP status codes and error messages: + +- `200 OK`: Request successful +- `400 Bad Request`: Invalid request parameters +- `500 Internal Server Error`: Server-side error + +## Security Considerations + +- API keys are stored in environment variables +- CORS is enabled for the frontend +- Input validation is performed on all requests diff --git a/api/app.py b/api/app.py new file mode 100644 index 00000000..896fc66c --- /dev/null +++ b/api/app.py @@ -0,0 +1,415 @@ +from flask import Flask, request, jsonify +from flask_cors import CORS +import os +import yaml +import google.generativeai as genai +from dotenv import load_dotenv +from dataclasses import dataclass +import logging + +# Configure logging +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + +app = Flask(__name__) +CORS(app) # Enable CORS for all routes + +# Load environment variables +load_dotenv() + +# Configure global error handlers to ensure JSON responses +@app.errorhandler(404) +def not_found(e): + return jsonify({"success": False, "error": "Endpoint not found"}), 404 + +@app.errorhandler(500) +def server_error(e): + return jsonify({"success": False, "error": "Internal server error"}), 500 + +@app.errorhandler(Exception) +def handle_exception(e): + logger.error(f"Unhandled exception: {str(e)}") + return jsonify({ + "success": False, + "error": str(e) if app.debug else "Internal server error" + }), 500 + +# Health check endpoint +@app.route('/api/health', methods=['GET']) +def health_check(): + return jsonify({ + "success": True, + "status": "API is running", + "version": "1.0.0" + }) + +# API key validation endpoint +@app.route('/api/validate_key', methods=['POST']) +def validate_key(): + try: + data = request.json + api_key = data.get('apiKey', '') + model = data.get('model', 'Gemini') + + if not api_key: + return jsonify({ + "success": False, + "valid": False, + "message": "No API key provided" + }) + + # For Gemini model + if model == 'Gemini': + try: + # Try to initialize Gemini with the provided key + genai.configure(api_key=api_key) + model = genai.GenerativeModel('gemini-2.0-flash') + + # Try a simple generation to validate the key + logger.info("Testing API key with a simple generation...") + response = model.generate_content("Hello, testing API key validation.") + logger.info(f"API key validation successful: {response.text[:30]}...") + + return jsonify({ + "success": True, + "valid": True, + "message": "API key is valid" + }) + except Exception as e: + error_message = str(e) + logger.error(f"API key validation failed: {error_message}") + + if "API key not valid" in error_message: + message = "Invalid API key" + else: + message = f"Error validating API key: {error_message}" + + return jsonify({ + "success": False, + "valid": False, + "message": message + }) + + # For other models (mock validation for now) + return jsonify({ + "success": True, + "valid": True, + "message": f"API key validation for {model} is not implemented yet. Assuming valid." 
+ }) + + except Exception as e: + logger.error(f"Error in validate_key endpoint: {str(e)}") + return jsonify({ + "success": False, + "valid": False, + "message": f"Error: {str(e)}" + }), 500 + +@dataclass +class PromptOptimizationParams: + task_description: str + base_instruction: str + answer_format: str + max_iterations: int + evaluation_criteria: list + temperature: float + mutate_refine_iterations: int = 3 + refine_task_eg_iterations: int = 3 + refine_instruction: bool = True + min_correct_count: int = 3 + max_eval_batches: int = 6 + top_n: int = 1 + questions_batch_size: int = 1 + generate_expert_identity: bool = True + generate_intent_keywords: bool = False + style_variation: int = 5 + few_shot_count: int = 5 + advanced_evaluation_metrics: list = None + +class PromptOptimizer: + def __init__(self, setup_config, task_description): + self.setup_config = setup_config + self.task_description = task_description + + # Initialize Gemini + api_key = os.getenv("GOOGLE_API_KEY") + logger.info(f"API key from environment: {'VALID' if api_key and api_key != 'your-gemini-api-key' else 'NOT VALID OR MISSING'}") + + if not api_key or api_key == "your-gemini-api-key": + logger.warning("GOOGLE_API_KEY not found or not valid in environment variables") + self.use_mock = True + else: + try: + # Configure the API + genai.configure(api_key=api_key) + + # Test the API with a simple request + logger.info("Testing Gemini API connection...") + self.model = genai.GenerativeModel('gemini-2.0-flash') + test_response = self.model.generate_content("Hello, testing the API connection.") + logger.info(f"API test successful: {test_response.text[:30]}...") + + self.use_mock = False + logger.info("Using REAL Gemini API for optimization") + except Exception as e: + import traceback + error_traceback = traceback.format_exc() + logger.error(f"Error initializing Gemini: {str(e)}\n{error_traceback}") + self.use_mock = True + logger.warning("Falling back to MOCK implementation due to API initialization error") + + def optimize_prompt(self, base_prompt, criteria): + if self.use_mock: + # Return a mock optimized prompt for testing + logger.info(f"Using MOCK optimization (no valid API key) for: {base_prompt[:50]}...") + + # Create a simple mock optimization by adding some text + optimized = base_prompt + "\n\nAdditional instructions: Please ensure all responses are clear, concise, and directly address the query. Maintain a helpful and informative tone throughout." + + # Log a warning about using mock implementation + logger.warning("USING MOCK IMPLEMENTATION: No valid API key provided. To use the actual Gemini API, please provide a valid API key in the .env file or through the UI.") + return optimized + else: + # Use the actual Gemini model + try: + chat = self.model.start_chat(history=[]) + + optimization_prompt = f""" + Task: {self.task_description} + Base prompt: {base_prompt} + Evaluation criteria: {', '.join(criteria)} + + Please optimize this prompt to better meet the evaluation criteria. + Return only the optimized prompt without any explanations. 
+ """ + + logger.info(f"Sending optimization request to Gemini with prompt: {optimization_prompt[:100]}...") + response = chat.send_message(optimization_prompt) + logger.info(f"Received response from Gemini: {response.text[:100]}...") + return response.text + except Exception as e: + import traceback + error_traceback = traceback.format_exc() + logger.error(f"Error in optimize_prompt: {str(e)}\n{error_traceback}") + # Fallback to mock response with error details + return base_prompt + f"\n\n[Error occurred during optimization: {str(e)}]" + + def get_best_prompt(self, params, use_examples=False, run_without_train_examples=True, generate_synthetic_examples=False): + current_prompt = params.base_instruction + + logger.info(f"Starting prompt optimization with {params.max_iterations} iterations") + logger.info(f"Task: {params.task_description}") + logger.info(f"Advanced options: generate_expert_identity={params.generate_expert_identity}, generate_intent_keywords={params.generate_intent_keywords}") + logger.info(f"Examples options: use_examples={use_examples}, run_without_train_examples={run_without_train_examples}, generate_synthetic_examples={generate_synthetic_examples}") + + # If generate_expert_identity is enabled, add expert profile to the prompt + if params.generate_expert_identity: + expert_profile = self.generate_expert_profile(params.task_description) + if not self.use_mock: + current_prompt = f"You are an expert in {params.task_description}.\n{expert_profile}\n\n{current_prompt}" + logger.info("Added expert profile to prompt") + + # If generate_intent_keywords is enabled, add keywords to the prompt + if params.generate_intent_keywords: + keywords = self.generate_keywords(params.task_description) + if not self.use_mock: + current_prompt = f"{current_prompt}\n\nKeywords: {keywords}" + logger.info("Added intent keywords to prompt") + + for i in range(params.max_iterations): + logger.info(f"Iteration {i+1}/{params.max_iterations}") + optimized_prompt = self.optimize_prompt( + current_prompt, + params.evaluation_criteria + ) + current_prompt = optimized_prompt + + # Return a tuple with the optimized prompt and None to match the expected return format + return current_prompt, None + + def generate_expert_profile(self, task_description): + """Generate an expert profile based on the task description""" + if self.use_mock: + return "Expert in the field with extensive knowledge and experience." + + try: + prompt = f""" + Generate a detailed expert profile for someone who is highly skilled at: {task_description} + The profile should describe their expertise, background, and skills. + Keep it to 2-3 sentences maximum. + """ + + response = self.model.generate_content(prompt) + return response.text.strip() + except Exception as e: + logger.error(f"Error generating expert profile: {str(e)}") + return "Expert in the field with extensive knowledge and experience." + + def generate_keywords(self, task_description): + """Generate keywords based on the task description""" + if self.use_mock: + return "expertise, knowledge, skills, professional" + + try: + prompt = f""" + Generate 5-7 keywords that capture the essence of this task: {task_description} + Return only the keywords separated by commas. 
+ """ + + response = self.model.generate_content(prompt) + return response.text.strip() + except Exception as e: + logger.error(f"Error generating keywords: {str(e)}") + return "expertise, knowledge, skills, professional" + +@app.route('/api/optimize_prompt', methods=['POST']) +def optimize_prompt(): + try: + data = request.json + logger.info(f"Received optimization request: {data}") + + # Extract parameters from request + task_description = data.get('taskDescription', '') + base_instruction = data.get('baseInstruction', '') + answer_format = data.get('answerFormat', '') + model = data.get('model', 'Gemini') + mutation_rounds = int(data.get('mutationRounds', 3)) + refine_steps = int(data.get('refineSteps', 2)) + mutate_refine_iterations = int(data.get('mutateRefineIterations', 3)) + refine_task_eg_iterations = int(data.get('refineTaskEgIterations', 3)) + refine_instruction = data.get('refineInstruction', True) + min_correct_count = int(data.get('minCorrectCount', 3)) + max_eval_batches = int(data.get('maxEvalBatches', 6)) + top_n = int(data.get('topN', 1)) + questions_batch_size = int(data.get('questionsBatchSize', 1)) + evaluation_criteria = data.get('evaluationCriteria', []) + advanced_evaluation_metrics = data.get('advancedEvaluationMetrics', []) + dataset = data.get('dataset', 'Custom') + custom_dataset = data.get('customDataset') + use_examples = data.get('useExamples', False) + generate_synthetic_examples = data.get('generateSyntheticExamples', False) + generate_expert_identity = data.get('generateExpertIdentity', True) + generate_intent_keywords = data.get('generateIntentKeywords', False) + style_variation = int(data.get('styleVariation', 5)) + few_shot_count = int(data.get('fewShotCount', 5)) + enable_multimodal = data.get('enableMultimodal', False) + save_session = data.get('saveSession', False) + session_name = data.get('sessionName', '') + + # Log dataset information + if dataset == 'Custom' and custom_dataset: + logger.info(f"Custom dataset provided with {len(custom_dataset)} examples") + else: + logger.info(f"Using predefined dataset: {dataset}") + + # If no evaluation criteria provided, use default ones + if not evaluation_criteria: + evaluation_criteria = ["Clarity", "Accuracy", "Completeness"] + + # Create a simple setup config + setup_config = { + 'llm': { + 'model_type': model, + 'temperature': 0.0 + } + } + + # Initialize parameters + params = PromptOptimizationParams( + task_description=task_description, + base_instruction=base_instruction, + answer_format=answer_format, + max_iterations=mutation_rounds, + evaluation_criteria=evaluation_criteria, + temperature=0.0, + mutate_refine_iterations=mutate_refine_iterations, + refine_task_eg_iterations=refine_task_eg_iterations, + refine_instruction=refine_instruction, + min_correct_count=min_correct_count, + max_eval_batches=max_eval_batches, + top_n=top_n, + questions_batch_size=questions_batch_size, + generate_expert_identity=generate_expert_identity, + generate_intent_keywords=generate_intent_keywords, + style_variation=style_variation, + few_shot_count=few_shot_count, + advanced_evaluation_metrics=advanced_evaluation_metrics + ) + + # Get API key from request or environment + api_key = data.get('apiKey') + if api_key: + # If API key is provided in the request, temporarily set it in the environment + logger.info("Using API key from request") + os.environ["GOOGLE_API_KEY"] = api_key + else: + logger.info("No API key in request, using environment variable if available") + + # Initialize optimizer + optimizer = 
PromptOptimizer( + setup_config=setup_config, + task_description=task_description + ) + + # Add custom dataset if provided + examples = None + if dataset == 'Custom' and custom_dataset and use_examples: + try: + # Convert custom dataset to the format expected by the optimizer + examples = [] + for item in custom_dataset: + if 'input' in item and 'output' in item: + examples.append({ + 'question': item['input'], + 'answer': item['output'] + }) + logger.info(f"Using {len(examples)} examples from custom dataset") + except Exception as e: + logger.error(f"Error processing custom dataset: {str(e)}") + examples = None + + # Run optimization + optimized_prompt, _ = optimizer.get_best_prompt( + params, + use_examples=use_examples, + run_without_train_examples=(not use_examples), + generate_synthetic_examples=generate_synthetic_examples + ) + + return jsonify({ + 'success': True, + 'optimizedPrompt': optimized_prompt + }) + + except Exception as e: + import traceback + error_traceback = traceback.format_exc() + logger.error(f"Error in optimize_prompt endpoint: {str(e)}\n{error_traceback}") + + # Return a more user-friendly error message + error_message = str(e) + if "not found in environment variables" in error_message: + error_message = "API key not configured. Please check your API key." + + return jsonify({ + 'success': False, + 'error': error_message, + 'details': error_traceback if app.debug else None + }), 500 + +if __name__ == '__main__': + # Set up proper error handling for production + if os.environ.get('FLASK_ENV') == 'production': + app.config['DEBUG'] = False + app.config['PROPAGATE_EXCEPTIONS'] = False + else: + app.config['DEBUG'] = True + app.config['PROPAGATE_EXCEPTIONS'] = True + + # Run the app + # Try port 5000 first, fallback to 5001 if that's in use + try: + app.run(debug=app.config['DEBUG'], port=5000) + except OSError: + print("Port 5000 is in use, trying port 5001...") + app.run(debug=app.config['DEBUG'], port=5001) diff --git a/api/requirements.txt b/api/requirements.txt new file mode 100644 index 00000000..5eea3b5e --- /dev/null +++ b/api/requirements.txt @@ -0,0 +1,14 @@ +flask==2.3.3 +flask-cors==4.0.0 +python-dotenv==1.0.0 +pyyaml==6.0.1 +google-generativeai==0.3.1 +requests==2.31.0 +openai>=1.14.0 +tiktoken==0.5.2 +nltk +datasets==2.16.0 +pyarrow==15.0.2 +llama-index==0.11.10 +llama-index-core==0.11.10 +gunicorn==21.2.0 diff --git a/api/test_api.py b/api/test_api.py new file mode 100644 index 00000000..8b98c021 --- /dev/null +++ b/api/test_api.py @@ -0,0 +1,36 @@ +import requests +import json + +def test_optimize_prompt_api(): + """Test the optimize_prompt API endpoint.""" + url = "http://localhost:5000/api/optimize_prompt" + + # Test data + data = { + "taskDescription": "Test task description", + "baseInstruction": "This is a test prompt that needs optimization.", + "answerFormat": "JSON", + "model": "Gemini", + "mutationRounds": 2, + "refineSteps": 1, + "useExamples": False, + "dataset": "Custom", + "evaluationCriteria": ["Clarity", "Accuracy"] + } + + # Send request + print("Sending request to API...") + response = requests.post(url, json=data) + + # Print results + print(f"Status code: {response.status_code}") + if response.status_code == 200: + result = response.json() + print("Success:", result["success"]) + print("\nOptimized prompt:") + print(result["optimizedPrompt"]) + else: + print("Error:", response.text) + +if __name__ == "__main__": + test_optimize_prompt_api() diff --git a/configs/llm_config.yaml b/configs/llm_config.yaml new file mode 100644 index 
00000000..88b67e6e --- /dev/null +++ b/configs/llm_config.yaml @@ -0,0 +1,28 @@ +azure_open_ai: + use_azure_ad: true + api_key: "" + azure_endpoint: "" + api_version: "" + models: + - unique_model_id: "gpt-4" + model_name_in_azure: "gpt-4" + deployment_name_in_azure: "gpt-4" + model_type: "chat" + +gemini: + api_key: "${GOOGLE_API_KEY}" + temperature: 0.0 + max_tokens: 1024 + models: + - unique_model_id: "gemini-flash" + model_name: "gemini-2.0-flash" + model_type: "chat" + - unique_model_id: "gemini-flash-vision" + model_name: "gemini-2.0-flash-vision" + model_type: "multi_modal" + +custom_models: + - unique_model_id: "custom-model-1" + class_name: "CustomLLMClass" + path_to_py_file: "path/to/custom_llm.py" + track_tokens: true diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..ba0ae59e --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,27 @@ +version: '3.8' + +services: + api: + build: + context: ./api + dockerfile: Dockerfile + ports: + - "5000:5000" + environment: + - FLASK_APP=app.py + - FLASK_ENV=production + volumes: + - ./api:/app + restart: unless-stopped + + ui: + build: + context: ./ui + dockerfile: Dockerfile + ports: + - "3000:3000" + environment: + - NEXT_PUBLIC_API_URL=http://api:5000 + depends_on: + - api + restart: unless-stopped diff --git a/images/README.md b/images/README.md new file mode 100644 index 00000000..a10789b4 --- /dev/null +++ b/images/README.md @@ -0,0 +1,110 @@ +# PromptWizard UI and API Images + +This directory contains images for the PromptWizard documentation. Below are descriptions of each image and what they should represent. + +## UI Images + +### promptwizard_banner.png +A banner image for the top of the README with the PromptWizard logo and a visually appealing background. + +### promptwizard_ui_showcase.png +A screenshot of the PromptWizard UI showing the tabbed interface with all the new features. This should be a high-quality screenshot of the actual UI. + +### ui_workflow.png +A flowchart showing the workflow of using the PromptWizard UI, from entering the task description to getting the optimized prompt. + +### ui_screenshot.png +A full screenshot of the main UI page showing the tabbed interface. + +### ui_basic_info.png +A screenshot of the Basic Info tab showing the task description, base instruction, model, and API key fields. + +### ui_data_selection.png +A screenshot of the Data Selection tab showing dataset selection and in-context examples configuration. + +### ui_prompt_config.png +A screenshot of the Prompt Configuration tab showing scenario selection and advanced parameters. + +### ui_evaluation.png +A screenshot of the Evaluation tab showing evaluation criteria and session management. + +### ui_results.png +A screenshot of the results page showing the optimized prompt and performance metrics. + +### feature_comparison.png +A comparison chart showing the features available in different optimization scenarios (Scenario 1, 2, and 3). + +### deployment_options.png +A diagram showing the different deployment options for PromptWizard (local, Docker, cloud). + +### ui_features_diagram.png +A diagram showing the relationships between different features in the UI. + +### tabs_workflow.png +A flowchart showing how users navigate between the different tabs in the UI. + +### optimization_process.png +A diagram showing the prompt optimization process flow from the UI perspective. + +### getting_started.png +An illustration for the Getting Started section showing the setup process. 
+ +### contributing.png +An illustration for the Contributing section showing collaboration. + +### video_thumbnail.png +A thumbnail image for the demo video with a play button overlay. + +### test_values_sequence.png +A sequence diagram showing the flow of the Test Values feature, from clicking the button to displaying results. + +## API Images + +### api_architecture.png +A diagram showing the architecture of the PromptWizard API and its components. + +### api_sequence_diagram.png +A sequence diagram showing the interaction between the UI, API, and LLM services. + +### api_endpoints.png +A diagram showing the different API endpoints and their functions. + +### optimize_prompt_flow.png +A flowchart showing the process flow of the optimize_prompt endpoint. + +### health_endpoint.png +A simple diagram showing the health check endpoint flow. + +## System Architecture Diagrams + +### system_architecture.png +A comprehensive diagram showing the complete system architecture with all components and their interactions. + +### data_flow.png +A diagram showing the data flow between components during the optimization process. + +### sequence_diagram.png +A detailed sequence diagram showing the complete flow from user input to optimized prompt. + +### promptwizard_workflow.png +A diagram illustrating the core workflow of Microsoft's PromptWizard framework. + +## Creating the Images + +You can create these images using: +1. Screenshots of the actual UI +2. Diagram tools like draw.io, Lucidchart, or Mermaid +3. Design tools like Figma, Sketch, or Adobe Illustrator + +Please use a consistent style and color scheme across all images: +- Primary color: #4F46E5 (indigo) +- Secondary color: #8B5CF6 (purple) +- Accent color: #EC4899 (pink) +- Background: White or light gray +- Text: Dark gray or black + +## Image Specifications + +- Format: PNG with transparent background where appropriate +- Resolution: At least 1200px wide +- Aspect ratio: Maintain a consistent aspect ratio across similar types of images diff --git a/images/sequenceDiagram1.txt b/images/sequenceDiagram1.txt new file mode 100644 index 00000000..4a76938a --- /dev/null +++ b/images/sequenceDiagram1.txt @@ -0,0 +1,35 @@ +sequenceDiagram + participant User + participant UI as PromptWizard UI + participant API as Backend API + participant LLM as Language Model (Gemini/GPT) + + Note over User,LLM: Initial Setup Phase + User->>UI: Enter task description + User->>UI: Enter/select base instruction + User->>UI: Set mutation rounds (N) + User->>UI: Set refinement steps (M) + User->>UI: Click "Optimize Prompt" + + UI->>API: Send optimization request + + Note over API,LLM: Optimization Process Begins + + API->>API: Initialize parameters + + loop For each mutation round (1 to N) + Note over API,LLM: Mutation Phase + API->>LLM: Send current prompt for mutation + LLM->>API: Return mutated prompt + + Note over API,LLM: Refinement Phase + loop For each refinement step (1 to M) + API->>LLM: Send mutated prompt for refinement + LLM->>API: Return refined prompt + end + + API->>API: Update current prompt with refined version + end + + API->>UI: Return final optimized prompt + UI->>User: Display optimized prompt \ No newline at end of file diff --git a/images/sequenceDiagram2.txt b/images/sequenceDiagram2.txt new file mode 100644 index 00000000..054e605d --- /dev/null +++ b/images/sequenceDiagram2.txt @@ -0,0 +1,69 @@ +sequenceDiagram + title PromptWizard Test Values Feature Flow + + actor User + participant UI as PromptWizard UI + participant API as Backend API 
+ participant LLM as Gemini API + + User->>+UI: Clicks "Test Values" button + + rect rgb(240, 240, 255) + Note over UI: Form Auto-Population Phase + UI->>UI: Fill task description with
image generation prompt + UI->>UI: Fill base instruction with
Ghibli-style aesthetics + UI->>UI: Set model to Gemini + UI->>UI: Set refine steps to 2 + UI->>UI: Configure optimization parameters + UI->>UI: Set evaluation criteria + end + + UI-->>-User: Display populated form + + User->>UI: Reviews pre-filled values + User->>+UI: Enters API key + UI->>UI: Validate API key + UI-->>-User: Show validation result + + User->>+UI: Clicks "Optimize Prompt" button + + rect rgb(255, 240, 240) + Note over UI,API: Request Processing Phase + UI->>+API: Send optimization request with
pre-filled values + API->>API: Parse and validate request + API->>API: Prepare optimization parameters + end + + rect rgb(240, 255, 240) + Note over API,LLM: Optimization Phase + API->>+LLM: Send request to Gemini API + LLM->>LLM: Generate prompt variations + LLM->>LLM: Perform mutation rounds + LLM->>LLM: Evaluate variations + LLM->>LLM: Refine best prompts + LLM-->>-API: Return optimized prompt + end + + rect rgb(255, 255, 240) + Note over API,UI: Result Processing Phase + API->>API: Process optimization results + API->>API: Calculate performance metrics + API->>-UI: Return optimization results + end + + UI->>UI: Format results for display + UI-->>-User: Display optimized prompt
and performance metrics + + rect rgb(240, 255, 255) + Note over User,UI: Post-Optimization Actions + User->>+UI: Click "Copy to Clipboard" + UI-->>-User: Prompt copied to clipboard + + alt Export Configuration + User->>+UI: Click "Export Config" + UI->>UI: Generate YAML configuration + UI-->>-User: Download configuration file + end + end + + Note over User: Ready to use optimized prompt
in actual applications \ No newline at end of file diff --git a/images/test_values_sequence.txt b/images/test_values_sequence.txt new file mode 100644 index 00000000..e700f264 --- /dev/null +++ b/images/test_values_sequence.txt @@ -0,0 +1,35 @@ +sequenceDiagram + title Test Values Feature Flow + + actor User + participant UI as PromptWizard UI + participant API as Backend API + participant LLM as Gemini API + + User->>UI: Clicks "Test Values" button + + Note over UI: Form is populated with
pre-configured values + + UI->>UI: Fill task description with
image generation prompt + UI->>UI: Fill base instruction with
Ghibli-style aesthetics + UI->>UI: Set model to Gemini + UI->>UI: Configure optimization parameters + + User->>UI: Reviews pre-filled values + User->>UI: Enters API key + User->>UI: Clicks "Optimize Prompt" button + + UI->>API: Send optimization request + + Note over API: Process request and
prepare for optimization + + API->>LLM: Send request to Gemini API + + Note over LLM: Generate variations
Perform mutation rounds
Refine prompt + + LLM->>API: Return optimized prompt + API->>UI: Return optimization results + + UI->>User: Display optimized prompt
and performance metrics + + Note over User: Can export or
copy optimized prompt diff --git a/images/video_thumbnail.gif b/images/video_thumbnail.gif new file mode 100644 index 00000000..52ae20aa Binary files /dev/null and b/images/video_thumbnail.gif differ diff --git a/images/video_thumbnail.png b/images/video_thumbnail.png new file mode 100644 index 00000000..a65abfa0 Binary files /dev/null and b/images/video_thumbnail.png differ diff --git a/install.bat b/install.bat new file mode 100644 index 00000000..2f70828f --- /dev/null +++ b/install.bat @@ -0,0 +1,15 @@
+@echo off
+echo Installing PromptWizard UI dependencies...
+
+echo Installing backend dependencies...
+cd api
+pip install -r requirements.txt
+cd ..
+
+echo Installing frontend dependencies...
+cd ui
+npm install
+cd ..
+
+echo Installation complete!
+echo Run 'start.bat' to start the application.
diff --git a/my_project_anonymizer/.env b/my_project_anonymizer/.env new file mode 100644 index 00000000..139a64d3 --- /dev/null +++ b/my_project_anonymizer/.env @@ -0,0 +1,4 @@
+# .env files hold plain KEY=VALUE pairs read by python-dotenv; no Python syntax
+
+MODEL_TYPE="Gemini"
+GOOGLE_API_KEY=your-gemini-api-key-here
diff --git a/my_project_anonymizer/configs/promptopt_config.yaml b/my_project_anonymizer/configs/promptopt_config.yaml new file mode 100644 index 00000000..7215b226 --- /dev/null +++ b/my_project_anonymizer/configs/promptopt_config.yaml @@ -0,0 +1,50 @@
+prompt_technique_name: "critique_n_refine"
+unique_model_id: "gemini-2.0-flash"
+mutate_refine_iterations: 3
+mutation_rounds: 3
+refine_instruction: true
+refine_task_eg_iterations: 2
+top_n: 3
+min_correct_count: 2
+max_eval_batches: 5
+
+# Task Description
+task_description: "Optimize a prompt for identifying and returning coordinates of sensitive data in images for anonymization purposes"
+
+# Initial base instruction
+base_instruction: |
+  Please provide the coordinates for anonymizing any and all sensitive data (such as names, phone numbers, addresses, signatures, bank account details, faces, etc.) in the provided images.
+  Return the coordinates of rectangles that cover all potential sensitive data in JSON format, structured as follows:
+  {
+    "1st_image_name": {
+      "field1_name": [[x1, y1], [x2, y2]],
+      "field2_name": [[x3, y3], [x4, y4]],
+      ...
+    }
+  }
+  Where:
+  - "field1_name", "field2_name" are descriptive names for the sensitive data fields
+  - [x1, y1] is the top-left coordinate of the rectangle
+  - [x2, y2] is the bottom-right coordinate of the rectangle
+  Ensure you identify ALL sensitive information, including patient data, medical info, and personal identifiers.
+  ONLY return the JSON structure with no extra text.
+
+# Answer format specification
+answer_format: "JSON format containing image names as keys and nested objects with field names and coordinate pairs"
+
+# Evaluation criteria
+evaluation_criteria:
+  - "Completeness in identifying all types of sensitive data"
+  - "Accuracy of JSON structure"
+  - "Clarity of field naming"
+  - "Precision of coordinate specifications"
+  - "Adherence to JSON-only response format"
+
+# Optional features
+use_examples: false
+generate_synthetic_examples: false
+run_without_train_examples: true
+generate_expert_identity: true
+generate_intent_keywords: true
+
+
diff --git a/my_project_anonymizer/configs/setup_config.yaml b/my_project_anonymizer/configs/setup_config.yaml new file mode 100644 index 00000000..2c341eda --- /dev/null +++ b/my_project_anonymizer/configs/setup_config.yaml @@ -0,0 +1,12 @@
+llm:
+  model_type: "Gemini"
+  model_name: "gemini-2.0-flash"
+  temperature: 0.0
+  max_tokens: 1024
+dir_info:
+  base_dir: logs
+  log_dir_name: glue_logs
+experiment_name: my_task
+mode: offline
+description: "My custom prompt optimization task"
+
diff --git a/my_project_anonymizer/run_optimization.py b/my_project_anonymizer/run_optimization.py new file mode 100644 index 00000000..e7674937 --- /dev/null +++ b/my_project_anonymizer/run_optimization.py @@ -0,0 +1,110 @@
+import os
+from dotenv import load_dotenv
+import yaml
+import google.generativeai as genai
+from dataclasses import dataclass
+
+# Load environment variables from the .env file and the system environment
+load_dotenv()
+
+@dataclass
+class PromptOptimizationParams:
+    task_description: str
+    base_instruction: str
+    answer_format: str
+    max_iterations: int
+    evaluation_criteria: list
+    temperature: float
+
+class PromptOptimizer:
+    def __init__(self, setup_config, task_description):
+        self.setup_config = setup_config
+        self.task_description = task_description
+
+        # Initialize Gemini; the key name matches .env and the error message below
+        api_key = os.environ.get("GOOGLE_API_KEY")
+        if not api_key or api_key == "your-gemini-api-key-here":
+            print("\nERROR: Valid GOOGLE_API_KEY not found in environment variables.")
+            print("Please set a valid Gemini API key in your environment or .env file.")
+            print("You can get an API key from https://ai.google.dev/")
+            print("\nFor testing purposes, we'll continue with a mock optimization.")
+            self.use_mock = True
+        else:
+            self.use_mock = False
+            genai.configure(api_key=api_key)
+            self.model = genai.GenerativeModel('gemini-2.0-flash')
+
+    def optimize_prompt(self, base_prompt, criteria):
+        if self.use_mock:
+            # Return a mock optimized prompt for testing
+            print("\nMock optimization iteration:")
+            print(f"Base prompt: {base_prompt[:50]}...")
+            print(f"Criteria: {', '.join(criteria)}")
+
+            # Create a simple mock optimization by adding some text
+            optimized = base_prompt + "\n\nAdditional instructions: Please ensure all sensitive data is properly identified and coordinates are precise to the pixel level."
+            return optimized
+        else:
+            # Use the actual Gemini model
+            chat = self.model.start_chat(history=[])
+
+            optimization_prompt = f"""
+            Task: {self.task_description}
+            Base prompt: {base_prompt}
+            Evaluation criteria: {', '.join(criteria)}
+
+            Please optimize this prompt to better meet the evaluation criteria.
+            Return only the optimized prompt without any explanations.
+ """ + + response = chat.send_message(optimization_prompt) + return response.text + + def get_best_prompt(self, params): + current_prompt = params.base_instruction + + print(f"\nStarting prompt optimization with {params.max_iterations} iterations") + print(f"Task: {params.task_description}") + + for i in range(params.max_iterations): + print(f"\nIteration {i+1}/{params.max_iterations}:") + optimized_prompt = self.optimize_prompt( + current_prompt, + params.evaluation_criteria + ) + current_prompt = optimized_prompt + + return current_prompt, None + +def main(): + # Load configurations + with open('configs/promptopt_config.yaml', 'r') as f: + prompt_config = yaml.safe_load(f) + + with open('configs/setup_config.yaml', 'r') as f: + setup_config = yaml.safe_load(f) + + # Extract only the needed parameters from the config + params = PromptOptimizationParams( + task_description=prompt_config['task_description'], + base_instruction=prompt_config['base_instruction'], + answer_format=prompt_config['answer_format'], + evaluation_criteria=prompt_config['evaluation_criteria'], + max_iterations=prompt_config.get('mutate_refine_iterations', 3), # Default to 3 if not found + temperature=setup_config['llm'].get('temperature', 0.0) # Default to 0.0 if not found + ) + + # Initialize optimizer + optimizer = PromptOptimizer( + setup_config=setup_config, + task_description=params.task_description + ) + + # Run optimization + best_prompt, _ = optimizer.get_best_prompt(params=params) + + print("\nBest optimized prompt:") + print(best_prompt) + +if __name__ == "__main__": + main() diff --git a/promptwizard.egg-info/PKG-INFO b/promptwizard.egg-info/PKG-INFO new file mode 100644 index 00000000..b1292de6 --- /dev/null +++ b/promptwizard.egg-info/PKG-INFO @@ -0,0 +1,331 @@ +Metadata-Version: 2.4 +Name: promptwizard +Version: 0.2.2 +Summary: Optimize Prompt +Home-page: https://github.com/microsoft/PromptWizard +Author: The PromptWizard team +Author-email: promptwizard@microsoft.com +License: MIT License +Keywords: PromptWizard +Classifier: Intended Audience :: Science/Research +Classifier: Development Status :: 3 - Alpha +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.8.0 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: datasets +Requires-Dist: tiktoken +Requires-Dist: nltk +Requires-Dist: openai +Requires-Dist: azure-identity +Requires-Dist: azure-search-documents +Requires-Dist: pyyaml~=6.0.1 +Requires-Dist: pyarrow==15.0.2 +Requires-Dist: llama-index==0.11.10 +Requires-Dist: llama-index-core==0.11.10 +Requires-Dist: python-dotenv +Provides-Extra: dev +Requires-Dist: datasets; extra == "dev" +Requires-Dist: tiktoken; extra == "dev" +Requires-Dist: nltk; extra == "dev" +Requires-Dist: openai; extra == "dev" +Requires-Dist: azure-identity; extra == "dev" +Requires-Dist: azure-search-documents; extra == "dev" +Requires-Dist: pyyaml~=6.0.1; extra == "dev" +Requires-Dist: pyarrow==15.0.2; extra == "dev" +Requires-Dist: llama-index==0.11.10; extra == "dev" +Requires-Dist: llama-index-core==0.11.10; extra == "dev" +Requires-Dist: python-dotenv; extra == "dev" +Requires-Dist: black==21.4b0; extra == "dev" +Requires-Dist: flake8>=3.8.3; extra == "dev" +Requires-Dist: isort>=5.5.4; extra == "dev" +Requires-Dist: pre-commit; extra == "dev" +Requires-Dist: pytest; extra == "dev" +Requires-Dist: pytest-xdist; extra == "dev" +Provides-Extra: quality +Requires-Dist: black==21.4b0; extra == "quality" 
+Requires-Dist: flake8>=3.8.3; extra == "quality" +Requires-Dist: isort>=5.5.4; extra == "quality" +Requires-Dist: pre-commit; extra == "quality" +Requires-Dist: pytest; extra == "quality" +Requires-Dist: pytest-xdist; extra == "quality" +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: keywords +Dynamic: license +Dynamic: license-file +Dynamic: provides-extra +Dynamic: requires-dist +Dynamic: requires-python +Dynamic: summary + + +# PromptWizard 🧙 + +

+*Links: Blog Post · Project Website*
+ + +> **PromptWizard: Task-Aware Prompt Optimization Framework**
+> Eshaan Agarwal, Joykirat Singh, Vivek Dani, Raghav Magazine, Tanuja Ganu, Akshay Nambi
+ +## Overview 🌟 +

+*Figure: Overview of the PromptWizard framework*

+
+
+PromptWizard is a discrete prompt optimization framework that employs a self-evolving mechanism: the LLM generates, critiques, and refines its own prompts and examples, continuously improving through iterative feedback and synthesis. This self-adaptive approach ensures holistic optimization by evolving both the instructions and the in-context learning examples for better task performance.
+
+The three key components of PromptWizard are the following:
+
+- Feedback-driven refinement: the LLM generates, critiques, and refines its own prompts and examples, continuously improving through iterative feedback and synthesis
+- Critique and synthesis of diverse examples: generates synthetic examples that are robust, diverse, and task-aware, while optimizing the prompt and the examples in tandem
+- Self-generated Chain-of-Thought (CoT) steps built from a combination of positive, negative, and synthetic examples
+
+The sketch below illustrates the core refinement loop.
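+Conceptually, the feedback-driven refinement component behaves like the loop sketched below. This is a minimal illustration, not the framework's actual implementation: `llm` is a hypothetical stand-in for any text-in/text-out chat-completion call, and the real system additionally scores candidates on mini-batches of training questions before keeping one.
+
+```python
+from typing import Callable
+
+def refine_prompt(llm: Callable[[str], str], task: str, prompt: str,
+                  mutation_rounds: int = 3, refine_steps: int = 2) -> str:
+    """Sketch of the mutate -> critique -> refine loop (illustrative only)."""
+    for _ in range(mutation_rounds):
+        # Mutate: ask the LLM for a stylistic variation of the current prompt
+        candidate = llm(f"Task: {task}\nRewrite this prompt in a different style:\n{prompt}")
+        for _ in range(refine_steps):
+            # Critique: ask the LLM where the candidate falls short
+            critique = llm(f"Task: {task}\nCritique this prompt:\n{candidate}")
+            # Refine: fold the critique back into the candidate
+            candidate = llm("Rewrite the prompt to address this critique.\n"
+                            f"Prompt:\n{candidate}\nCritique:\n{critique}")
+        prompt = candidate  # carry the refined candidate into the next round
+    return prompt
+```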

+*Figure, Stage 1: Iterative optimization of instructions*
+
+*Figure, Stage 2: Sequential optimization of instruction and examples*

+
+## Installation ⬇️
+
+Follow these steps to set up the development environment and install the package:
+
+1) Clone the repository
+   ```
+   git clone https://github.com/microsoft/PromptWizard
+   cd PromptWizard
+   ```
+2) Create and activate a virtual environment
+
+   On Windows:
+   ```
+   python -m venv venv
+   venv\Scripts\activate
+   ```
+   On macOS/Linux:
+   ```
+   python -m venv venv
+   source venv/bin/activate
+   ```
+3) Install the package in development mode:
+   ```
+   pip install -e .
+   ```
+
+
+## Quickstart 🏃
+
+There are three main ways to use PromptWizard:
+- Scenario 1: Optimizing prompts without examples
+- Scenario 2: Generating synthetic examples and using them to optimize prompts
+- Scenario 3: Optimizing prompts with training data
+
+**NOTE**: Refer to this [notebook](demos/scenarios/dataset_scenarios_demo.ipynb) for a detailed walkthrough of each scenario. **It serves as a starting point for understanding the usage of PromptWizard**
+
+#### High-level overview of using PromptWizard
+- Decide your scenario
+- Set the configuration and environment variables for API calls
+  - Use ```promptopt_config.yaml``` to set configurations. For example, for GSM8k this [file](demos/gsm8k/configs/promptopt_config.yaml) can be used
+  - Use ```.env``` to set environment variables. For GSM8k this [file](demos/gsm8k/.env) can be used
+    ```
+    USE_OPENAI_API_KEY="XXXX"
+    # Replace with True/False based on whether or not to use an OpenAI API key
+
+    # If the first variable is set to True then fill the following two
+    OPENAI_API_KEY="XXXX"
+    OPENAI_MODEL_NAME="XXXX"
+
+    # If the first variable is set to False then fill the following three
+    AZURE_OPENAI_ENDPOINT="XXXXX"
+    # Replace with your Azure OpenAI endpoint
+
+    OPENAI_API_VERSION="XXXX"
+    # Replace with the version of your API
+
+    AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="XXXXX"
+    # Create a deployment for the model and place the deployment name here.
+    ```
+- Run the code
+  - To run PromptWizard on your custom dataset, please jump [here](#run-on-custom-dataset)
+
+#### Running PromptWizard with training data (Scenario 3)
+- We support the [GSM8k](https://huggingface.co/datasets/openai/gsm8k), [SVAMP](https://huggingface.co/datasets/ChilleD/SVAMP), [AQUARAT](https://huggingface.co/datasets/deepmind/aqua_rat) and [Instruction_Induction(BBII)](https://github.com/xqlin98/INSTINCT/tree/main/Induction/experiments/data/instruction_induction/raw) datasets
+- Please note that the time taken for prompt optimization depends on the dataset. In our experiments on the datasets mentioned above, it took around 20-30 minutes on average.
+
+#### Running on GSM8k (AQUARAT/SVAMP)
+
+- Please note that this code requires access to LLMs via API calls; we support Azure endpoints and OpenAI keys
+- Set the Azure endpoint configurations in [.env](demos/gsm8k/.env)
+- Follow the steps in [demo.ipynb](demos/gsm8k/demo.ipynb) to download the data, run the prompt optimization and carry out inference. A sketch of how the ```.env``` variables are consumed at runtime follows this section.
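+Below is a minimal sketch of how these variables can select a backend at runtime, mirroring the ```USE_OPENAI_API_KEY``` switch used in this repo. It is illustrative only: ```AZURE_OPENAI_KEY``` is a hypothetical stand-in here, since the repo's own code authenticates to Azure with an Azure AD token provider rather than a key.
+
+```python
+import os
+from dotenv import load_dotenv
+from openai import OpenAI, AzureOpenAI
+
+load_dotenv()  # reads the .env file described above
+
+if os.environ.get("USE_OPENAI_API_KEY") == "True":
+    client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+    model = os.environ["OPENAI_MODEL_NAME"]
+else:
+    client = AzureOpenAI(
+        api_version=os.environ["OPENAI_API_VERSION"],
+        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
+        api_key=os.environ["AZURE_OPENAI_KEY"],  # hypothetical; the repo uses Azure AD tokens
+    )
+    model = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"]
+
+# One call with the selected backend; both clients share the same interface
+response = client.chat.completions.create(
+    model=model,
+    messages=[{"role": "user", "content": "Hello"}],
+    temperature=0.0,
+)
+print(response.choices[0].message.content)
+```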
+
+#### Running on BBII
+
+- BBII contains many datasets; set the configs [here](demos/bbh/configs/promptopt_config.yaml) based on the dataset
+- In the configs, ```task_description```, ```base_instruction``` and ```answer_format``` need to be changed for the different datasets in BBII, while the rest of the configs remain the same
+- A demo is presented in [demo.ipynb](demos/bbh/demo.ipynb)
+
+
+
+## Run on Custom Datasets 🗃️
+
+### Create Custom Dataset
+- Our code expects the dataset to be in ```.jsonl``` file format
+- Both the train and test sets follow the same format
+- Every sample in the ```.jsonl``` should have two fields, i.e. every line is a JSON object such as ```{"question": "...", "answer": "..."}```:
+  1) ```question```: It should contain the complete question that is to be asked to the LLM
+  2) ```answer```: It should contain the ground-truth answer, which can be verbose or concise
+
+
+### Run on Custom Dataset
+
+NOTE: Refer to the [demos](demos) folder for example setups for four datasets. The ```.ipynb``` in each of the folders shows how to run PromptWizard on that particular dataset; a similar procedure can be followed for a new dataset. Below is a detailed explanation of each component of the ```.ipynb``` and the dataset-specific folder structure
+
+#### Steps to be followed for custom datasets
+
+1) Every new dataset needs to have the following
+   - ```configs``` folder to store files for defining optimization hyperparameters and setup configs
+   - ```data``` folder to store ```train.jsonl``` and ```test.jsonl``` as curated [here](#create-custom-dataset) (this is done in the notebooks)
+   - ```.env``` file for environment variables to be used for API calls
+   - ```.py/.ipynb``` script to run the code
+
+2) Set the hyperparameters like number of mutations, refine steps, in-context examples, etc.
+   - Set the following in [promptopt_config.yaml](demos/gsm8k/configs/promptopt_config.yaml):
+     - ```task_description```: Description of the task at hand, which will be fed into the prompt
+       - For GSM8k a description like the following can be used
+         ```
+         You are a mathematics expert. You will be given a mathematics problem which you need to solve
+         ```
+     - ```base_instruction```: Base instruction in line with the dataset
+       - A commonly used base instruction could be
+         ```
+         Lets think step by step.
+         ```
+     - ```answer_format```: Instruction for specifying the answer format
+       - It is crucial to set the ```answer_format``` properly to ensure correct extraction by ```def extract_final_answer()```
+       - The answer format could be:
+         ```
+         At the end, wrap only your final option between <ANS_START> and <ANS_END> tags
+         ```
+         Then in ```def extract_final_answer()``` we can simply write code to extract the string between the tags
+     - ```seen_set_size```: The number of train samples to be used for prompt optimization
+       - In our experiments we set this to 25. In general, any number between 20 and 50 would work
+     - ```few_shot_count```: The number of in-context examples needed in the prompt
+       - The value can be set to any positive integer based on the requirement
+       - For generating zero-shot prompts, set the value to a small number (i.e. between 2 and 5) and, after the final prompt is generated, the in-context examples can be removed.
We suggest keeping a few in-context examples, because during the optimization process the instructions in the prompt are refined against those examples; setting this to a small number therefore gives better zero-shot instructions in the prompt
+     - ```generate_reasoning```: Whether or not to generate reasoning for the in-context examples
+       - In our experiments we found it to improve the prompt overall, as it provides a step-by-step approach to reach the final answer. However, if there is a constraint on the prompt length or the number of prompt tokens, it can be turned off to get smaller prompts
+     - ```generate_expert_identity``` and ```generate_intent_keywords```: Having these helped improve the prompt, as they make the prompt more relevant to the task
+   - Refer to the ```promptopt_config.yaml``` files in the folders [here](demos) for the descriptions used for AQUARAT, SVAMP and GSM8k. For BBII refer to [description.py](demos/bbh/description.py), which has the meta-instructions for each of the datasets
+   - The following are global parameters which can be set based on the availability of training data
+     - ```run_without_train_examples``` is a global hyperparameter which can be used when there are no training samples and in-context examples are not required in the final prompt
+     - ```generate_synthetic_examples``` is a global hyperparameter which can be used when there are no training samples and we want to generate synthetic data for training
+     - ```use_examples``` is a global hyperparameter which can be used to optimize prompts using training data
+3) Create a dataset-specific class which inherits ```class DatasetSpecificProcessing```, similar to ```GSM8k(DatasetSpecificProcessing)``` in [demo.ipynb](demos/gsm8k/demo.ipynb), and define the following functions in it
+   1) ```def extract_answer_from_output()```: This is a dataset-specific function; given the ```answer``` from the dataset, it should extract and return a concise form of the answer. Note that, depending on the dataset, it can also simply return the ```answer``` as-is, as in the case of the SVAMP and AQUARAT datasets
+   2) ```def extract_final_answer()```: This is an LLM-output-specific function; given the verbose answer from the LLM, it should extract and return the concise final answer
+   3) ```def access_answer()```: This function takes the LLM output as input, then does the following:
+      - Extracts the concise answer from the LLM output using ```def extract_final_answer()``` as defined above
+      - Evaluates the extracted answer against the ground truth and returns
+        - The extracted answer from the LLM output
+        - A boolean value indicating whether the answer is correct
+      - The evaluation done here is dataset-specific: for datasets like GSM8k, SVAMP and AQUARAT, whose final answer is a number, we can do a direct match between the generated number and the ground truth, while for datasets where the answer is a sentence or paragraph it is better to evaluate with LLM-as-a-judge, comparing the generated and ground-truth paragraph/sentence. An example is available in ```def access_answer()``` in [this](demos/bbh/demo.ipynb) notebook
+
+
+## How PromptWizard Works 🔍
+- Using the problem description and the initial prompt instruction, PW generates variations of the instruction by prompting LLMs to mutate it. Based on performance, the best prompt is selected. PW incorporates a critique component that provides feedback, thus guiding and refining the prompt over multiple iterations.
+- PW also optimizes in-context examples.
PW selects a diverse set of examples from the training data, identifying positive and negative examples based on their performance with the modified prompt. Negative examples help inform further prompt refinements.
+- Examples and instructions are sequentially optimized, using the critique to generate synthetic examples that address the current prompt's weaknesses. These examples are integrated to further refine the prompt.
+- PW generates detailed reasoning chains via Chain-of-Thought (CoT), enriching the prompt's capacity for problem-solving.
+- PW aligns prompts with human reasoning by integrating task intent and expert personas, enhancing both model performance and interpretability.
+
+## Configurations ⚙️
+
+Here we define the various hyperparameters of the prompt optimization process, found in [promptopt_config.yaml](demos/gsm8k/configs/promptopt_config.yaml)
+
+- ```mutate_refine_iterations```: Number of iterations of mutating the task description followed by refinement of instructions
+- ```mutation_rounds```: Number of rounds of mutation to be performed when generating different styles
+- ```refine_task_eg_iterations```: Number of iterations for refining the task description and in-context examples
+- ```style_variation```: Number of thinking-style variations to be used in prompt mutation
+- ```questions_batch_size```: Number of questions to be asked to the LLM in a single batch during the training step
+- ```min_correct_count```: Minimum number of question batches that must be answered correctly for a prompt to be considered well-performing
+- ```max_eval_batches```: Maximum number of mini-batches on which we should evaluate the prompt
+- ```top_n```: Number of top-performing prompts to be carried from the scoring stage to the next stage
+- ```seen_set_size```: Number of samples from the train set to be used for training
+- ```few_shot_count```: Number of in-context examples required in the final prompt
+
+## Best Practices 💡
+
+Following are some of the best practices we followed during our experiments
+- Regarding the parameters in [promptopt_config.yaml](demos/gsm8k/configs/promptopt_config.yaml)
+  - We found the best-performing values for ```mutate_refine_iterations```, ```mutation_rounds``` and ```refine_task_eg_iterations``` to be 3 or 5
+  - Other parameters have been set to their ideal values. ```seen_set_size``` can be increased to 50, and ```few_shot_count``` can be set based on the use case
+- The prompts generated at the end of the training process are usually very detailed; however, user supervision can help tune them further for the task at hand
+- Try both configurations, synthetic in-context examples and in-context examples from the train set, to find the best prompt for your use case.
+
+## Results 📈
+

+*Figure: PromptWizard consistently outperforms other methods across various thresholds, maintaining the highest p(τ) values, indicating that it consistently performs near the best possible accuracy across all tasks*

+
+
+- The figure shows the performance profile curve for the instruction induction tasks. The performance profile curve visualizes how frequently different approaches' performance is within a given distance of the best performance. In this curve, the x-axis (τ) represents the performance ratio relative to the best-performing method, and the y-axis (p(τ)) reflects the fraction of tasks where a method's performance is within this ratio. For a given method, the curve thus tells what percentage of the tasks are within distance τ of the best performance.
+
+
+## How to contribute ✋
+This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repositories using our CLA.
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact opencode@microsoft.com with any additional questions or comments.
+
+## Citation 📝
+
+If you make use of our work, please cite our paper:
+
+```
+@misc{agarwal2024promptwizardtaskawarepromptoptimization,
+      title={PromptWizard: Task-Aware Prompt Optimization Framework},
+      author={Eshaan Agarwal and Joykirat Singh and Vivek Dani and Raghav Magazine and Tanuja Ganu and Akshay Nambi},
+      year={2024},
+      eprint={2405.18369},
+      archivePrefix={arXiv},
+      primaryClass={cs.CL},
+      url={https://arxiv.org/abs/2405.18369},
+}
+```
+## Responsible AI Considerations
+For guidelines and best practices related to Responsible AI, please refer to our [Responsible AI Guidelines](RESPONSIBLE_AI.md).
+ diff --git a/promptwizard.egg-info/SOURCES.txt b/promptwizard.egg-info/SOURCES.txt new file mode 100644 index 00000000..a9285b14 --- /dev/null +++ b/promptwizard.egg-info/SOURCES.txt @@ -0,0 +1,13 @@ +LICENSE +README.md +pyproject.toml +setup.cfg +setup.py +./promptwizard/__init__.py +./promptwizard/version.py +promptwizard.egg-info/PKG-INFO +promptwizard.egg-info/SOURCES.txt +promptwizard.egg-info/dependency_links.txt +promptwizard.egg-info/not-zip-safe +promptwizard.egg-info/requires.txt +promptwizard.egg-info/top_level.txt \ No newline at end of file diff --git a/promptwizard.egg-info/dependency_links.txt b/promptwizard.egg-info/dependency_links.txt new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/promptwizard.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/promptwizard.egg-info/not-zip-safe b/promptwizard.egg-info/not-zip-safe new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/promptwizard.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git a/promptwizard.egg-info/requires.txt b/promptwizard.egg-info/requires.txt new file mode 100644 index 00000000..3fc7c4d0 --- /dev/null +++ b/promptwizard.egg-info/requires.txt @@ -0,0 +1,38 @@ +datasets +tiktoken +nltk +openai +azure-identity +azure-search-documents +pyyaml~=6.0.1 +pyarrow==15.0.2 +llama-index==0.11.10 +llama-index-core==0.11.10 +python-dotenv + +[dev] +datasets +tiktoken +nltk +openai +azure-identity +azure-search-documents +pyyaml~=6.0.1 +pyarrow==15.0.2 +llama-index==0.11.10 +llama-index-core==0.11.10 +python-dotenv +black==21.4b0 +flake8>=3.8.3 +isort>=5.5.4 +pre-commit +pytest +pytest-xdist + +[quality] +black==21.4b0 +flake8>=3.8.3 +isort>=5.5.4 +pre-commit +pytest +pytest-xdist diff --git a/promptwizard.egg-info/top_level.txt b/promptwizard.egg-info/top_level.txt new file mode 100644 index 00000000..53c80dec --- /dev/null +++ b/promptwizard.egg-info/top_level.txt @@ -0,0 +1 @@ +promptwizard diff --git a/promptwizard/glue/common/llm/llm_mgr.py b/promptwizard/glue/common/llm/llm_mgr.py index c5cec9cd..8a35510f 100644 --- a/promptwizard/glue/common/llm/llm_mgr.py +++ b/promptwizard/glue/common/llm/llm_mgr.py @@ -1,69 +1,80 @@ +import os +import google.generativeai as genai from typing import Dict from llama_index.core.callbacks import CallbackManager, TokenCountingHandler from llama_index.core.llms import ChatMessage from llama_index.core.llms import LLM -from tenacity import retry, stop_after_attempt, wait_fixed, wait_random from ..base_classes import LLMConfig -from ..constants.str_literals import InstallLibs, OAILiterals, \ - OAILiterals, LLMLiterals, LLMOutputTypes -from .llm_helper import get_token_counter +from ..constants.str_literals import InstallLibs, OAILiterals, LLMLiterals, LLMOutputTypes from ..exceptions import GlueLLMException -from ..utils.runtime_tasks import install_lib_if_missing +from ..utils.runtime_tasks import install_lib_if_missing, str_to_class from ..utils.logging import get_glue_logger -from ..utils.runtime_tasks import str_to_class -import os -logger = get_glue_logger(__name__) -def call_api(messages): +logger = get_glue_logger(__name__) +def call_openai_api(messages): from openai import OpenAI from azure.identity import get_bearer_token_provider, AzureCliCredential from openai import AzureOpenAI - if os.environ['USE_OPENAI_API_KEY'] == "True": + if os.environ.get('USE_OPENAI_API_KEY') == "True": client = OpenAI(api_key=os.environ["OPENAI_API_KEY"]) - response = client.chat.completions.create( - model=os.environ["OPENAI_MODEL_NAME"], - messages=messages, - 
temperature=0.0,
+        response = client.chat.completions.create(
+            model=os.environ["OPENAI_MODEL_NAME"],
+            messages=messages,
+            temperature=0.0,
         )
     else:
         token_provider = get_bearer_token_provider(
-            AzureCliCredential(), "https://cognitiveservices.azure.com/.default"
-        )
+            AzureCliCredential(), "https://cognitiveservices.azure.com/.default"
+        )
         client = AzureOpenAI(
             api_version=os.environ["OPENAI_API_VERSION"],
             azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
             azure_ad_token_provider=token_provider
-        )
+        )
         response = client.chat.completions.create(
             model=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],
             messages=messages,
             temperature=0.0,
         )
-    prediction = response.choices[0].message.content
-    return prediction
-
+    return response.choices[0].message.content
+
+def call_gemini_api(messages):
+    try:
+        api_key = os.getenv("GOOGLE_API_KEY")
+        if not api_key:
+            raise GlueLLMException("GOOGLE_API_KEY environment variable not set")
+
+        genai.configure(api_key=api_key)
+        model = genai.GenerativeModel('gemini-2.0-flash')
+        chat = model.start_chat(history=[])
+
+        # Replay the conversation turn by turn; keep the last model reply
+        response = None
+        for message in messages:
+            if message["role"] in ["system", "user", "assistant"]:
+                response = chat.send_message(message["content"])
+
+        if response is None:
+            raise GlueLLMException("No messages with a supported role were provided")
+        return response.text
+    except Exception as e:
+        logger.error(f"Error in Gemini API call: {str(e)}")
+        raise GlueLLMException("Failed to get response from Gemini", e)

 class LLMMgr:
     @staticmethod
     def chat_completion(messages: Dict):
-        llm_handle = os.environ.get("MODEL_TYPE", "AzureOpenAI")
+        llm_handle = os.getenv("MODEL_TYPE", "AzureOpenAI")
         try:
-            if(llm_handle == "AzureOpenAI"):
-                # Code to for calling LLMs
-                return call_api(messages)
-            elif(llm_handle == "LLamaAML"):
-                # Code to for calling SLMs
-                return 0
+            if llm_handle == "AzureOpenAI":
+                return call_openai_api(messages)
+            elif llm_handle == "Gemini":
+                return call_gemini_api(messages)
+            else:
+                raise GlueLLMException(f"Unsupported model type: {llm_handle}")
         except Exception as e:
-            print(e)
+            logger.error(f"Error in chat completion: {str(e)}")
             return "Sorry, I am not able to understand your query. Please try again."
-        # raise GlueLLMException(f"Exception when calling {llm_handle.__class__.__name__} "
-        #                        f"LLM in chat mode, with message {messages} ", e)
-
     @staticmethod
     def get_all_model_ids_of_type(llm_config: LLMConfig, llm_output_type: str):
@@ -88,25 +99,25 @@ def get_llm_pool(llm_config: LLMConfig) -> Dict[str, LLM]:
         which can be used as handle to that LLM
         """
         llm_pool = {}
-        az_llm_config = llm_config.azure_open_ai
-
-        if az_llm_config:
+
+        # Handle Azure OpenAI configuration (keep the local alias: it is still
+        # used by the loop over az_llm_config.azure_oai_models below)
+        az_llm_config = llm_config.azure_open_ai
+        if az_llm_config:
             install_lib_if_missing(InstallLibs.LLAMA_LLM_AZ_OAI)
             install_lib_if_missing(InstallLibs.LLAMA_EMB_AZ_OAI)
             install_lib_if_missing(InstallLibs.LLAMA_MM_LLM_AZ_OAI)
             install_lib_if_missing(InstallLibs.TIKTOKEN)
             import tiktoken
-            # from llama_index.llms.azure_openai import AzureOpenAI
             from openai import AzureOpenAI
             from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
             from llama_index.multi_modal_llms.azure_openai import AzureOpenAIMultiModal
             az_token_provider = None
-            # if az_llm_config.use_azure_ad:
             from azure.identity import get_bearer_token_provider, AzureCliCredential
-            az_token_provider = get_bearer_token_provider(AzureCliCredential(),
-                                                          "https://cognitiveservices.azure.com/.default")
+            az_token_provider = get_bearer_token_provider(
+                AzureCliCredential(),
+                "https://cognitiveservices.azure.com/.default"
+            )
             for azure_oai_model in az_llm_config.azure_oai_models:
                 callback_mgr = None
@@ -158,22 +169,61 @@ def get_llm_pool(llm_config: LLMConfig) -> Dict[str, LLM]:
                     max_new_tokens=4096
                 )
+        # Handle Gemini configuration
+        if hasattr(llm_config, 'gemini') and llm_config.gemini:
+            try:
+                install_lib_if_missing("google-generativeai>=0.3.0")
+                from llama_index.llms.gemini import Gemini
+                from llama_index.multi_modal_llms.gemini import GeminiMultiModal
+
+                api_key = os.getenv("GOOGLE_API_KEY")
+                if not api_key:
+                    raise GlueLLMException("GOOGLE_API_KEY environment variable not set")
+
+                # Configure Gemini
+                gemini_config = llm_config.gemini
+                for gemini_model in gemini_config.models:
+                    if gemini_model.model_type == LLMOutputTypes.CHAT:
+                        llm_pool[gemini_model.unique_model_id] = Gemini(
+                            api_key=api_key,
+                            model_name=gemini_model.model_name,
+                            temperature=gemini_config.temperature or 0.0,
+                            max_tokens=gemini_config.max_tokens,
+                        )
+                    elif gemini_model.model_type == LLMOutputTypes.MULTI_MODAL:
+                        llm_pool[gemini_model.unique_model_id] = GeminiMultiModal(
+                            api_key=api_key,
+                            model_name=gemini_model.model_name,
+                            temperature=gemini_config.temperature or 0.0,
+                            max_tokens=gemini_config.max_tokens,
+                        )
+            except Exception as e:
+                logger.error(f"Failed to initialize Gemini models: {str(e)}")
+                raise GlueLLMException("Failed to initialize Gemini models", e)
+
+        # Handle custom models
         if llm_config.custom_models:
             for custom_model in llm_config.custom_models:
-                # try:
-                custom_llm_class = str_to_class(custom_model.class_name, None, custom_model.path_to_py_file)
-
-                callback_mgr = None
-                if custom_model.track_tokens:
-                    # If we need to count number of tokens used in LLM calls
-                    token_counter = TokenCountingHandler(
-                        tokenizer=custom_llm_class.get_tokenizer()
+                try:
+                    custom_llm_class = str_to_class(
+                        custom_model.class_name,
+                        None,
+                        custom_model.path_to_py_file
+                    )
+                    callback_mgr = None
+                    if custom_model.track_tokens:
+                        token_counter = TokenCountingHandler(
+                            tokenizer=custom_llm_class.get_tokenizer()
                     )
-                    callback_mgr = CallbackManager([token_counter])
-                    token_counter.reset_counts()
-                llm_pool[custom_model.unique_model_id] = custom_llm_class(callback_manager=callback_mgr)
-                # except Exception as e:
- # raise GlueLLMException(f"Custom model {custom_model.unique_model_id} not loaded.", e) + callback_mgr = CallbackManager([token_counter]) + token_counter.reset_counts() + llm_pool[custom_model.unique_model_id] = custom_llm_class( + callback_manager=callback_mgr + ) + except Exception as e: + logger.error(f"Failed to load custom model {custom_model.unique_model_id}: {str(e)}") + raise GlueLLMException(f"Custom model {custom_model.unique_model_id} not loaded.", e) + return llm_pool @staticmethod diff --git a/start.bat b/start.bat new file mode 100644 index 00000000..8e3a3ef1 --- /dev/null +++ b/start.bat @@ -0,0 +1,12 @@ +@echo off +echo Starting PromptWizard UI... + +echo Starting backend API... +start cmd /k "cd api && python app.py" + +echo Starting frontend... +start cmd /k "cd ui && npm run dev" + +echo PromptWizard UI started! +echo Backend API: http://localhost:5000 +echo Frontend: http://localhost:3000 diff --git a/ui/.gitignore b/ui/.gitignore new file mode 100644 index 00000000..5ef6a520 --- /dev/null +++ b/ui/.gitignore @@ -0,0 +1,41 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. + +# dependencies +/node_modules +/.pnp +.pnp.* +.yarn/* +!.yarn/patches +!.yarn/plugins +!.yarn/releases +!.yarn/versions + +# testing +/coverage + +# next.js +/.next/ +/out/ + +# production +/build + +# misc +.DS_Store +*.pem + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* +.pnpm-debug.log* + +# env files (can opt-in for committing if needed) +.env* + +# vercel +.vercel + +# typescript +*.tsbuildinfo +next-env.d.ts diff --git a/ui/Dockerfile b/ui/Dockerfile new file mode 100644 index 00000000..191790fe --- /dev/null +++ b/ui/Dockerfile @@ -0,0 +1,53 @@ +FROM node:18-alpine AS base + +# Install dependencies only when needed +FROM base AS deps +WORKDIR /app + +# Copy package.json and package-lock.json +COPY package.json package-lock.json* ./ + +# Install dependencies +RUN npm ci + +# Rebuild the source code only when needed +FROM base AS builder +WORKDIR /app +COPY --from=deps /app/node_modules ./node_modules +COPY . . + +# Set environment variables +ENV NEXT_TELEMETRY_DISABLED 1 +ENV NODE_ENV production + +# Build the application +RUN npm run build + +# Production image, copy all the files and run next +FROM base AS runner +WORKDIR /app + +ENV NODE_ENV production +ENV NEXT_TELEMETRY_DISABLED 1 + +RUN addgroup --system --gid 1001 nodejs +RUN adduser --system --uid 1001 nextjs + +COPY --from=builder /app/public ./public + +# Set the correct permission for prerender cache +RUN mkdir .next +RUN chown nextjs:nodejs .next + +# Automatically leverage output traces to reduce image size +COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./ +COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static + +USER nextjs + +EXPOSE 3000 + +ENV PORT 3000 +ENV HOSTNAME "0.0.0.0" + +CMD ["node", "server.js"] diff --git a/ui/README.md b/ui/README.md new file mode 100644 index 00000000..a279daae --- /dev/null +++ b/ui/README.md @@ -0,0 +1,134 @@ +# PromptWizard UI 🧙‍♂️✨ + +A modern, user-friendly web interface for the PromptWizard prompt optimization framework. + +

+*[Banner image: PromptWizard UI]*

+ +## Features + +

+*[Image: UI Features. PromptWizard UI features and their relationships]*

+ +### Tabbed Interface +- **Basic Info**: Configure task description, base instruction, answer format, model, and API key +- **Data Selection**: Choose datasets, configure in-context examples, and preview data +- **Prompt Configuration**: Select optimization scenarios and configure advanced parameters +- **Evaluation**: Set evaluation criteria and manage optimization sessions + +
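+For reference, the four tabs above assemble the JSON body of a single request to the backend's `/api/optimize_prompt` endpoint. The sketch below is illustrative: the field names are taken from the API handler in `api/app.py`, while the values are made-up placeholders.
+
+```python
+import requests
+
+payload = {
+    # Basic Info tab
+    "taskDescription": "Summarize customer support tickets",
+    "baseInstruction": "Summarize the ticket in two sentences.",
+    "answerFormat": "Plain text",
+    "model": "Gemini",
+    # Data Selection tab
+    "dataset": "Custom",
+    "useExamples": False,
+    # Prompt Configuration tab
+    "mutationRounds": 3,
+    "refineSteps": 2,
+    "generateExpertIdentity": True,
+    # Evaluation tab
+    "evaluationCriteria": ["Clarity", "Accuracy", "Completeness"],
+}
+
+resp = requests.post("http://localhost:5000/api/optimize_prompt", json=payload)
+resp.raise_for_status()
+print(resp.json()["optimizedPrompt"])
+```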

+*[Image: Tabs Workflow. The workflow between different tabs in the UI]*

+ +### Advanced Features +- **Advanced Optimization Parameters**: Fine-tune the optimization process with parameters like mutate refine iterations, refine task examples iterations, and more +- **Advanced Evaluation Metrics**: Use metrics like Faithfulness, Semantic Similarity, Context Relevancy, and more +- **Dataset Preview**: Visualize and inspect your dataset before optimization +- **Multimodal Support**: Optimize prompts for image-based tasks with image uploads +- **Session Management**: Save and load optimization sessions for later use + +

+*[Image: Optimization Process. The prompt optimization process flow]*

+ +## Getting Started + +### Prerequisites +- Node.js 18+ and npm +- Running instance of the PromptWizard API + +### Installation + +1. Install dependencies: + ```bash + npm install + ``` + +2. Create a `.env.local` file with the following content: + ``` + NEXT_PUBLIC_API_URL=http://localhost:5000 + ``` + +3. Run the development server: + ```bash + npm run dev + ``` + +4. Open [http://localhost:3000](http://localhost:3000) with your browser to see the application. + +## Usage + +1. **Basic Info Tab**: + - Enter your task description and base instruction + - Specify the answer format (optional) + - Select the model and enter your API key + +2. **Data Selection Tab**: + - Choose a dataset or upload your own + - Configure in-context examples settings + - Preview your dataset (if available) + +3. **Prompt Configuration Tab**: + - Select an optimization scenario + - Configure optimization parameters + - Enable multimodal support if needed + +4. **Evaluation Tab**: + - Select evaluation criteria + - Configure advanced evaluation metrics + - Set up session management + +5. Click "Optimize Prompt" to start the optimization process + +## Deployment + +### Build for Production + +```bash +npm run build +``` + +### Start Production Server + +```bash +npm run start +``` + +### Docker Deployment + +1. Build the Docker image: + ```bash + docker build -t promptwizard-ui . + ``` + +2. Run the container: + ```bash + docker run -p 3000:3000 -e NEXT_PUBLIC_API_URL=http://api:5000 promptwizard-ui + ``` + +## Configuration + +The UI can be configured using environment variables: + +- `NEXT_PUBLIC_API_URL`: URL of the PromptWizard API (default: http://localhost:5000) +- `NEXT_PUBLIC_DEFAULT_MODEL`: Default model to use (default: Gemini) + +## Learn More + +To learn more about the technologies used: + +- [Next.js Documentation](https://nextjs.org/docs) +- [React Documentation](https://reactjs.org/docs) +- [Tailwind CSS Documentation](https://tailwindcss.com/docs) + +## Contributing + +Contributions are welcome! Please feel free to submit a Pull Request. diff --git a/ui/next.config.ts b/ui/next.config.ts new file mode 100644 index 00000000..9decbab0 --- /dev/null +++ b/ui/next.config.ts @@ -0,0 +1,17 @@ +import type { NextConfig } from "next"; + +const nextConfig: NextConfig = { + /* config options here */ + async rewrites() { + return [ + { + source: '/api/:path*', + destination: process.env.NODE_ENV === 'development' + ? 
'http://localhost:5000/api/:path*' + : 'https://promptwizard.onrender.com/api/:path*', + }, + ]; + }, +}; + +export default nextConfig; diff --git a/ui/package-lock.json b/ui/package-lock.json new file mode 100644 index 00000000..df156166 --- /dev/null +++ b/ui/package-lock.json @@ -0,0 +1,1733 @@ +{ + "name": "ui", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "ui", + "version": "0.1.0", + "dependencies": { + "next": "15.3.2", + "react": "^19.0.0", + "react-dom": "^19.0.0" + }, + "devDependencies": { + "@tailwindcss/postcss": "^4", + "@types/node": "^20", + "@types/react": "^19", + "@types/react-dom": "^19", + "tailwindcss": "^4", + "typescript": "^5" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@emnapi/runtime": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.4.3.tgz", + "integrity": "sha512-pBPWdu6MLKROBX05wSNKcNb++m5Er+KQ9QkB+WVM+pW2Kx9hoSrVTnu3BdkI5eBLZoKu/J6mW/B6i6bJB2ytXQ==", + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@img/sharp-darwin-arm64": { + "version": "0.34.1", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.1.tgz", + "integrity": "sha512-pn44xgBtgpEbZsu+lWf2KNb6OAf70X68k+yk69Ic2Xz11zHR/w24/U49XT7AeRwJ0Px+mhALhU5LPci1Aymk7A==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-arm64": "1.1.0" + } + }, + "node_modules/@img/sharp-darwin-x64": { + "version": "0.34.1", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.1.tgz", + "integrity": "sha512-VfuYgG2r8BpYiOUN+BfYeFo69nP/MIwAtSJ7/Zpxc5QF3KS22z8Pvg3FkrSFJBPNQ7mmcUcYQFBmEQp7eu1F8Q==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-x64": "1.1.0" + } + }, + "node_modules/@img/sharp-libvips-darwin-arm64": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.1.0.tgz", + "integrity": "sha512-HZ/JUmPwrJSoM4DIQPv/BfNh9yrOA8tlBbqbLz4JZ5uew2+o22Ik+tHQJcih7QJuSa0zo5coHTfD5J8inqj9DA==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": 
"https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-darwin-x64": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.1.0.tgz", + "integrity": "sha512-Xzc2ToEmHN+hfvsl9wja0RlnXEgpKNmftriQp6XzY/RaSfwD9th+MSh0WQKzUreLKKINb3afirxW7A0fz2YWuQ==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.1.0.tgz", + "integrity": "sha512-s8BAd0lwUIvYCJyRdFqvsj+BJIpDBSxs6ivrOPm/R7piTs5UIwY5OjXrP2bqXC9/moGsyRa37eYWYCOGVXxVrA==", + "cpu": [ + "arm" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm64": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.1.0.tgz", + "integrity": "sha512-IVfGJa7gjChDET1dK9SekxFFdflarnUB8PwW8aGwEoF3oAsSDuNUTYS+SKDOyOJxQyDC1aPFMuRYLoDInyV9Ew==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-ppc64": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.1.0.tgz", + "integrity": "sha512-tiXxFZFbhnkWE2LA8oQj7KYR+bWBkiV2nilRldT7bqoEZ4HiDOcePr9wVDAZPi/Id5fT1oY9iGnDq20cwUz8lQ==", + "cpu": [ + "ppc64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-s390x": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.1.0.tgz", + "integrity": "sha512-xukSwvhguw7COyzvmjydRb3x/09+21HykyapcZchiCUkTThEQEOMtBj9UhkaBRLuBrgLFzQ2wbxdeCCJW/jgJA==", + "cpu": [ + "s390x" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-x64": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.1.0.tgz", + "integrity": "sha512-yRj2+reB8iMg9W5sULM3S74jVS7zqSzHG3Ol/twnAAkAhnGQnpjj6e4ayUz7V+FpKypwgs82xbRdYtchTTUB+Q==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-arm64": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.1.0.tgz", + "integrity": "sha512-jYZdG+whg0MDK+q2COKbYidaqW/WTz0cc1E+tMAusiDygrM4ypmSCjOJPmFTvHHJ8j/6cAGyeDWZOsK06tP33w==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-x64": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.1.0.tgz", 
+ "integrity": "sha512-wK7SBdwrAiycjXdkPnGCPLjYb9lD4l6Ze2gSdAGVZrEL05AOUJESWU2lhlC+Ffn5/G+VKuSm6zzbQSzFX/P65A==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-linux-arm": { + "version": "0.34.1", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.1.tgz", + "integrity": "sha512-anKiszvACti2sGy9CirTlNyk7BjjZPiML1jt2ZkTdcvpLU1YH6CXwRAZCA2UmRXnhiIftXQ7+Oh62Ji25W72jA==", + "cpu": [ + "arm" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm": "1.1.0" + } + }, + "node_modules/@img/sharp-linux-arm64": { + "version": "0.34.1", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.1.tgz", + "integrity": "sha512-kX2c+vbvaXC6vly1RDf/IWNXxrlxLNpBVWkdpRq5Ka7OOKj6nr66etKy2IENf6FtOgklkg9ZdGpEu9kwdlcwOQ==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm64": "1.1.0" + } + }, + "node_modules/@img/sharp-linux-s390x": { + "version": "0.34.1", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.1.tgz", + "integrity": "sha512-7s0KX2tI9mZI2buRipKIw2X1ufdTeaRgwmRabt5bi9chYfhur+/C1OXg3TKg/eag1W+6CCWLVmSauV1owmRPxA==", + "cpu": [ + "s390x" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-s390x": "1.1.0" + } + }, + "node_modules/@img/sharp-linux-x64": { + "version": "0.34.1", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.1.tgz", + "integrity": "sha512-wExv7SH9nmoBW3Wr2gvQopX1k8q2g5V5Iag8Zk6AVENsjwd+3adjwxtp3Dcu2QhOXr8W9NusBU6XcQUohBZ5MA==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-x64": "1.1.0" + } + }, + "node_modules/@img/sharp-linuxmusl-arm64": { + "version": "0.34.1", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.1.tgz", + "integrity": "sha512-DfvyxzHxw4WGdPiTF0SOHnm11Xv4aQexvqhRDAoD00MzHekAj9a/jADXeXYCDFH/DzYruwHbXU7uz+H+nWmSOQ==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-arm64": "1.1.0" + } + }, + "node_modules/@img/sharp-linuxmusl-x64": { + "version": "0.34.1", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.1.tgz", + "integrity": "sha512-pax/kTR407vNb9qaSIiWVnQplPcGU8LRIJpDT5o8PdAx5aAA7AS3X9PS8Isw1/WfqgQorPotjrZL3Pqh6C5EBg==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + 
"optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-x64": "1.1.0" + } + }, + "node_modules/@img/sharp-wasm32": { + "version": "0.34.1", + "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.34.1.tgz", + "integrity": "sha512-YDybQnYrLQfEpzGOQe7OKcyLUCML4YOXl428gOOzBgN6Gw0rv8dpsJ7PqTHxBnXnwXr8S1mYFSLSa727tpz0xg==", + "cpu": [ + "wasm32" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT", + "optional": true, + "dependencies": { + "@emnapi/runtime": "^1.4.0" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-ia32": { + "version": "0.34.1", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.1.tgz", + "integrity": "sha512-WKf/NAZITnonBf3U1LfdjoMgNO5JYRSlhovhRhMxXVdvWYveM4kM3L8m35onYIdh75cOMCo1BexgVQcCDzyoWw==", + "cpu": [ + "ia32" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-x64": { + "version": "0.34.1", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.1.tgz", + "integrity": "sha512-hw1iIAHpNE8q3uMIRCgGOeDoz9KtFNarFLQclLxr/LK1VBkj8nby18RjFvr6aP7USRYAjTZW6yisnBWMX571Tw==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@isaacs/fs-minipass": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", + "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.4" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", + "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": 
"https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@next/env": { + "version": "15.3.2", + "resolved": "https://registry.npmjs.org/@next/env/-/env-15.3.2.tgz", + "integrity": "sha512-xURk++7P7qR9JG1jJtLzPzf0qEvqCN0A/T3DXf8IPMKo9/6FfjxtEffRJIIew/bIL4T3C2jLLqBor8B/zVlx6g==", + "license": "MIT" + }, + "node_modules/@next/swc-darwin-arm64": { + "version": "15.3.2", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.3.2.tgz", + "integrity": "sha512-2DR6kY/OGcokbnCsjHpNeQblqCZ85/1j6njYSkzRdpLn5At7OkSdmk7WyAmB9G0k25+VgqVZ/u356OSoQZ3z0g==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-darwin-x64": { + "version": "15.3.2", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.3.2.tgz", + "integrity": "sha512-ro/fdqaZWL6k1S/5CLv1I0DaZfDVJkWNaUU3un8Lg6m0YENWlDulmIWzV96Iou2wEYyEsZq51mwV8+XQXqMp3w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-gnu": { + "version": "15.3.2", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.3.2.tgz", + "integrity": "sha512-covwwtZYhlbRWK2HlYX9835qXum4xYZ3E2Mra1mdQ+0ICGoMiw1+nVAn4d9Bo7R3JqSmK1grMq/va+0cdh7bJA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-musl": { + "version": "15.3.2", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.3.2.tgz", + "integrity": "sha512-KQkMEillvlW5Qk5mtGA/3Yz0/tzpNlSw6/3/ttsV1lNtMuOHcGii3zVeXZyi4EJmmLDKYcTcByV2wVsOhDt/zg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-gnu": { + "version": "15.3.2", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.3.2.tgz", + "integrity": "sha512-uRBo6THWei0chz+Y5j37qzx+BtoDRFIkDzZjlpCItBRXyMPIg079eIkOCl3aqr2tkxL4HFyJ4GHDes7W8HuAUg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-musl": { + "version": "15.3.2", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.3.2.tgz", + "integrity": "sha512-+uxFlPuCNx/T9PdMClOqeE8USKzj8tVz37KflT3Kdbx/LOlZBRI2yxuIcmx1mPNK8DwSOMNCr4ureSet7eyC0w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-arm64-msvc": { + "version": "15.3.2", + "resolved": 
"https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.3.2.tgz", + "integrity": "sha512-LLTKmaI5cfD8dVzh5Vt7+OMo+AIOClEdIU/TSKbXXT2iScUTSxOGoBhfuv+FU8R9MLmrkIL1e2fBMkEEjYAtPQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-x64-msvc": { + "version": "15.3.2", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.3.2.tgz", + "integrity": "sha512-aW5B8wOPioJ4mBdMDXkt5f3j8pUr9W8AnlX0Df35uRWNT1Y6RIybxjnSUe+PhM+M1bwgyY8PHLmXZC6zT1o5tA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "license": "Apache-2.0" + }, + "node_modules/@swc/helpers": { + "version": "0.5.15", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz", + "integrity": "sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.8.0" + } + }, + "node_modules/@tailwindcss/node": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.6.tgz", + "integrity": "sha512-ed6zQbgmKsjsVvodAS1q1Ld2BolEuxJOSyyNc+vhkjdmfNUDCmQnlXBfQkHrlzNmslxHsQU/bFmzcEbv4xXsLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@ampproject/remapping": "^2.3.0", + "enhanced-resolve": "^5.18.1", + "jiti": "^2.4.2", + "lightningcss": "1.29.2", + "magic-string": "^0.30.17", + "source-map-js": "^1.2.1", + "tailwindcss": "4.1.6" + } + }, + "node_modules/@tailwindcss/oxide": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.6.tgz", + "integrity": "sha512-0bpEBQiGx+227fW4G0fLQ8vuvyy5rsB1YIYNapTq3aRsJ9taF3f5cCaovDjN5pUGKKzcpMrZst/mhNaKAPOHOA==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "detect-libc": "^2.0.4", + "tar": "^7.4.3" + }, + "engines": { + "node": ">= 10" + }, + "optionalDependencies": { + "@tailwindcss/oxide-android-arm64": "4.1.6", + "@tailwindcss/oxide-darwin-arm64": "4.1.6", + "@tailwindcss/oxide-darwin-x64": "4.1.6", + "@tailwindcss/oxide-freebsd-x64": "4.1.6", + "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.6", + "@tailwindcss/oxide-linux-arm64-gnu": "4.1.6", + "@tailwindcss/oxide-linux-arm64-musl": "4.1.6", + "@tailwindcss/oxide-linux-x64-gnu": "4.1.6", + "@tailwindcss/oxide-linux-x64-musl": "4.1.6", + "@tailwindcss/oxide-wasm32-wasi": "4.1.6", + "@tailwindcss/oxide-win32-arm64-msvc": "4.1.6", + "@tailwindcss/oxide-win32-x64-msvc": "4.1.6" + } + }, + "node_modules/@tailwindcss/oxide-android-arm64": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.6.tgz", + "integrity": "sha512-VHwwPiwXtdIvOvqT/0/FLH/pizTVu78FOnI9jQo64kSAikFSZT7K4pjyzoDpSMaveJTGyAKvDjuhxJxKfmvjiQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-arm64": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.6.tgz", + "integrity": 
"sha512-weINOCcqv1HVBIGptNrk7c6lWgSFFiQMcCpKM4tnVi5x8OY2v1FrV76jwLukfT6pL1hyajc06tyVmZFYXoxvhQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-x64": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.6.tgz", + "integrity": "sha512-3FzekhHG0ww1zQjQ1lPoq0wPrAIVXAbUkWdWM8u5BnYFZgb9ja5ejBqyTgjpo5mfy0hFOoMnMuVDI+7CXhXZaQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-freebsd-x64": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.6.tgz", + "integrity": "sha512-4m5F5lpkBZhVQJq53oe5XgJ+aFYWdrgkMwViHjRsES3KEu2m1udR21B1I77RUqie0ZYNscFzY1v9aDssMBZ/1w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.6.tgz", + "integrity": "sha512-qU0rHnA9P/ZoaDKouU1oGPxPWzDKtIfX7eOGi5jOWJKdxieUJdVV+CxWZOpDWlYTd4N3sFQvcnVLJWJ1cLP5TA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-gnu": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.6.tgz", + "integrity": "sha512-jXy3TSTrbfgyd3UxPQeXC3wm8DAgmigzar99Km9Sf6L2OFfn/k+u3VqmpgHQw5QNfCpPe43em6Q7V76Wx7ogIQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-musl": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.6.tgz", + "integrity": "sha512-8kjivE5xW0qAQ9HX9reVFmZj3t+VmljDLVRJpVBEoTR+3bKMnvC7iLcoSGNIUJGOZy1mLVq7x/gerVg0T+IsYw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-gnu": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.6.tgz", + "integrity": "sha512-A4spQhwnWVpjWDLXnOW9PSinO2PTKJQNRmL/aIl2U/O+RARls8doDfs6R41+DAXK0ccacvRyDpR46aVQJJCoCg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-musl": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.6.tgz", + "integrity": "sha512-YRee+6ZqdzgiQAHVSLfl3RYmqeeaWVCk796MhXhLQu2kJu2COHBkqlqsqKYx3p8Hmk5pGCQd2jTAoMWWFeyG2A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi": { + "version": "4.1.6", + "resolved": 
"https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.6.tgz", + "integrity": "sha512-qAp4ooTYrBQ5pk5jgg54/U1rCJ/9FLYOkkQ/nTE+bVMseMfB6O7J8zb19YTpWuu4UdfRf5zzOrNKfl6T64MNrQ==", + "bundleDependencies": [ + "@napi-rs/wasm-runtime", + "@emnapi/core", + "@emnapi/runtime", + "@tybys/wasm-util", + "@emnapi/wasi-threads", + "tslib" + ], + "cpu": [ + "wasm32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.4.3", + "@emnapi/runtime": "^1.4.3", + "@emnapi/wasi-threads": "^1.0.2", + "@napi-rs/wasm-runtime": "^0.2.9", + "@tybys/wasm-util": "^0.9.0", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.6.tgz", + "integrity": "sha512-nqpDWk0Xr8ELO/nfRUDjk1pc9wDJ3ObeDdNMHLaymc4PJBWj11gdPCWZFKSK2AVKjJQC7J2EfmSmf47GN7OuLg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-win32-x64-msvc": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.6.tgz", + "integrity": "sha512-5k9xF33xkfKpo9wCvYcegQ21VwIBU1/qEbYlVukfEIyQbEA47uK8AAwS7NVjNE3vHzcmxMYwd0l6L4pPjjm1rQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/postcss": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.1.6.tgz", + "integrity": "sha512-ELq+gDMBuRXPJlpE3PEen+1MhnHAQQrh2zF0dI1NXOlEWfr2qWf2CQdr5jl9yANv8RErQaQ2l6nIFO9OSCVq/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "@tailwindcss/node": "4.1.6", + "@tailwindcss/oxide": "4.1.6", + "postcss": "^8.4.41", + "tailwindcss": "4.1.6" + } + }, + "node_modules/@types/node": { + "version": "20.17.46", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.17.46.tgz", + "integrity": "sha512-0PQHLhZPWOxGW4auogW0eOQAuNIlCYvibIpG67ja0TOJ6/sehu+1en7sfceUn+QQtx4Rk3GxbLNwPh0Cav7TWw==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.19.2" + } + }, + "node_modules/@types/react": { + "version": "19.1.4", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.4.tgz", + "integrity": "sha512-EB1yiiYdvySuIITtD5lhW4yPyJ31RkJkkDw794LaQYrxCSaQV/47y5o1FMC4zF9ZyjUjzJMZwbovEnT5yHTW6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-dom": { + "version": "19.1.4", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.1.4.tgz", + "integrity": "sha512-WxYAszDYgsMV31OVyoG4jbAgJI1Gw0Xq9V19zwhy6+hUUJlJIdZ3r/cbdmTqFv++SktQkZ/X+46yGFxp5XJBEg==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^19.0.0" + } + }, + "node_modules/busboy": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", + "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", + "dependencies": { + "streamsearch": "^1.1.0" + }, + "engines": { + "node": ">=10.16.0" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001718", + "resolved": 
"https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001718.tgz", + "integrity": "sha512-AflseV1ahcSunK53NfEs9gFWgOEmzr0f+kaMFA4xiLZlr9Hzt7HxcSpIFcnNCUkz6R6dWKa54rUz3HUmI3nVcw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chownr": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", + "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/client-only": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", + "license": "MIT" + }, + "node_modules/color": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz", + "integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==", + "license": "MIT", + "optional": true, + "dependencies": { + "color-convert": "^2.0.1", + "color-string": "^1.9.0" + }, + "engines": { + "node": ">=12.5.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", + "optional": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT", + "optional": true + }, + "node_modules/color-string": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", + "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", + "license": "MIT", + "optional": true, + "dependencies": { + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" + } + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "dev": true, + "license": "MIT" + }, + "node_modules/detect-libc": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.4.tgz", + "integrity": "sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==", + "devOptional": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.18.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.1.tgz", + "integrity": "sha512-ZSW3ma5GkcQBIpwZTSRAI8N71Uuwgs93IezB7mf7R60tC8ZbJideoDNKjHn2O9KIlx6rkGTTEk1xUCK2E1Y2Yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" 
+ } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-arrayish": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", + "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==", + "license": "MIT", + "optional": true + }, + "node_modules/jiti": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.4.2.tgz", + "integrity": "sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "lib/jiti-cli.mjs" + } + }, + "node_modules/lightningcss": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.29.2.tgz", + "integrity": "sha512-6b6gd/RUXKaw5keVdSEtqFVdzWnU5jMxTUjA2bVcMNPLwSQ08Sv/UodBVtETLCn7k4S1Ibxwh7k68IwLZPgKaA==", + "dev": true, + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^2.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-darwin-arm64": "1.29.2", + "lightningcss-darwin-x64": "1.29.2", + "lightningcss-freebsd-x64": "1.29.2", + "lightningcss-linux-arm-gnueabihf": "1.29.2", + "lightningcss-linux-arm64-gnu": "1.29.2", + "lightningcss-linux-arm64-musl": "1.29.2", + "lightningcss-linux-x64-gnu": "1.29.2", + "lightningcss-linux-x64-musl": "1.29.2", + "lightningcss-win32-arm64-msvc": "1.29.2", + "lightningcss-win32-x64-msvc": "1.29.2" + } + }, + "node_modules/lightningcss-darwin-arm64": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.29.2.tgz", + "integrity": "sha512-cK/eMabSViKn/PG8U/a7aCorpeKLMlK0bQeNHmdb7qUnBkNPnL+oV5DjJUo0kqWsJUapZsM4jCfYItbqBDvlcA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-x64": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.29.2.tgz", + "integrity": "sha512-j5qYxamyQw4kDXX5hnnCKMf3mLlHvG44f24Qyi2965/Ycz829MYqjrVg2H8BidybHBp9kom4D7DR5VqCKDXS0w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.29.2.tgz", + "integrity": "sha512-wDk7M2tM78Ii8ek9YjnY8MjV5f5JN2qNVO+/0BAGZRvXKtQrBC4/cn4ssQIpKIPP44YXw6gFdpUF+Ps+RGsCwg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.29.2", + "resolved": 
"https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.29.2.tgz", + "integrity": "sha512-IRUrOrAF2Z+KExdExe3Rz7NSTuuJ2HvCGlMKoquK5pjvo2JY4Rybr+NrKnq0U0hZnx5AnGsuFHjGnNT14w26sg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.29.2.tgz", + "integrity": "sha512-KKCpOlmhdjvUTX/mBuaKemp0oeDIBBLFiU5Fnqxh1/DZ4JPZi4evEH7TKoSBFOSOV3J7iEmmBaw/8dpiUvRKlQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.29.2.tgz", + "integrity": "sha512-Q64eM1bPlOOUgxFmoPUefqzY1yV3ctFPE6d/Vt7WzLW4rKTv7MyYNky+FWxRpLkNASTnKQUaiMJ87zNODIrrKQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.29.2.tgz", + "integrity": "sha512-0v6idDCPG6epLXtBH/RPkHvYx74CVziHo6TMYga8O2EiQApnUPZsbR9nFNrg2cgBzk1AYqEd95TlrsL7nYABQg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-musl": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.29.2.tgz", + "integrity": "sha512-rMpz2yawkgGT8RULc5S4WiZopVMOFWjiItBT7aSfDX4NQav6M44rhn5hjtkKzB+wMTRlLLqxkeYEtQ3dd9696w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-arm64-msvc": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.29.2.tgz", + "integrity": "sha512-nL7zRW6evGQqYVu/bKGK+zShyz8OVzsCotFgc7judbt6wnB2KbiKKJwBE4SGoDBQ1O94RjW4asrCjQL4i8Fhbw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.29.2.tgz", + "integrity": "sha512-EdIUW3B2vLuHmv7urfzMI/h2fmlnOQBk1xlsDxkN1tCWKjNFjfLhGxYk8C8mzpSfr+A6jFFIi8fU6LbQGsRWjA==", + "cpu": [ + "x64" + ], + "dev": true, + 
"license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/magic-string": { + "version": "0.30.17", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.17.tgz", + "integrity": "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/minizlib": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.2.tgz", + "integrity": "sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^7.1.2" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/mkdirp": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz", + "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==", + "dev": true, + "license": "MIT", + "bin": { + "mkdirp": "dist/cjs/src/bin.js" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/next": { + "version": "15.3.2", + "resolved": "https://registry.npmjs.org/next/-/next-15.3.2.tgz", + "integrity": "sha512-CA3BatMyHkxZ48sgOCLdVHjFU36N7TF1HhqAHLFOkV6buwZnvMI84Cug8xD56B9mCuKrqXnLn94417GrZ/jjCQ==", + "license": "MIT", + "dependencies": { + "@next/env": "15.3.2", + "@swc/counter": "0.1.3", + "@swc/helpers": "0.5.15", + "busboy": "1.6.0", + "caniuse-lite": "^1.0.30001579", + "postcss": "8.4.31", + "styled-jsx": "5.1.6" + }, + "bin": { + "next": "dist/bin/next" + }, + "engines": { + "node": "^18.18.0 || ^19.8.0 || >= 20.0.0" + }, + "optionalDependencies": { + "@next/swc-darwin-arm64": "15.3.2", + "@next/swc-darwin-x64": "15.3.2", + "@next/swc-linux-arm64-gnu": "15.3.2", + "@next/swc-linux-arm64-musl": "15.3.2", + "@next/swc-linux-x64-gnu": "15.3.2", + "@next/swc-linux-x64-musl": "15.3.2", + "@next/swc-win32-arm64-msvc": "15.3.2", + "@next/swc-win32-x64-msvc": "15.3.2", + "sharp": "^0.34.1" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.41.2", + "babel-plugin-react-compiler": "*", + "react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", + "react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", + "sass": "^1.3.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + }, + "@playwright/test": { + "optional": true + }, + "babel-plugin-react-compiler": { + "optional": true + 
}, + "sass": { + "optional": true + } + } + }, + "node_modules/next/node_modules/postcss": { + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/postcss": { + "version": "8.5.3", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.3.tgz", + "integrity": "sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.8", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/react": { + "version": "19.1.0", + "resolved": "https://registry.npmjs.org/react/-/react-19.1.0.tgz", + "integrity": "sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.1.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.0.tgz", + "integrity": "sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g==", + "license": "MIT", + "dependencies": { + "scheduler": "^0.26.0" + }, + "peerDependencies": { + "react": "^19.1.0" + } + }, + "node_modules/scheduler": { + "version": "0.26.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.26.0.tgz", + "integrity": "sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "license": "ISC", + "optional": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/sharp": { + "version": "0.34.1", + "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.34.1.tgz", + "integrity": "sha512-1j0w61+eVxu7DawFJtnfYcvSv6qPFvfTaqzTQ2BLknVhHTwGS8sc63ZBF4rzkWMBVKybo4S5OBtDdZahh2A1xg==", + "hasInstallScript": true, + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "color": "^4.2.3", + "detect-libc": "^2.0.3", + "semver": "^7.7.1" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + 
"@img/sharp-darwin-arm64": "0.34.1", + "@img/sharp-darwin-x64": "0.34.1", + "@img/sharp-libvips-darwin-arm64": "1.1.0", + "@img/sharp-libvips-darwin-x64": "1.1.0", + "@img/sharp-libvips-linux-arm": "1.1.0", + "@img/sharp-libvips-linux-arm64": "1.1.0", + "@img/sharp-libvips-linux-ppc64": "1.1.0", + "@img/sharp-libvips-linux-s390x": "1.1.0", + "@img/sharp-libvips-linux-x64": "1.1.0", + "@img/sharp-libvips-linuxmusl-arm64": "1.1.0", + "@img/sharp-libvips-linuxmusl-x64": "1.1.0", + "@img/sharp-linux-arm": "0.34.1", + "@img/sharp-linux-arm64": "0.34.1", + "@img/sharp-linux-s390x": "0.34.1", + "@img/sharp-linux-x64": "0.34.1", + "@img/sharp-linuxmusl-arm64": "0.34.1", + "@img/sharp-linuxmusl-x64": "0.34.1", + "@img/sharp-wasm32": "0.34.1", + "@img/sharp-win32-ia32": "0.34.1", + "@img/sharp-win32-x64": "0.34.1" + } + }, + "node_modules/simple-swizzle": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", + "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==", + "license": "MIT", + "optional": true, + "dependencies": { + "is-arrayish": "^0.3.1" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/streamsearch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", + "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/styled-jsx": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.6.tgz", + "integrity": "sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==", + "license": "MIT", + "dependencies": { + "client-only": "0.0.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "peerDependencies": { + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/tailwindcss": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.6.tgz", + "integrity": "sha512-j0cGLTreM6u4OWzBeLBpycK0WIh8w7kSwcUsQZoGLHZ7xDTdM69lN64AgoIEEwFi0tnhs4wSykUa5YWxAzgFYg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/tar": { + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz", + "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==", + "dev": true, + "license": "ISC", + "dependencies": { + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.0.1", + "mkdirp": "^3.0.1", + "yallist": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": 
"https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/typescript": { + "version": "5.8.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz", + "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.19.8", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz", + "integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==", + "dev": true, + "license": "MIT" + }, + "node_modules/yallist": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", + "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + } + } +} diff --git a/ui/package.json b/ui/package.json new file mode 100644 index 00000000..d288a011 --- /dev/null +++ b/ui/package.json @@ -0,0 +1,26 @@ +{ + "name": "ui", + "version": "0.1.0", + "private": true, + "scripts": { + "dev": "next dev --turbopack", + "build": "next build", + "start": "next start", + "lint": "next lint", + "heroku-postbuild": "next build && next export" + }, + "dependencies": { + "react": "^19.0.0", + "react-dom": "^19.0.0", + "next": "15.3.2", + "serve": "^14.2.1" + }, + "devDependencies": { + "typescript": "^5", + "@types/node": "^20", + "@types/react": "^19", + "@types/react-dom": "^19", + "@tailwindcss/postcss": "^4", + "tailwindcss": "^4" + } +} diff --git a/ui/postcss.config.mjs b/ui/postcss.config.mjs new file mode 100644 index 00000000..c7bcb4b1 --- /dev/null +++ b/ui/postcss.config.mjs @@ -0,0 +1,5 @@ +const config = { + plugins: ["@tailwindcss/postcss"], +}; + +export default config; diff --git a/ui/public/file.svg b/ui/public/file.svg new file mode 100644 index 00000000..004145cd --- /dev/null +++ b/ui/public/file.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ui/public/globe.svg b/ui/public/globe.svg new file mode 100644 index 00000000..567f17b0 --- /dev/null +++ b/ui/public/globe.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ui/public/next.svg b/ui/public/next.svg new file mode 100644 index 00000000..5174b28c --- /dev/null +++ b/ui/public/next.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ui/public/vercel.svg b/ui/public/vercel.svg new file mode 100644 index 00000000..77053960 --- /dev/null +++ b/ui/public/vercel.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ui/public/window.svg b/ui/public/window.svg new file mode 100644 index 00000000..b2b2a44f --- /dev/null +++ b/ui/public/window.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ui/src/app/favicon.ico b/ui/src/app/favicon.ico new file mode 100644 index 00000000..718d6fea Binary files /dev/null and b/ui/src/app/favicon.ico differ diff --git a/ui/src/app/globals.css b/ui/src/app/globals.css new file mode 100644 index 00000000..a2dc41ec --- /dev/null +++ b/ui/src/app/globals.css @@ -0,0 +1,26 @@ +@import "tailwindcss"; + +:root { + --background: #ffffff; + --foreground: #171717; +} + +@theme inline { + --color-background: var(--background); + 
--color-foreground: var(--foreground);
+  --font-sans: var(--font-geist-sans);
+  --font-mono: var(--font-geist-mono);
+}
+
+@media (prefers-color-scheme: dark) {
+  :root {
+    --background: #0a0a0a;
+    --foreground: #ededed;
+  }
+}
+
+body {
+  background: var(--background);
+  color: var(--foreground);
+  font-family: Arial, Helvetica, sans-serif;
+}
diff --git a/ui/src/app/layout.tsx b/ui/src/app/layout.tsx
new file mode 100644
index 00000000..abc9f889
--- /dev/null
+++ b/ui/src/app/layout.tsx
@@ -0,0 +1,34 @@
+import type { Metadata } from "next";
+import { Geist, Geist_Mono } from "next/font/google";
+import "./globals.css";
+
+const geistSans = Geist({
+  variable: "--font-geist-sans",
+  subsets: ["latin"],
+});
+
+const geistMono = Geist_Mono({
+  variable: "--font-geist-mono",
+  subsets: ["latin"],
+});
+
+export const metadata: Metadata = {
+  title: "PromptWizard UI",
+  description: "A user interface for the PromptWizard prompt optimization framework",
+};
+
+export default function RootLayout({
+  children,
+}: Readonly<{
+  children: React.ReactNode;
+}>) {
+  return (
+    <html lang="en">
+      <body className={`${geistSans.variable} ${geistMono.variable} antialiased`}>
+        {children}
+      </body>
+    </html>
+  );
+}
diff --git a/ui/src/app/page.js b/ui/src/app/page.js
new file mode 100644
index 00000000..cf3cd89f
--- /dev/null
+++ b/ui/src/app/page.js
@@ -0,0 +1,95 @@
+'use client'
+
+import { useState } from 'react';
+import './globals.css'
+import PromptForm from '../components/PromptForm';
+
+export default function Home() {
+  const [optimizedPrompt, setOptimizedPrompt] = useState('');
+  const [copySuccess, setCopySuccess] = useState(false);
+
+  const handleCopyToClipboard = async () => {
+    try {
+      await navigator.clipboard.writeText(optimizedPrompt);
+      setCopySuccess(true);
+      setTimeout(() => {
+        setCopySuccess(false);
+      }, 2000); // Reset after 2 seconds
+    } catch (err) {
+      console.error('Failed to copy: ', err);
+      setCopySuccess(false);
+    }
+  };
+
+  return (
+    <main>
+      <h1>PromptWizard UI</h1>
+      <PromptForm setOptimizedPrompt={setOptimizedPrompt} optimizedPrompt={optimizedPrompt} />
+      <section>
+        <h2>Optimized Prompt:</h2>
+        {optimizedPrompt && (
+          <div>
+            {optimizedPrompt.includes("Additional instructions: Please ensure all responses are clear, concise") && (
+              <span>Mock Response (No API Key)</span>
+            )}
+            <button onClick={handleCopyToClipboard}>
+              {copySuccess ? 'Copied!' : 'Copy to Clipboard'}
+            </button>
+          </div>
+        )}
+        <pre>
+          {optimizedPrompt || 'Your optimized prompt will appear here'}
+        </pre>
+        {optimizedPrompt && optimizedPrompt.includes("Additional instructions: Please ensure all responses are clear, concise") && (
+          <div>
+            <p>⚠️ This is a mock response</p>
+            <p>
+              The system is using a mock implementation because it couldn't access
+              the Gemini API. To get real optimized prompts:
+            </p>
+            <ol>
+              <li>Make sure you've entered a valid Gemini API key</li>
+              <li>Click the "Validate" button next to the API key field</li>
+              <li>Check that the API Key status shows as "Valid"</li>
+            </ol>
+            <p>You can get a Gemini API key from https://ai.google.dev/</p>
+          </div>
+        )}
+      </section>
+    </main>
+ ); +} diff --git a/ui/src/components/AdvancedEvaluationMetrics.js b/ui/src/components/AdvancedEvaluationMetrics.js new file mode 100644 index 00000000..d95a42ed --- /dev/null +++ b/ui/src/components/AdvancedEvaluationMetrics.js @@ -0,0 +1,53 @@ +// AdvancedEvaluationMetrics.js +import React from 'react'; +import Tooltip from './Tooltip'; + +/** + * Advanced Evaluation Metrics component + * @param {Object} props - Component props + * @param {Object} props.formData - Form data state + * @param {Function} props.handleAdvancedMetricsChange - Function to handle metrics changes + * @param {Object} props.tooltipDefinitions - Tooltip definitions + * @returns {JSX.Element} Advanced Evaluation Metrics component + */ +const AdvancedEvaluationMetrics = ({ formData, handleAdvancedMetricsChange, tooltipDefinitions }) => { + const metrics = [ + 'Faithfulness', + 'SemanticSimilarity', + 'ContextRelevancy', + 'HitRate', + 'MRR', + 'NDCG' + ]; + + return ( +
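+    // One checkbox per metric; the selected names are tracked in
+    // formData.advancedEvaluationMetrics via handleAdvancedMetricsChange.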
+    <div>
+      <label>
+        Advanced Evaluation Metrics
+        <Tooltip text={tooltipDefinitions.advancedEvaluationMetrics} />
+      </label>
+      <div>
+        {metrics.map(metric => (
+          <label key={metric}>
+            <input
+              type="checkbox"
+              checked={formData.advancedEvaluationMetrics.includes(metric)}
+              onChange={() => handleAdvancedMetricsChange(metric)}
+            />
+            {metric}
+          </label>
+        ))}
+      </div>
+      <p>
+        These advanced metrics provide more comprehensive evaluation of your prompts beyond basic criteria.
+      </p>
+    </div>
+ ); +}; + +export default AdvancedEvaluationMetrics; diff --git a/ui/src/components/AdvancedOptimizationParams.js b/ui/src/components/AdvancedOptimizationParams.js new file mode 100644 index 00000000..124f1c97 --- /dev/null +++ b/ui/src/components/AdvancedOptimizationParams.js @@ -0,0 +1,146 @@ +// AdvancedOptimizationParams.js +import React from 'react'; +import Tooltip from './Tooltip'; + +/** + * Advanced Optimization Parameters component + * @param {Object} props - Component props + * @param {Object} props.formData - Form data state + * @param {Function} props.handleChange - Function to handle form changes + * @returns {JSX.Element} Advanced Optimization Parameters component + */ +const AdvancedOptimizationParams = ({ formData, handleChange, tooltipDefinitions }) => { + return ( +
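+    // Each numeric field's id matches its key in formData, so the shared
+    // handleChange handler can update state from e.target.id.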
+    <div>
+      <h3>Advanced Optimization Parameters</h3>
+
+      <div>
+        {/* Mutate Refine Iterations */}
+        <div>
+          <label htmlFor="mutateRefineIterations">
+            Mutate Refine Iterations
+            <Tooltip text={tooltipDefinitions.mutateRefineIterations} />
+          </label>
+          <input type="number" id="mutateRefineIterations" value={formData.mutateRefineIterations} onChange={handleChange} />
+        </div>
+
+        {/* Refine Task Examples Iterations */}
+        <div>
+          <label htmlFor="refineTaskEgIterations">
+            Refine Task Examples Iterations
+            <Tooltip text={tooltipDefinitions.refineTaskEgIterations} />
+          </label>
+          <input type="number" id="refineTaskEgIterations" value={formData.refineTaskEgIterations} onChange={handleChange} />
+        </div>
+
+        {/* Min Correct Count */}
+        <div>
+          <label htmlFor="minCorrectCount">
+            Min Correct Count
+            <Tooltip text={tooltipDefinitions.minCorrectCount} />
+          </label>
+          <input type="number" id="minCorrectCount" value={formData.minCorrectCount} onChange={handleChange} />
+        </div>
+
+        {/* Max Eval Batches */}
+        <div>
+          <label htmlFor="maxEvalBatches">
+            Max Eval Batches
+            <Tooltip text={tooltipDefinitions.maxEvalBatches} />
+          </label>
+          <input type="number" id="maxEvalBatches" value={formData.maxEvalBatches} onChange={handleChange} />
+        </div>
+
+        {/* Top N */}
+        <div>
+          <label htmlFor="topN">
+            Top N
+            <Tooltip text={tooltipDefinitions.topN} />
+          </label>
+          <input type="number" id="topN" value={formData.topN} onChange={handleChange} />
+        </div>
+
+        {/* Questions Batch Size */}
+        <div>
+          <label htmlFor="questionsBatchSize">
+            Questions Batch Size
+            <Tooltip text={tooltipDefinitions.questionsBatchSize} />
+          </label>
+          <input type="number" id="questionsBatchSize" value={formData.questionsBatchSize} onChange={handleChange} />
+        </div>
+      </div>
+
+      {/* Refine Instruction Checkbox */}
+      <div>
+        <label htmlFor="refineInstruction">
+          <input type="checkbox" id="refineInstruction" checked={formData.refineInstruction} onChange={handleChange} />
+          Refine Instruction
+        </label>
+      </div>
+    </div>
+ ); +}; + +export default AdvancedOptimizationParams; diff --git a/ui/src/components/DatasetPreview.js b/ui/src/components/DatasetPreview.js new file mode 100644 index 00000000..3855cf75 --- /dev/null +++ b/ui/src/components/DatasetPreview.js @@ -0,0 +1,91 @@ +// DatasetPreview.js +import React, { useState } from 'react'; + +/** + * Dataset Preview component + * @param {Object} props - Component props + * @param {Object} props.dataset - Dataset to preview + * @returns {JSX.Element} Dataset Preview component + */ +const DatasetPreview = ({ dataset }) => { + const [currentPage, setCurrentPage] = useState(1); + const itemsPerPage = 5; + + if (!dataset || dataset.length === 0) { + return ( +
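+      // Empty state: rendered when no dataset has been loaded or uploaded yet.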
+      <div>
+        <h3>Dataset Preview</h3>
+        <p>No dataset available for preview.</p>
+      </div>
+ ); + } + + // Calculate pagination + const totalPages = Math.ceil(dataset.length / itemsPerPage); + const startIndex = (currentPage - 1) * itemsPerPage; + const endIndex = Math.min(startIndex + itemsPerPage, dataset.length); + const currentItems = dataset.slice(startIndex, endIndex); + + return ( +
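+    // Paged table view: itemsPerPage (5) rows per page, with Previous/Next
+    // controls driven by the currentPage state computed above.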
+    <div>
+      <h3>Dataset Preview</h3>
+      <p>
+        Showing {startIndex + 1}-{endIndex} of {dataset.length} examples
+      </p>
+
+      <table>
+        <thead>
+          <tr>
+            <th>#</th>
+            <th>Input</th>
+            <th>Output</th>
+          </tr>
+        </thead>
+        <tbody>
+          {currentItems.map((item, index) => (
+            <tr key={startIndex + index}>
+              <td>{startIndex + index + 1}</td>
+              <td>{item.input || (item.question ? item.question : 'N/A')}</td>
+              <td>{item.output || (item.answer ? item.answer : 'N/A')}</td>
+            </tr>
+          ))}
+        </tbody>
+      </table>
+
+      {/* Pagination */}
+      {totalPages > 1 && (
+        <div>
+          <button
+            onClick={() => setCurrentPage(currentPage - 1)}
+            disabled={currentPage === 1}
+          >
+            Previous
+          </button>
+          <span>Page {currentPage} of {totalPages}</span>
+          <button
+            onClick={() => setCurrentPage(currentPage + 1)}
+            disabled={currentPage === totalPages}
+          >
+            Next
+          </button>
+        </div>
+      )}
+    </div>
+ ); +}; + +export default DatasetPreview; diff --git a/ui/src/components/MultimodalSupport.js b/ui/src/components/MultimodalSupport.js new file mode 100644 index 00000000..9c539d29 --- /dev/null +++ b/ui/src/components/MultimodalSupport.js @@ -0,0 +1,108 @@ +// MultimodalSupport.js +import React, { useState } from 'react'; +import Tooltip from './Tooltip'; + +/** + * Multimodal Support component + * @param {Object} props - Component props + * @param {Object} props.formData - Form data state + * @param {Function} props.handleChange - Function to handle form changes + * @param {Object} props.tooltipDefinitions - Tooltip definitions + * @returns {JSX.Element} Multimodal Support component + */ +const MultimodalSupport = ({ formData, handleChange, tooltipDefinitions }) => { + const [imagePreview, setImagePreview] = useState(null); + + const handleImageUpload = (e) => { + const file = e.target.files[0]; + if (file) { + const reader = new FileReader(); + reader.onload = (event) => { + setImagePreview(event.target.result); + }; + reader.readAsDataURL(file); + } + }; + + return ( +
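+    // Toggle for the beta multimodal mode; when enabled, an image can be
+    // uploaded and previewed via the data URL produced by handleImageUpload.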
diff --git a/ui/src/components/MultimodalSupport.js b/ui/src/components/MultimodalSupport.js new file mode 100644 index 00000000..9c539d29 --- /dev/null +++ b/ui/src/components/MultimodalSupport.js @@ -0,0 +1,108 @@
+// MultimodalSupport.js
+import React, { useState } from 'react';
+import Tooltip from './Tooltip';
+
+/**
+ * Multimodal Support component
+ * @param {Object} props - Component props
+ * @param {Object} props.formData - Form data state
+ * @param {Function} props.handleChange - Function to handle form changes
+ * @param {Object} props.tooltipDefinitions - Tooltip definitions
+ * @returns {JSX.Element} Multimodal Support component
+ */
+const MultimodalSupport = ({ formData, handleChange, tooltipDefinitions }) => {
+  const [imagePreview, setImagePreview] = useState(null);
+
+  const handleImageUpload = (e) => {
+    const file = e.target.files[0];
+    if (file) {
+      const reader = new FileReader();
+      reader.onload = (event) => {
+        setImagePreview(event.target.result);
+      };
+      reader.readAsDataURL(file);
+    }
+  };
+
+  // NOTE: the JSX below is reconstructed; class names, labels, the accept
+  // filter and the Clear Image control are assumptions (original markup lost).
+  return (
+    <div className="card">
+      <label htmlFor="enableMultimodal">
+        <input type="checkbox" id="enableMultimodal" checked={formData.enableMultimodal} onChange={handleChange} />
+        Enable Multimodal Support <Tooltip text={tooltipDefinitions.enableMultimodal} />
+        <span className="badge">Beta Feature</span>
+      </label>
+
+      {formData.enableMultimodal && (
+        <div className="multimodal-options">
+          <p>
+            Upload an image to optimize prompts for image-based tasks. This allows the model to understand visual context.
+          </p>
+
+          <div className="form-group">
+            <label htmlFor="imageUpload">Upload Image</label>
+            <input type="file" id="imageUpload" accept="image/jpeg,image/png,image/gif" onChange={handleImageUpload} />
+          </div>
+
+          <div className="image-preview">
+            {imagePreview ? (
+              <div>
+                <p>Image Preview:</p>
+                <img src={imagePreview} alt="Preview" />
+              </div>
+            ) : (
+              <div>
+                <p>No image selected</p>
+                <p>Supported formats: JPG, PNG, GIF</p>
+              </div>
+            )}
+          </div>
+
+          <button type="button" onClick={() => setImagePreview(null)}>
+            Clear Image
+          </button>
+        </div>
+      )}
+    </div>
+ ); +}; + +export default MultimodalSupport; diff --git a/ui/src/components/PromptForm.js b/ui/src/components/PromptForm.js new file mode 100644 index 00000000..fedd9db1 --- /dev/null +++ b/ui/src/components/PromptForm.js @@ -0,0 +1,1159 @@ +// PromptForm.js +import { useState, useEffect } from 'react'; +import { exportConfigAsYaml } from '../utils/exportConfig'; +import Tooltip from './Tooltip'; +import { tooltipDefinitions } from '../utils/tooltipDefinitions'; +import AdvancedOptimizationParams from './AdvancedOptimizationParams'; +import AdvancedEvaluationMetrics from './AdvancedEvaluationMetrics'; +import DatasetPreview from './DatasetPreview'; +import MultimodalSupport from './MultimodalSupport'; +import SessionManagement from './SessionManagement'; +import TabContainer from './TabContainer'; + +const PromptForm = ({ setOptimizedPrompt, optimizedPrompt }) => { + const [formData, setFormData] = useState({ + taskDescription: '', + baseInstruction: '', + answerFormat: '', + model: 'Gemini', + apiKey: '', + dataset: 'Custom', + customDataset: null, + mutationRounds: 3, + refineSteps: 2, + mutateRefineIterations: 3, + refineTaskEgIterations: 3, + refineInstruction: true, + minCorrectCount: 3, + maxEvalBatches: 6, + topN: 1, + questionsBatchSize: 1, + useExamples: false, + generateSyntheticExamples: false, + generateExpertIdentity: true, + generateIntentKeywords: false, + styleVariation: 5, + fewShotCount: 5, + evaluationCriteria: [], + advancedEvaluationMetrics: [], + showDatasetPreview: false, + enableMultimodal: false, + saveSession: false, + sessionName: '', + }); + + const [isLoading, setIsLoading] = useState(false); + const [isValidatingKey, setIsValidatingKey] = useState(false); + const [error, setError] = useState(''); + const [customDatasetName, setCustomDatasetName] = useState(''); + const [apiStatus, setApiStatus] = useState({ checked: false, available: false }); + const [lastApiCheckTime, setLastApiCheckTime] = useState(0); + const [apiKeyStatus, setApiKeyStatus] = useState({ + checked: false, + valid: false, + message: '' + }); + + const handleChange = (e) => { + const { id, value, type, checked } = e.target; + + // Create a copy of the current form data + const updatedFormData = { ...formData }; + + // Update the field that changed + updatedFormData[id] = type === 'checkbox' ? checked : value; + + // Handle field dependencies based on the three scenarios: + // Scenario 1: No training data, no in-context examples (useExamples=false, generateSyntheticExamples=false) + // Scenario 2: No training data, with in-context examples using synthetic examples (useExamples=true, generateSyntheticExamples=true) + // Scenario 3: With training data, with in-context examples (useExamples=true, generateSyntheticExamples=false) + + if (id === 'generateSyntheticExamples') { + if (checked) { + // Scenario 2: If Generate Synthetic Examples is checked: + // 1. Automatically enable Use In-Context Examples since synthetic examples are used as in-context examples + updatedFormData.useExamples = true; + // 2. No dataset is required since synthetic examples are generated by the LLM + } + // If unchecked, we don't change useExamples as the user might want Scenario 3 + } + + if (id === 'useExamples') { + if (!checked) { + // Scenario 1: If Use In-Context Examples is unchecked: + // 1. 
Automatically uncheck Generate Synthetic Examples since synthetic examples are only used as in-context examples + updatedFormData.generateSyntheticExamples = false; + } + // If checked, we don't change generateSyntheticExamples as the user might want either Scenario 2 or 3 + } + + // If dataset is changed to something other than Custom, clear customDataset + if (id === 'dataset' && value !== 'Custom') { + updatedFormData.customDataset = null; + setCustomDatasetName(''); + } + + // Update the form data + setFormData(updatedFormData); + }; + + // Check if the API is available + const checkApiHealth = async () => { + try { + // Update the last check time + setLastApiCheckTime(Date.now()); + + // Try the default API path first + let response = await fetch('/api/health'); + + if (response.ok) { + const data = await response.json(); + setApiStatus({ checked: true, available: data.success }); + return true; + } + + // If that fails, try direct connection to port 5000 + try { + console.log('Trying direct connection to port 5000...'); + response = await fetch('http://localhost:5000/api/health'); + + if (response.ok) { + const data = await response.json(); + setApiStatus({ checked: true, available: data.success }); + console.log('Connected directly to port 5000'); + return true; + } + } catch (directErr) { + console.log('Failed to connect directly to port 5000'); + } + + // If that fails too, try port 5001 + try { + console.log('Trying direct connection to port 5001...'); + response = await fetch('http://localhost:5001/api/health'); + + if (response.ok) { + const data = await response.json(); + setApiStatus({ checked: true, available: data.success }); + console.log('Connected directly to port 5001'); + return true; + } + } catch (directErr) { + console.log('Failed to connect directly to port 5001'); + } + + // If all attempts fail, set status to unavailable + setApiStatus({ checked: true, available: false }); + return false; + } catch (err) { + console.error('API health check failed:', err); + setApiStatus({ checked: true, available: false }); + return false; + } + }; + + // Validate the API key - optimized to avoid unnecessary API health checks + const validateApiKey = async () => { + if (!formData.apiKey) { + setApiKeyStatus({ + checked: true, + valid: false, + message: 'Please enter an API key' + }); + return false; + } + + setIsValidatingKey(true); + setError(''); + + try { + // Only check API health if we haven't checked recently or if the last check failed + if (!apiStatus.checked || !apiStatus.available || Date.now() - lastApiCheckTime > 30000) { + const isApiAvailable = await checkApiHealth(); + if (!isApiAvailable) { + setApiKeyStatus({ + checked: true, + valid: false, + message: 'API server is not available' + }); + return false; + } + } else if (!apiStatus.available) { + // If we already know the API is unavailable, don't try to validate + setApiKeyStatus({ + checked: true, + valid: false, + message: 'API server is not available' + }); + return false; + } + + // Send validation request + const response = await fetch('/api/validate_key', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + apiKey: formData.apiKey, + model: formData.model + }), + }); + + const data = await response.json(); + + setApiKeyStatus({ + checked: true, + valid: data.valid, + message: data.message + }); + + return data.valid; + } catch (err) { + console.error('API key validation failed:', err); + setApiKeyStatus({ + checked: true, + valid: false, + message: 
err.message || 'Failed to validate API key' + }); + return false; + } finally { + setIsValidatingKey(false); + } + }; + + // Check API health when component mounts + useEffect(() => { + checkApiHealth(); + }, []); + + const handleFileUpload = (e) => { + const file = e.target.files[0]; + if (file) { + setCustomDatasetName(file.name); + + // Read the file + const reader = new FileReader(); + reader.onload = (event) => { + try { + // Try to parse as JSON + const jsonData = JSON.parse(event.target.result); + setFormData(prev => ({ + ...prev, + customDataset: jsonData + })); + setError(''); + } catch (err) { + console.error('Error parsing file:', err); + setError('Invalid file format. Please upload a valid JSON file.'); + setFormData(prev => ({ + ...prev, + customDataset: null + })); + setCustomDatasetName(''); + } + }; + reader.readAsText(file); + } + }; + + const handleEvaluationCriteriaChange = (e) => { + const { value, checked } = e.target; + setFormData({ + ...formData, + evaluationCriteria: checked + ? [...formData.evaluationCriteria, value] + : formData.evaluationCriteria.filter(criteria => criteria !== value) + }); + }; + + const handleAdvancedMetricsChange = (e) => { + const { value, checked } = e.target; + setFormData({ + ...formData, + advancedEvaluationMetrics: checked + ? [...formData.advancedEvaluationMetrics, value] + : formData.advancedEvaluationMetrics.filter(metric => metric !== value) + }); + }; + + // Effect to update the checked state of evaluation criteria checkboxes when evaluationCriteria changes + useEffect(() => { + // This will ensure the checkboxes reflect the current state of evaluationCriteria + // Especially important when setting evaluationCriteria programmatically + }, [formData.evaluationCriteria]); + + // Validate form data - optimized for speed + const validateForm = () => { + // Fast path: Check required fields directly without array iteration + if (!formData.taskDescription) { + return { valid: false, message: 'Task Description is required' }; + } + + if (!formData.baseInstruction) { + return { valid: false, message: 'Base Instruction is required' }; + } + + if (!formData.apiKey) { + return { valid: false, message: 'API Key is required' }; + } + + // Check numeric fields + if (!formData.mutationRounds || formData.mutationRounds < 1) { + return { valid: false, message: 'Mutation Rounds must be at least 1' }; + } + + if (!formData.refineSteps || formData.refineSteps < 1) { + return { valid: false, message: 'Refine Steps must be at least 1' }; + } + + // Check dependencies based on the three scenarios: + // Scenario 1: No training data, no in-context examples (useExamples=false, generateSyntheticExamples=false) + // Scenario 2: No training data, with in-context examples using synthetic examples (useExamples=true, generateSyntheticExamples=true) + // Scenario 3: With training data, with in-context examples (useExamples=true, generateSyntheticExamples=false) + + if (formData.useExamples) { + // If using in-context examples... 
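+      // Illustrative summary of the flag combinations this validation covers:
+      //   useExamples=false, generateSyntheticExamples=false -> Scenario 1 (no dataset needed)
+      //   useExamples=true,  generateSyntheticExamples=true  -> Scenario 2 (dataset optional)
+      //   useExamples=true,  generateSyntheticExamples=false -> Scenario 3 (dataset required)
+      //   useExamples=false, generateSyntheticExamples=true  -> invalid; rejected below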
+ + if (!formData.generateSyntheticExamples) { + // Scenario 3: Using real examples from dataset + // Dataset is required and must be valid + if (formData.dataset === 'Custom' && !formData.customDataset) { + return { valid: false, message: 'Custom dataset is required when using in-context examples without synthetic examples' }; + } + } + // Scenario 2: Using synthetic examples + // Dataset is optional and can be used for evaluation + } + + // Validate that generateSyntheticExamples requires useExamples + if (formData.generateSyntheticExamples && !formData.useExamples) { + return { valid: false, message: 'Use In-Context Examples must be enabled when generating synthetic examples' }; + } + + return { valid: true }; + }; + + const handleOptimizePrompt = async (e) => { + e.preventDefault(); + + // Validate form fields first (this is fast and synchronous) + const validation = validateForm(); + if (!validation.valid) { + setError(validation.message); + return; + } + + setIsLoading(true); + setError(''); + + try { + // Only check API health if we haven't checked recently or if the last check failed + // This avoids unnecessary network requests + if (!apiStatus.checked || !apiStatus.available || Date.now() - lastApiCheckTime > 30000) { + const isApiAvailable = await checkApiHealth(); + if (!isApiAvailable) { + throw new Error('API server is not available. Please make sure the backend is running.'); + } + } + + // Only validate API key if it hasn't been validated yet or if it was invalid + // This avoids unnecessary network requests + if (!apiKeyStatus.checked || !apiKeyStatus.valid) { + const isKeyValid = await validateApiKey(); + if (!isKeyValid) { + throw new Error(`Invalid API key: ${apiKeyStatus.message}`); + } + } + + // Prepare data for API + const apiData = { + taskDescription: formData.taskDescription, + baseInstruction: formData.baseInstruction, + answerFormat: formData.answerFormat, + model: formData.model, + mutationRounds: formData.mutationRounds, + refineSteps: formData.refineSteps, + mutateRefineIterations: formData.mutateRefineIterations, + refineTaskEgIterations: formData.refineTaskEgIterations, + refineInstruction: formData.refineInstruction, + minCorrectCount: formData.minCorrectCount, + maxEvalBatches: formData.maxEvalBatches, + topN: formData.topN, + questionsBatchSize: formData.questionsBatchSize, + useExamples: formData.useExamples, + generateSyntheticExamples: formData.generateSyntheticExamples, + generateExpertIdentity: formData.generateExpertIdentity, + generateIntentKeywords: formData.generateIntentKeywords, + styleVariation: formData.styleVariation, + fewShotCount: formData.fewShotCount, + dataset: formData.dataset, + evaluationCriteria: formData.evaluationCriteria, + advancedEvaluationMetrics: formData.advancedEvaluationMetrics, + enableMultimodal: formData.enableMultimodal, + saveSession: formData.saveSession, + sessionName: formData.sessionName, + apiKey: formData.apiKey + }; + + // Add custom dataset if available + if (formData.dataset === 'Custom' && formData.customDataset) { + apiData.customDataset = formData.customDataset; + } + + console.log('Sending data to API:', apiData); + + // Always use the relative URL path which will be handled by Next.js rewrites + const apiUrl = '/api/optimize_prompt'; + + console.log('Sending request to API at:', apiUrl); + + const response = await fetch(apiUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(apiData), + }); + + // Check if response is ok before trying to parse JSON + 
if (!response.ok) { + // Try to parse as JSON first + let errorMessage = `API responded with status: ${response.status}`; + try { + const errorData = await response.json(); + if (errorData && errorData.error) { + errorMessage = errorData.error; + } + } catch (jsonError) { + // If JSON parsing fails, try to get text content + try { + const textContent = await response.text(); + if (textContent && textContent.length < 100) { + errorMessage = textContent; + } + } catch (textError) { + console.error('Failed to parse error response as text:', textError); + } + } + throw new Error(errorMessage); + } + + // Now we know the response is OK, parse the JSON + let data; + try { + data = await response.json(); + } catch (jsonError) { + throw new Error('Invalid JSON response from server'); + } + + if (data.success) { + setOptimizedPrompt(data.optimizedPrompt); + } else { + throw new Error(data.error || 'Unknown error occurred'); + } + } catch (err) { + console.error('Error optimizing prompt:', err); + + // Provide more helpful error message + let errorMessage = `Failed to optimize prompt: ${err.message}`; + if (err.message === 'Failed to fetch') { + errorMessage = 'Failed to connect to the API server. Please make sure the backend is running and try again.'; + } + setError(errorMessage); + + // Always use mock response for demo purposes when there's an error + const mockResponse = { + optimizedPrompt: `[MOCK RESPONSE] Optimized prompt for: "${formData.taskDescription}"\n\n${formData.baseInstruction}\n\nAdditional instructions: Please ensure all responses follow the format: ${formData.answerFormat}\n\nThis prompt has been optimized using ${formData.mutationRounds} mutation rounds and ${formData.refineSteps} refine steps.\n\n(Note: This is a mock response because the API server could not be reached.)` + }; + + // Only show the mock response if the user confirms + if (window.confirm('API server error. Would you like to see a mock response instead?')) { + setOptimizedPrompt(mockResponse.optimizedPrompt); + } + } finally { + setIsLoading(false); + } + }; + + // Function to fill test values + const fillTestValues = () => { + setFormData({ + ...formData, + taskDescription: "write image description to give to any image gen model", + baseInstruction: "cute and young punjabi couples taking fere, ghibli style", + refineSteps: 4, + mutateRefineIterations: 3, + refineTaskEgIterations: 3, + refineInstruction: true, + minCorrectCount: 3, + maxEvalBatches: 6, + topN: 1, + questionsBatchSize: 1, + useExamples: true, // Enable this since generateSyntheticExamples requires it + generateSyntheticExamples: true, // With synthetic examples, we don't need a dataset + generateExpertIdentity: true, + generateIntentKeywords: true, + styleVariation: 3, + evaluationCriteria: ["Clarity", "Completeness", "Relevance"], + advancedEvaluationMetrics: ["Faithfulness", "SemanticSimilarity"], + enableMultimodal: true, + // Keep the dataset as is, but it will be disabled in the UI when synthetic examples are enabled + }); + + // Clear any custom dataset since we're using synthetic examples + if (formData.dataset === 'Custom') { + setCustomDatasetName(''); + } + }; + + // Define tab content components + const renderBasicInfoTab = () => ( +
+
+ +