diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json index 7d84368d..2b3c495d 100644 --- a/.claude-plugin/marketplace.json +++ b/.claude-plugin/marketplace.json @@ -4,13 +4,13 @@ "name": "FutureSearch" }, "metadata": { - "description": "AI-powered data processing plugins from FutureSearch" + "description": "everyrow plugins from FutureSearch" }, "plugins": [ { "name": "everyrow", "source": "./", - "description": "Claude Code plugin for the everyrow SDK - AI-powered data processing utilities for transforming, deduping, merging, ranking, and screening dataframes", + "description": "Give Claude Code a research team. Forecast, score, classify, or research every row of a dataset.", "version": "0.4.0" } ] diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json index 516370f6..f5c2e2a2 100644 --- a/.claude-plugin/plugin.json +++ b/.claude-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "everyrow", - "description": "Claude Code plugin for the everyrow SDK - AI-powered data processing utilities for transforming, deduping, merging, ranking, and screening dataframes", + "description": "Give Claude Code a research team. Forecast, score, classify, or research every row of a dataset.", "version": "0.4.0", "author": { "name": "FutureSearch" diff --git a/CITATION.cff b/CITATION.cff index 130819ee..0434d850 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -2,7 +2,7 @@ cff-version: 1.2.0 message: "If you use this software, please cite it as below." type: software title: "everyrow" -abstract: "Screen, rank, dedupe, and merge dataframes using natural language. Run web agents to research every row." +abstract: "A researcher for every row. Run web research agents at scale to forecast, score, classify, deduplicate, merge, or enrich entire datasets." 
license: MIT version: 0.4.0 date-released: 2026-02-24 diff --git a/README.md b/README.md index 3a7c2adb..761275e9 100644 --- a/README.md +++ b/README.md @@ -7,33 +7,35 @@ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Python 3.12+](https://img.shields.io/badge/python-3.12+-blue.svg)](https://www.python.org/downloads/) -An add-on for Claude Code, Claude Desktop/Cowork, and Claude web to enable Claude to run LLM web research agents at scale. Claude uses everyrow to research entire datasets, and to intelligently sort, filter, merge, dedupe, or add columns to large datasets, via a single Python or MCP call. See the [docs site](https://everyrow.io/docs) for how to install into your Claude interface of choice. +Give yourself, or your AI, a team of researchers to gather data, forecast, score, or classify every row in a dataset. Available [standalone](https://everyrow.io/app), as a Claude Code plugin, MCP server, or Python SDK. See the [docs site](https://everyrow.io/docs) for how to install into your interface of choice. The best experience is inside Claude Code. + ```bash claude plugin marketplace add futuresearch/everyrow-sdk claude plugin install everyrow@futuresearch ``` -See [here](https://everyrow.io/docs#tab-claude-desktop-mcp) for Claude Desktop/Cowork. Claude web (claude.ai) connector coming soon. Or try it directly in our hosted app that uses the Claude Agent SDK at [everyrow.io/app](https://everyrow.io/app)]. +See [here](https://everyrow.io/docs#tab-claude-desktop-mcp) for Claude Desktop/Cowork. Claude web (claude.ai) connector coming soon. Or try it directly in our hosted app that uses the Claude Agent SDK at [everyrow.io/app](https://everyrow.io/app). Get an API key at [everyrow.io/api-key](https://everyrow.io/api-key) ($20 free credit), then: ## Operations -Enable Claude to perform tens of thousands of LLM calls, or thousands of LLM web research agents, in each single operation. 
+Spin up a team of: -| Operation | Intelligence | Scales To | -|---|---|---| -| [**Screen**](https://everyrow.io/docs/reference/SCREEN) | Filter by criteria that need judgment | 10k rows | -| [**Rank**](https://everyrow.io/docs/reference/RANK) | Score rows from research | 10k rows | -| [**Dedupe**](https://everyrow.io/docs/reference/DEDUPE) | Deduplicate when fuzzy matching fails | 20k rows | -| [**Merge**](https://everyrow.io/docs/reference/MERGE) | Join tables when keys don't match | 5k rows | -| [**Research**](https://everyrow.io/docs/reference/RESEARCH) | Web research on every row | 10k rows | +| Role | What it does | Cost | Scales To | +| ---- | ------------ | ---- | --------- | +| [**Agents**](https://everyrow.io/docs/reference/RESEARCH) | Research, then analyze | 1–3¢/researcher | 10k rows | +| [**Forecasters**](https://everyrow.io/docs/reference/FORECAST) | Predict outcomes | 20–50¢/researcher | 10k rows | +| [**Scorers**](https://everyrow.io/docs/reference/RANK) | Research, then score | 1–5¢/researcher | 10k rows | +| [**Classifiers**](https://everyrow.io/docs/reference/SCREEN) | Research, then categorize | 0.1–0.7¢/researcher | 10k rows | +| [**Matchers**](https://everyrow.io/docs/reference/MERGE) | Find matching rows | 0.2–0.5¢/researcher | 20k rows | See the full [API reference](https://everyrow.io/docs/api), [guides](https://everyrow.io/docs/guides), and [case studies](https://everyrow.io/docs/case-studies), (for example, see our [case study](https://everyrow.io/docs/case-studies/llm-web-research-agents-at-scale) running a `Research` task on 10k rows, running agents that used 120k LLM calls.) Or just ask Claude in your interface of choice: + ``` Label this 5,000 row CSV with the right categories. ``` @@ -50,7 +52,7 @@ Rank these 2,000 people from Wikipedia on who is the most bullish on AI. ## Web Agents -The most basic utility to build from is `agent_map`, to have LLM web research agents work on every row of the dataframe. 
Agents are tuned on [Deep Research Bench](https://arxiv.org/abs/2506.06287), our benchmark for questions that need extensive searching and cross-referencing, and tuned to get correct answers at minimal cost. +The base operation is `agent_map`: one web research agent per row. The other operations (rank, classify, forecast, merge, dedupe) use the agents under the hood as necessary. Agents are tuned on [Deep Research Bench](https://arxiv.org/abs/2506.06287), our benchmark for questions that need extensive searching and cross-referencing, and tuned to get correct answers at minimal cost. Under the hood, Claude will: @@ -83,7 +85,6 @@ print(result.data.head()) See the API [docs](https://everyrow.io/docs/reference/RESEARCH.md), a case study of [labeling data](https://everyrow.io/docs/classify-dataframe-rows-llm) or a case study for [researching government data](https://everyrow.io/docs/case-studies/research-and-rank-permit-times) at scale. - ## Sessions You can also use a session to output a URL to see the research and data processing in the [everyrow.io/app](https://everyrow.io/app) application, which streams the research and makes charts. Or you can use it purely as an intelligent data utility, and [chain intelligent pandas operations](https://everyrow.io/docs/chaining-operations) with normal pandas operations where LLMs are used to process every row. @@ -127,14 +128,18 @@ df = await fetch_task_data("12345678-1234-1234-1234-123456789abc") ### Other AI agent plugins #### Gemini CLI + [Official Docs](https://geminicli.com/docs/extensions/#installing-an-extension). 
Ensure that you're using version >= 0.25.0 + ```sh gemini --version gemini extensions install https://github.com/futuresearch/everyrow-sdk gemini extensions enable everyrow [--scope ] ``` + Then within the CLI + ```sh /settings > Preview Features > Enable /settings > Agent Skills > Enable @@ -144,21 +149,28 @@ Then within the CLI ``` #### Codex CLI + [Official docs](https://developers.openai.com/codex/skills#install-new-skills). Install from GitHub using the built-in skill installer, requested via natural language: + ```sh codex $skill-installer from the futuresearch/everyrow-sdk github repo, install the everyrow-sdk skill at --path skills/everyrow-sdk ``` + Or install directly: + ```sh python ~/.codex/skills/.system/skill-installer/scripts/install-skill-from-github.py \ --repo futuresearch/everyrow-sdk --path skills/everyrow-sdk ``` + Restart Codex to pick up the new skill. #### Cursor + [Official docs](https://cursor.com/docs/context/skills#installing-skills-from-github). + ```sh 1. Open Cursor Settings → Rules 2. In the Project Rules section, click Add Rule @@ -232,7 +244,7 @@ uv run basedpyright # type check ## About -Built by [FutureSearch](https://futuresearch.ai). We kept running into the same data problems: ranking leads, deduping messy CRM exports, merging tables without clean keys. Tedious for humans, but needs judgment that automation can't handle. So we built this. +Built by [FutureSearch](https://futuresearch.ai). 
[everyrow.io](https://everyrow.io) (app/dashboard) · [case studies](https://futuresearch.ai/solutions/) · [research](https://futuresearch.ai/research/) diff --git a/docs-site/src/app/layout.tsx b/docs-site/src/app/layout.tsx index 02a0ab24..684e88e6 100644 --- a/docs-site/src/app/layout.tsx +++ b/docs-site/src/app/layout.tsx @@ -22,7 +22,7 @@ const jetbrainsMono = JetBrains_Mono({ export const metadata: Metadata = { metadataBase: new URL("https://everyrow.io"), title: "Everyrow Documentation", - description: "Documentation for the Everyrow SDK - AI-powered data operations for pandas DataFrames", + description: "Everyrow documentation. A researcher for every row. Forecast, score, classify, or research entire datasets.", openGraph: { siteName: "Everyrow", type: "website", diff --git a/docs-site/src/app/page.tsx b/docs-site/src/app/page.tsx index 7bbb097a..fdf4095b 100644 --- a/docs-site/src/app/page.tsx +++ b/docs-site/src/app/page.tsx @@ -7,14 +7,14 @@ import { MDXContent } from "@/components/MDXContent"; export const metadata: Metadata = { title: "Everyrow Documentation", description: - "Run LLM Research Agents at Scale", + "A researcher for every row. Forecast, score, classify, or research entire datasets.", alternates: { canonical: "https://everyrow.io/docs", }, openGraph: { title: "Everyrow Documentation", description: - "Run LLM Research Agents at Scale", + "A researcher for every row. 
Forecast, score, classify, or research entire datasets.", url: "https://everyrow.io/docs", images: [{ url: "https://everyrow.io/everyrow-og.png" }], }, @@ -27,9 +27,9 @@ const SECTION_ICONS: Record = { }; const SECTION_DESCRIPTIONS: Record = { - Guides: "Step-by-step tutorials for common data processing tasks", - "API Reference": "Detailed documentation for all everyrow functions", - "Case Studies": "Real-world examples with Jupyter notebooks", + Guides: "Step-by-step tutorials for web research at scale", + "API Reference": "API reference for all everyrow operations", + "Case Studies": "Real-world examples with verified results", }; const SECTION_LINKS: Record = { @@ -102,7 +102,7 @@ export default async function DocsHome() {

everyrow documentation

- Run LLM Research Agents at Scale + A researcher for every row

diff --git a/docs/add-column-web-lookup.mdx b/docs/add-column-web-lookup.mdx index 1cb782a6..11b37b48 100644 --- a/docs/add-column-web-lookup.mdx +++ b/docs/add-column-web-lookup.mdx @@ -166,4 +166,4 @@ Each result includes a `research` column showing how the agent found the answer, --- -Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). See the [agent_map documentation](/docs/reference/RESEARCH) for more options including response models and effort levels. +Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). See the [agent_map documentation](reference/RESEARCH) for more options including response models and effort levels. diff --git a/docs/classify-dataframe-rows-llm.mdx b/docs/classify-dataframe-rows-llm.mdx index 72fa6015..f7d15ea7 100644 --- a/docs/classify-dataframe-rows-llm.mdx +++ b/docs/classify-dataframe-rows-llm.mdx @@ -190,4 +190,4 @@ Without web research agents, everyrow can classify data for ~$0.009 per row, or --- -Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). See the [agent_map documentation](/docs/reference/RESEARCH) for more options including response models and effort levels. +Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). See the [agent_map documentation](reference/RESEARCH) for more options including response models and effort levels. diff --git a/docs/deduplicate-training-data-ml.mdx b/docs/deduplicate-training-data-ml.mdx index 65edf4b3..030660d1 100644 --- a/docs/deduplicate-training-data-ml.mdx +++ b/docs/deduplicate-training-data-ml.mdx @@ -183,4 +183,4 @@ The 35.7% reduction rate is typical for datasets that weren't explicitly dedupli --- -Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). See the [dedupe documentation](/docs/reference/DEDUPE) for more options including equivalence relation design. +Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). 
See the [dedupe documentation](reference/DEDUPE) for more options including equivalence relation design. diff --git a/docs/filter-dataframe-with-llm.mdx b/docs/filter-dataframe-with-llm.mdx index 315e133b..d050a162 100644 --- a/docs/filter-dataframe-with-llm.mdx +++ b/docs/filter-dataframe-with-llm.mdx @@ -190,4 +190,4 @@ Gladly | Staff Software Engineer | Remote (US, Colombia) | $60k–$215k + Equity --- -Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). See the [screen documentation](/docs/reference/SCREEN) for more options including batch size tuning and async execution. +Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). See the [screen documentation](reference/SCREEN) for more options including batch size tuning and async execution. diff --git a/docs/getting-started.md b/docs/getting-started.md index 5d1b0bce..1361ea62 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -7,6 +7,13 @@ description: Install everyrow and run your first operation. Everyrow lets you perform qualitative data transformations on noisy real-world data, at quantitative scale. Define your fuzzy logic concisely in natural language, and everyrow handles the complexity of orchestrating the execution. +**Using Claude Code?** Install the plugin and ask Claude in natural language: + +```bash +claude plugin marketplace add futuresearch/everyrow-sdk +claude plugin install everyrow@futuresearch +``` + ## Prerequisites - Python 3.12+ diff --git a/docs/installation.mdx b/docs/installation.mdx index f097c9cb..6154ca1d 100644 --- a/docs/installation.mdx +++ b/docs/installation.mdx @@ -11,6 +11,67 @@ Select your platform and integration method below. + + +Install the everyrow plugin from the marketplace: + +```bash +claude plugin marketplace add futuresearch/everyrow-sdk +claude plugin install everyrow@futuresearch +``` + +This installs both the skill and MCP server together. You can toggle each on/off in Claude Code settings. 
+ +**Important:** be sure to supply your API key when launching Claude Code: + +```bash +export EVERYROW_API_KEY=sk-cho... +claude +``` + +You can optionally configure Claude Code to show a [progress bar](/docs/progress-monitoring#progress-bar) for long-running tasks. + +[Official Claude Code Plugin Docs](https://code.claude.com/docs/en/discover-plugins#add-from-github) + + + + + +Add everyrow to your MCP config (requires [uv](https://docs.astral.sh/uv/)): + +```json +{ + "mcpServers": { + "everyrow": { + "command": "uvx", + "args": ["everyrow-mcp"], + "env": { + "EVERYROW_API_KEY": "${EVERYROW_API_KEY}" + } + } + } +} +``` + +Or install with pip and use `"command": "everyrow-mcp"` instead of uvx. + +Config file location: +- **User scope:** `~/.claude.json` (in the `mcpServers` field) +- **Project scope:** `.mcp.json` in your project root + +[Choosing the right scope](https://code.claude.com/docs/en/mcp#choosing-the-right-scope) + +**Important:** either insert your API key when creating the JSON file, or supply the key when launching Claude Code: + +```bash +export EVERYROW_API_KEY=sk-cho... +claude +``` + +You can optionally configure Claude Code to show a [progress bar](/docs/progress-monitoring#progress-bar) for long-running tasks. + + + ```bash @@ -92,67 +153,6 @@ See the [API Reference](/docs/api) for full documentation. - - -Add everyrow to your MCP config (requires [uv](https://docs.astral.sh/uv/)): - -```json -{ - "mcpServers": { - "everyrow": { - "command": "uvx", - "args": ["everyrow-mcp"], - "env": { - "EVERYROW_API_KEY": "${EVERYROW_API_KEY}" - } - } - } -} -``` - -Or install with pip and use `"command": "everyrow-mcp"` instead of uvx. 
- -Config file location: -- **User scope:** `~/.claude.json` (in the `mcpServers` field) -- **Project scope:** `.mcp.json` in your project root - -[Choosing the right scope](https://code.claude.com/docs/en/mcp#choosing-the-right-scope) - -**Important:** either insert your API key when creating the JSON file, or supply the key when launching Claude Code: - -```bash -export EVERYROW_API_KEY=sk-cho... -claude -``` - -You can optionally configure Claude Code to show a [progress bar](/docs/progress-monitoring#progress-bar) for long-running tasks. - - - - - -Install the everyrow plugin from the marketplace: - -```bash -claude plugin marketplace add futuresearch/everyrow-sdk -claude plugin install everyrow@futuresearch -``` - -This installs both the skill and MCP server together. You can toggle each on/off in Claude Code settings. - -**Important:** be sure to supply your API key when launching Claude Code: - -```bash -export EVERYROW_API_KEY=sk-cho... -claude -``` - -You can optionally configure Claude Code to show a [progress bar](/docs/progress-monitoring#progress-bar) for long-running tasks. - -[Official Claude Code Plugin Docs](https://code.claude.com/docs/en/discover-plugins#add-from-github) - - - First, make sure you have [uv installed](https://docs.astral.sh/uv/). diff --git a/docs/rank-by-external-metric.mdx b/docs/rank-by-external-metric.mdx index d9a00e81..bd1d83b3 100644 --- a/docs/rank-by-external-metric.mdx +++ b/docs/rank-by-external-metric.mdx @@ -220,4 +220,4 @@ result = await rank( --- -Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). See the [rank documentation](/docs/reference/RANK) for more options including field types and sort order. +Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). See the [rank documentation](reference/RANK) for more options including field types and sort order. 
diff --git a/docs/reference/DEDUPE.md b/docs/reference/DEDUPE.md index 74499406..3a426317 100644 --- a/docs/reference/DEDUPE.md +++ b/docs/reference/DEDUPE.md @@ -136,6 +136,15 @@ Output (selected rows only): | 500 | ~2 min | ~$1.67 | | 2,000 | ~8 min | ~$7 | +## Via MCP + +MCP tool: `everyrow_dedupe` + +| Parameter | Type | Description | +|-----------|------|-------------| +| `csv_path` | string | Path to input CSV file | +| `equivalence_relation` | string | What makes two rows duplicates | + ## Related docs ### Guides diff --git a/docs/reference/FORECAST.md b/docs/reference/FORECAST.md index 4d1e6aed..f5be0b3d 100644 --- a/docs/reference/FORECAST.md +++ b/docs/reference/FORECAST.md @@ -79,6 +79,15 @@ Probabilities are clamped to [3, 97]—even near-certain outcomes retain residua | 5 | ~6 min | ~$3 | | 20 | ~10 min | ~$12 | +## Via MCP + +MCP tool: `everyrow_forecast` + +| Parameter | Type | Description | +|-----------|------|-------------| +| `csv_path` | string | Path to CSV with questions (one per row) | +| `context` | string | Optional batch-level context for all questions | + ## Related docs ### Blog posts diff --git a/docs/reference/MERGE.md b/docs/reference/MERGE.md index 1db2a38b..79eddede 100644 --- a/docs/reference/MERGE.md +++ b/docs/reference/MERGE.md @@ -69,6 +69,16 @@ A DataFrame with all left table columns plus matched right table columns. 
Rows t | 2,000 × 50 | ~8 min | ~$9 | | 1,000 × 1,000 | ~12 min | ~$15 | +## Via MCP + +MCP tool: `everyrow_merge` + +| Parameter | Type | Description | +|-----------|------|-------------| +| `left_csv_path` | string | Path to the table being enriched (left join) | +| `right_csv_path` | string | Path to the lookup/reference table | +| `task` | string | How to match rows across tables | + ## Related docs ### Guides diff --git a/docs/reference/RANK.md b/docs/reference/RANK.md index 52eb4c22..93112eba 100644 --- a/docs/reference/RANK.md +++ b/docs/reference/RANK.md @@ -81,6 +81,16 @@ When specifying a response model, make sure that it contains `field_name`. Other | `ascending_order` | bool | True = lowest first (default) | | `preview` | bool | True = process only a few rows | +## Via MCP + +MCP tool: `everyrow_rank` + +| Parameter | Type | Description | +|-----------|------|-------------| +| `csv_path` | string | Path to input CSV file | +| `task` | string | How to score each row | +| `field_name` | string | Column name for the score | + ## Related docs ### Guides diff --git a/docs/reference/RESEARCH.md b/docs/reference/RESEARCH.md index 71af7058..5008fd54 100644 --- a/docs/reference/RESEARCH.md +++ b/docs/reference/RESEARCH.md @@ -131,6 +131,23 @@ companies = await single_agent( ) ``` +## Via MCP + +MCP tools: `everyrow_agent` (DataFrame), `everyrow_single_agent` (single question) + +**everyrow_agent:** + +| Parameter | Type | Description | +|-----------|------|-------------| +| `csv_path` | string | Path to input CSV file | +| `task` | string | What to research for each row | + +**everyrow_single_agent:** + +| Parameter | Type | Description | +|-----------|------|-------------| +| `task` | string | The question to research | + ## Related docs ### Guides diff --git a/docs/reference/SCREEN.md b/docs/reference/SCREEN.md index 358980bc..68f1f4ce 100644 --- a/docs/reference/SCREEN.md +++ b/docs/reference/SCREEN.md @@ -90,6 +90,15 @@ class Detailed(BaseModel): Compare: 
regex on "remote-friendly" job postings gets 68% precision. +## Via MCP + +MCP tool: `everyrow_screen` + +| Parameter | Type | Description | +|-----------|------|-------------| +| `csv_path` | string | Path to input CSV file | +| `task` | string | What should pass | + ## Related docs ### Guides diff --git a/docs/resolve-entities-python.mdx b/docs/resolve-entities-python.mdx index 3ed45a90..f894860a 100644 --- a/docs/resolve-entities-python.mdx +++ b/docs/resolve-entities-python.mdx @@ -141,4 +141,4 @@ See the [full notebook](case-studies/dedupe-crm-company-records) for additional --- -Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). See the [dedupe documentation](/docs/reference/DEDUPE) for more options. +Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). See the [dedupe documentation](reference/DEDUPE) for more options. diff --git a/docs/scale-deduplication-20k-rows.mdx b/docs/scale-deduplication-20k-rows.mdx index 390c47a8..8802d064 100644 --- a/docs/scale-deduplication-20k-rows.mdx +++ b/docs/scale-deduplication-20k-rows.mdx @@ -34,6 +34,8 @@ Set your API key before launching Claude Code: export EVERYROW_API_KEY=your_key_here # Get one at everyrow.io/api-key ``` +Download [fda_products.csv](https://media.githubusercontent.com/media/futuresearch/everyrow-sdk/refs/heads/main/docs/data/fda_products.csv) (20,000 rows from the FDA Drugs@FDA database with ingredient, strength, applicant, and dosage form columns). Tell Claude: + ``` Deduplicate fda_products.csv. Two rows are duplicates if they have the same ingredient + same strength + same applicant + same dosage form. @@ -144,4 +146,4 @@ equivalence_relation="Same drug" --- -Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). See the [dedupe documentation](/docs/reference/DEDUPE) for more options. 
Related guides: [Resolve Duplicate Entities](/resolve-entities-python) (500-row CRM walkthrough), [Deduplicate Training Data](/deduplicate-training-data-ml) (semantic dedup for ML datasets). +Built with [everyrow](https://github.com/futuresearch/everyrow-sdk). See the [dedupe documentation](reference/DEDUPE) for more options. Related guides: [Resolve Duplicate Entities](/resolve-entities-python) (500-row CRM walkthrough), [Deduplicate Training Data](/deduplicate-training-data-ml) (semantic dedup for ML datasets). diff --git a/everyrow-mcp/manifest.json b/everyrow-mcp/manifest.json index 155c064d..56d82107 100644 --- a/everyrow-mcp/manifest.json +++ b/everyrow-mcp/manifest.json @@ -3,8 +3,8 @@ "name": "everyrow-mcp", "display_name": "Everyrow MCP Server", "version": "0.4.0", - "description": "AI-powered dataframe ops: transform, dedupe, merge, rank, and screen with natural language", - "long_description": "MCP server for everyrow: agent ops at spreadsheet scale. This server exposes everyrow's 5 core operations as MCP tools, allowing LLM applications to screen, rank, dedupe, merge, and run agents on CSV files. All tools operate on local CSV files.", + "description": "Give your AI a research team. Forecast, score, classify, or research every row of a dataset.", + "long_description": "MCP server for everyrow: give your AI a research team. 
Each operation dispatches web research agents across a dataset to forecast, score, classify, deduplicate, merge, or research at scale.", "author": { "name": "FutureSearch", "url": "https://everyrow.io" diff --git a/everyrow-mcp/pyproject.toml b/everyrow-mcp/pyproject.toml index cae06735..60a87dd6 100644 --- a/everyrow-mcp/pyproject.toml +++ b/everyrow-mcp/pyproject.toml @@ -1,7 +1,7 @@ [project] name = "everyrow-mcp" version = "0.4.0" -description = "MCP server for everyrow: agent ops at spreadsheet scale" +description = "MCP server for everyrow: a researcher for every row" readme = "README.md" requires-python = ">=3.12" dependencies = [ diff --git a/everyrow-mcp/server.json b/everyrow-mcp/server.json index dabf3e63..7605a8ac 100644 --- a/everyrow-mcp/server.json +++ b/everyrow-mcp/server.json @@ -2,7 +2,7 @@ "$schema": "https://static.modelcontextprotocol.io/schemas/2025-12-11/server.schema.json", "name": "io.github.futuresearch/everyrow-mcp", "title": "Everyrow MCP Server", - "description": "AI-powered dataframe ops: transform, dedupe, merge, rank, and screen with natural language", + "description": "Give your AI a research team. Forecast, score, classify, or research every row of a dataset.", "repository": { "url": "https://github.com/futuresearch/everyrow-sdk", "source": "github", diff --git a/pyproject.toml b/pyproject.toml index 42407b96..3e9efb51 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ everyrow = { workspace = true } [project] name = "everyrow" version = "0.4.0" -description = "An SDK for everyrow.io: agent ops at spreadsheet scale" +description = "A researcher for every row. Forecast, score, classify, or research entire datasets." 
readme = "README.md" requires-python = ">=3.12" dependencies = [ diff --git a/skills/everyrow-sdk/SKILL.md b/skills/everyrow-sdk/SKILL.md index 61163688..f5fd7b49 100644 --- a/skills/everyrow-sdk/SKILL.md +++ b/skills/everyrow-sdk/SKILL.md @@ -1,11 +1,11 @@ --- name: everyrow-sdk -description: Helps write Python code using the everyrow SDK for AI-powered data processing - transforming, deduping, merging, ranking, and screening dataframes with natural language instructions +description: Use when the user wants Claude to dispatch researchers to forecast, score, classify, or add to a dataset at scale. --- # everyrow SDK -The everyrow SDK provides intelligent data processing utilities powered by AI agents. Use this skill when writing Python code that needs to: +everyrow gives Claude a research team for your data. Use this skill when writing Python code that needs to: > **Documentation**: For detailed guides, case studies, and API reference, see: > - Docs site: [everyrow.io/docs](https://everyrow.io/docs)