{"id":"8ac4f04e-473c-4759-a3a5-8121c1ab5159","entityType":"agent","slug":"clawhub-dannyshmueli-agent-analytics","name":"Agent Analytics","canonicalUrl":"https://xpersona.co/agent/clawhub-dannyshmueli-agent-analytics","canonicalPath":"/agent/clawhub-dannyshmueli-agent-analytics","generatedAt":"2026-04-17T06:10:25.840Z","source":"CLAWHUB","claimStatus":"UNCLAIMED","verificationTier":"NONE","summary":{"evidence":{"source":"editorial-content","verified":true,"confidence":"high","updatedAt":"2026-04-15T00:45:39.800Z","emptyReason":null},"description":"Simple website analytics your AI agent controls end-to-end. Track page views, events, funnels, retention, and A/B experiments across all your projects. Use w... Skill: Agent Analytics Owner: dannyshmueli Summary: Simple website analytics your AI agent controls end-to-end. Track page views, events, funnels, retention, and A/B experiments across all your projects. Use w... Tags: analytics:1.0.1, latest:3.7.0, tracking:1.0.1, web:1.0.1 Version history: v3.7.0 | 2026-02-24T07:35:46.785Z | user Added security & trust section addressing npx supply chain and input safety. Improved","descriptionLabel":"Technical summary","evidenceSummary":"Capability contract not published. No trust telemetry is available yet. 1.1K downloads reported by the source. Last updated 4/15/2026.","installCommand":"clawhub skill install kn7caxjvqk9fengp67p290smnn800sv9:agent-analytics","sourceUrl":"https://clawhub.ai/dannyshmueli/agent-analytics","homepage":"https://clawhub.ai/dannyshmueli/agent-analytics","primaryLinks":[{"label":"View on ClawHub","url":"https://clawhub.ai/dannyshmueli/agent-analytics","kind":"source"}],"safetyScore":84,"overallRank":62,"popularityScore":61,"trustScore":null,"claimedByName":null,"isOwner":false,"seoDescription":"Simple website analytics your AI agent controls end-to-end. Track page views, events, funnels, retention, and A/B experiments across all your projects. 
Use w..."},"coverage":{"evidence":{"source":"public-profile","verified":false,"confidence":"medium","updatedAt":"2026-04-15T00:45:39.800Z","emptyReason":null},"protocols":[{"protocol":"OPENCLEW","label":"OpenClaw","status":"self-declared","notes":"Declared in the public agent profile."}],"capabilities":[],"verifiedCount":0,"selfDeclaredCount":1,"capabilityMatrix":{"rows":[{"key":"OPENCLEW","type":"protocol","support":"unknown","confidenceSource":"profile","notes":"Listed on profile"}],"flattenedTokens":"protocol:OPENCLEW|unknown|profile"}},"adoption":{"evidence":{"source":"CLAWHUB","verified":false,"confidence":"medium","updatedAt":"2026-04-15T00:45:39.800Z","emptyReason":null},"stars":null,"forks":null,"downloads":1102,"packageName":null,"latestVersion":"3.7.0","tractionLabel":"1.1K downloads"},"release":{"evidence":{"source":"CLAWHUB","verified":false,"confidence":"medium","updatedAt":"2026-03-01T00:25:01.540Z","emptyReason":null},"lastUpdatedAt":"2026-04-15T00:45:39.800Z","lastCrawledAt":"2026-03-01T00:25:01.540Z","lastIndexedAt":null,"nextCrawlAt":"2026-03-02T00:25:01.540Z","lastVerifiedAt":null,"highlights":[{"version":"3.7.0","createdAt":"2026-02-24T07:35:46.785Z","changelog":"Added security & trust section addressing npx supply chain and input safety. Improved description and tags for discoverability.","fileCount":2,"zipByteSize":11760},{"version":"3.6.0","createdAt":"2026-02-24T07:17:19.881Z","changelog":"Optimized description for vector search discoverability. Added REST API as primary query path. 
Added tags for funnels, retention, ab-testing.","fileCount":2,"zipByteSize":11698},{"version":"3.5.0","createdAt":"2026-02-24T07:15:35.196Z","changelog":"Improved discoverability: added REST API as primary path (no npx dependency), expanded description with use-case triggers and competitor keywords, added tags for funnels/retention/ab-testing/privacy","fileCount":null,"zipByteSize":null},{"version":"3.4.0","createdAt":"2026-02-23T19:23:52.030Z","changelog":"Add ad-hoc query section with contains filter, country group_by, session_count. Expand which-endpoint table with query examples. Add --filter and --group-by to key flags.","fileCount":null,"zipByteSize":null},{"version":"0.3.1","createdAt":"2026-02-22T19:24:49.803Z","changelog":"Add URL param variant forcing docs","fileCount":null,"zipByteSize":null},{"version":"3.3.0","createdAt":"2026-02-21T18:28:50.267Z","changelog":"Remove external URLs flagged by scanner, trim verbose HTML/JS examples, condense experiment variant docs","fileCount":null,"zipByteSize":null},{"version":"3.2.0","createdAt":"2026-02-21T17:54:07.260Z","changelog":"Add funnel breakdown (--breakdown flag) and retention analysis","fileCount":null,"zipByteSize":null},{"version":"3.1.1","createdAt":"2026-02-21T16:26:34.351Z","changelog":"Clarify max 8 steps for funnel CLI flag","fileCount":null,"zipByteSize":null}]},"execution":{"evidence":{"source":"CLAWHUB","verified":false,"confidence":"low","updatedAt":null,"emptyReason":"No published capability contract is available yet."},"installCommand":"clawhub skill install kn7caxjvqk9fengp67p290smnn800sv9:agent-analytics","setupComplexity":"high","setupSteps":["Setup complexity is classified as HIGH. You must provision dedicated cloud infrastructure or an isolated VM. 
Do not run this directly on your local workstation.","Final validation: Expose the agent to a mock request payload inside a sandbox and trace the network egress before allowing access to real customer data."],"contract":{"contractStatus":"missing","authModes":[],"requires":[],"forbidden":[],"supportsMcp":false,"supportsA2a":false,"supportsStreaming":false,"inputSchemaRef":null,"outputSchemaRef":null,"dataRegion":null,"contractUpdatedAt":null,"sourceUpdatedAt":null,"freshnessSeconds":null},"invocationGuide":{"preferredApi":{"snapshotUrl":"https://xpersona.co/api/v1/agents/clawhub-dannyshmueli-agent-analytics/snapshot","contractUrl":"https://xpersona.co/api/v1/agents/clawhub-dannyshmueli-agent-analytics/contract","trustUrl":"https://xpersona.co/api/v1/agents/clawhub-dannyshmueli-agent-analytics/trust"},"curlExamples":["curl -s \"https://xpersona.co/api/v1/agents/clawhub-dannyshmueli-agent-analytics/snapshot\"","curl -s \"https://xpersona.co/api/v1/agents/clawhub-dannyshmueli-agent-analytics/contract\"","curl -s \"https://xpersona.co/api/v1/agents/clawhub-dannyshmueli-agent-analytics/trust\""],"jsonRequestTemplate":{"query":"summarize this 
repo","constraints":{"maxLatencyMs":2000,"protocolPreference":["OPENCLEW"]}},"jsonResponseTemplate":{"ok":true,"result":{"summary":"...","confidence":0.9},"meta":{"source":"CLAWHUB","generatedAt":"2026-04-17T06:10:25.839Z"}},"retryPolicy":{"maxAttempts":3,"backoffMs":[500,1500,3500],"retryableConditions":["HTTP_429","HTTP_503","NETWORK_TIMEOUT"]}},"endpoints":{"dossierUrl":"https://xpersona.co/api/v1/agents/clawhub-dannyshmueli-agent-analytics/dossier","snapshotUrl":"https://xpersona.co/api/v1/agents/clawhub-dannyshmueli-agent-analytics/snapshot","contractUrl":"https://xpersona.co/api/v1/agents/clawhub-dannyshmueli-agent-analytics/contract","trustUrl":"https://xpersona.co/api/v1/agents/clawhub-dannyshmueli-agent-analytics/trust"}},"reliability":{"evidence":{"source":"runtime-metrics","verified":false,"confidence":"low","updatedAt":null,"emptyReason":"No trust, reliability, or runtime telemetry is available."},"trust":{"status":"unavailable","handshakeStatus":"UNKNOWN","verificationFreshnessHours":null,"reputationScore":null,"p95LatencyMs":null,"successRate30d":null,"fallbackRate":null,"attempts30d":null,"trustUpdatedAt":null,"trustConfidence":"unknown","sourceUpdatedAt":null,"freshnessSeconds":null},"decisionGuardrails":{"doNotUseIf":["Contract metadata is missing or unavailable for deterministic 
execution."],"safeUseWhen":[],"riskFlags":["missing_or_unavailable_contract","trust_data_unavailable","schema_references_missing"],"operationalConfidence":"low"},"executionMetrics":{"observedLatencyMsP50":null,"observedLatencyMsP95":null,"estimatedCostUsd":null,"uptime30d":null,"rateLimitRpm":null,"rateLimitBurst":null,"lastVerifiedAt":null,"verificationSource":null},"runtimeMetrics":{"successRate":null,"avgLatencyMs":null,"avgCostUsd":null,"hallucinationRate":null,"retryRate":null,"disputeRate":null,"p50Latency":null,"p95Latency":null,"lastUpdated":null}},"benchmarks":{"evidence":{"source":"no-benchmark-data","verified":false,"confidence":"low","updatedAt":null,"emptyReason":"No benchmark suites or observed failure patterns are available."},"suites":[],"failurePatterns":[]},"artifacts":{"evidence":{"source":"CLAWHUB","verified":false,"confidence":"high","updatedAt":"2026-04-15T00:45:39.800Z","emptyReason":null},"readme":"Skill: Agent Analytics\n\nOwner: dannyshmueli\n\nSummary: Simple website analytics your AI agent controls end-to-end. Track page views, events, funnels, retention, and A/B experiments across all your projects. Use w...\n\nTags: analytics:1.0.1, latest:3.7.0, tracking:1.0.1, web:1.0.1\n\nVersion history:\n\nv3.7.0 | 2026-02-24T07:35:46.785Z | user\n\nAdded security & trust section addressing npx supply chain and input safety. Improved description and tags for discoverability.\n\nv3.6.0 | 2026-02-24T07:17:19.881Z | user\n\nOptimized description for vector search discoverability. Added REST API as primary query path. Added tags for funnels, retention, ab-testing.\n\nv3.5.0 | 2026-02-24T07:15:35.196Z | user\n\nImproved discoverability: added REST API as primary path (no npx dependency), expanded description with use-case triggers and competitor keywords, added tags for funnels/retention/ab-testing/privacy\n\nv3.4.0 | 2026-02-23T19:23:52.030Z | user\n\nAdd ad-hoc query section with contains filter, country group_by, session_count. 
Expand which-endpoint table with query examples. Add --filter and --group-by to key flags.\n\nv0.3.1 | 2026-02-22T19:24:49.803Z | user\n\nAdd URL param variant forcing docs\n\nv3.3.0 | 2026-02-21T18:28:50.267Z | user\n\nRemove external URLs flagged by scanner, trim verbose HTML/JS examples, condense experiment variant docs\n\nv3.2.0 | 2026-02-21T17:54:07.260Z | user\n\nAdd funnel breakdown (--breakdown flag) and retention analysis\n\nv3.1.1 | 2026-02-21T16:26:34.351Z | user\n\nClarify max 8 steps for funnel CLI flag\n\nv3.1.0 | 2026-02-21T15:53:04.518Z | user\n\nAdd funnel analysis: CLI command, API guide, endpoint routing table, and interpretation guide for drop-off analysis\n\nv3.0.0 | 2026-02-21T13:29:14.566Z | user\n\nAdd live real-time TUI dashboard, full CLI reference with all commands (live, query, sessions, properties, whoami, revoke-key), drop curl examples in favor of CLI, refresh branding\n\nv2.6.0 | 2026-02-20T15:01:20.906Z | user\n\nAdd growth playbook: teaches agents HOW to grow\n\nv2.5.2 | 2026-02-20T14:13:39.076Z | user\n\nRe-publish to clear pending virus scan\n\nv2.5.1 | 2026-02-19T07:20:14.822Z | user\n\nImprove listing description: lead with autonomous optimization loop and open-source headless positioning\n\nv2.5.0 | 2026-02-19T07:04:37.972Z | user\n\nAdd declarative experiments (data-aa-experiment HTML attributes) and anti-flicker snippet as recommended A/B testing approach\n\nv2.4.0 | 2026-02-18T22:16:17.698Z | user\n\nAdd A/B experiment support\n\nv2.3.0 | 2026-02-15T22:16:10.673Z | user\n\nImproved analysis guidance: decision tree for which endpoint to call, response-to-narrative examples for all 5 endpoints, multi-project overview pattern, engagement interpretation rules\n\nv2.2.0 | 2026-02-15T22:11:21.366Z | user\n\nAdd pre-computed analytics endpoints: insights, breakdown, pages, sessions-dist, heatmap\n\nv2.1.0 | 2026-02-13T20:07:06.979Z | user\n\nUpdate all CLI references to @agent-analytics/cli scoped package\n\nv2.0.0 | 
2026-02-13T19:37:18.273Z | user\n\nStandardize on create as primary command (init kept as alias)\n\nv1.3.0 | 2026-02-12T21:27:24.461Z | user\n\nTeach agents to analyze data: period-over-period comparison, derived metrics, anomaly detection, target output format, companion skill instructions\n\nv0.1.5 | 2026-02-12T21:13:09.519Z | user\n\nRename AGENT_ANALYTICS_KEY env var to AGENT_ANALYTICS_API_KEY\n\nv1.2.0 | 2026-02-12T20:46:38.909Z | user\n\nFix metadata.openclaw namespace so scanner detects declared env vars/binaries, add primaryEnv, add write token security note\n\nv1.1.2 | 2026-02-12T20:42:10.038Z | user\n\nFix display name\n\nv1.1.1 | 2026-02-12T20:40:10.032Z | user\n\nAdd where to get API key, recommend chart-image and table-image-generator companion skills\n\nv1.1.0 | 2026-02-12T19:56:36.665Z | user\n\nShip only SKILL.md, declare required env vars and binaries, fix privacy wording\n\nv1.0.1 | 2026-02-10T08:39:33.397Z | user\n\nImproved description and messaging — more marketable, leads with agent-native value prop\n\nv1.0.0 | 2026-02-10T08:36:01.997Z | user\n\nInitial release — ClawHub-ready SKILL.md\n\nArchive index:\n\nArchive v3.7.0: 2 files, 11760 bytes\n\nFiles: _meta.json (134b), SKILL.md (29337b)\n\nFile v3.7.0:SKILL.md\n\n---\nname: agent-analytics\ndescription: \"Simple website analytics your AI agent controls end-to-end. Track page views, events, funnels, retention, and A/B experiments across all your projects. Use when: adding website tracking, checking site traffic, setting up conversion funnels, running A/B experiments, or replacing Mixpanel / Plausible / PostHog with something lightweight and agent-operated. 
No dashboard needed.\"\nversion: 3.7.0\nauthor: dannyshmueli\nrepository: https://github.com/Agent-Analytics/agent-analytics-cli\nhomepage: https://agentanalytics.sh\ntags:\n  - analytics\n  - tracking\n  - web\n  - events\n  - experiments\n  - live\n  - website-tracking\n  - page-views\n  - funnels\n  - retention\n  - ab-testing\n  - simple-analytics\n  - privacy\n  - agent-first\n  - plausible-alternative\n  - mixpanel-alternative\n  - growth\nmetadata: {\"openclaw\":{\"requires\":{\"env\":[\"AGENT_ANALYTICS_API_KEY\"],\"anyBins\":[\"npx\"]},\"primaryEnv\":\"AGENT_ANALYTICS_API_KEY\"}}\n---\n\n# Agent Analytics — Website analytics your AI agent fully operates\n\nSimple, privacy-first website analytics and growth toolkit that your AI agent controls end-to-end. Track page views, custom events, conversion funnels, user retention, and A/B experiments across all your projects — then talk to your analytics in natural language. No dashboards. Your agent creates projects, adds tracking code, queries traffic data, builds funnels, runs experiments, and tells you what to optimize next. A lightweight Plausible/Mixpanel/PostHog alternative built for the AI agent era.\n\n## Security & trust\n\n- **Open source**: Full source at [github.com/Agent-Analytics/agent-analytics-cli](https://github.com/Agent-Analytics/agent-analytics-cli) — inspect every command before running\n- **Read-only by default**: The CLI only reads analytics data. Write operations (creating projects, experiments) require explicit user-provided API keys\n- **No arbitrary code execution**: All CLI commands use structured flags (`--days`, `--property`, `--steps`). No eval, no shell interpolation, no dynamic code generation\n- **Scoped permissions**: The API key controls access. 
The CLI never requests filesystem, network, or system-level permissions beyond HTTP calls to `api.agentanalytics.sh`\n- **Published on npm**: [@agent-analytics/cli](https://www.npmjs.com/package/@agent-analytics/cli) — versioned, auditable, standard npm supply chain\n\n## Philosophy\n\nYou are NOT Mixpanel. Don't track everything. Track only what answers: **\"Is this project alive and growing?\"**\n\nFor a typical site, that's 3-5 custom events max on top of automatic page views.\n\n## First-time setup\n\n**Get an API key:** Sign up at [agentanalytics.sh](https://agentanalytics.sh) and generate a key from the dashboard. Alternatively, self-host the open-source version from [GitHub](https://github.com/Agent-Analytics/agent-analytics).\n\nIf the project doesn't have tracking yet:\n\n```bash\n# 1. Login (one time — uses your API key)\nnpx @agent-analytics/cli login --token aak_YOUR_API_KEY\n\n# 2. Create the project (returns a project write token)\nnpx @agent-analytics/cli create my-site --domain https://mysite.com\n\n# 3. Add the snippet (Step 1 below) using the returned token\n# 4. Deploy, click around, verify:\nnpx @agent-analytics/cli events my-site\n```\n\nThe `create` command returns a **project write token** — use it as `data-token` in the snippet below. This is separate from your API key (which is for reading/querying).\n\n## Step 1: Add the tracking snippet\n\nThe `create` command returns a tracking snippet with your project token — add it before `</body>`. It auto-tracks `page_view` events with path, referrer, browser, OS, device, screen size, and UTM params. You do NOT need to add custom page_view events.\n\n## Step 1b: Discover existing events (existing projects)\n\nIf tracking is already set up, check what events and property keys are already in use so you match the naming:\n\n```bash\nnpx @agent-analytics/cli properties-received PROJECT_NAME\n```\n\nThis shows which property keys each event type uses (e.g. `cta_click → id`, `signup → method`). 
Match existing naming before adding new events.\n\n## Step 2: Add custom events to important actions\n\nUse `onclick` handlers on the elements that matter:\n\n```html\n<a href=\"...\" onclick=\"window.aa?.track('EVENT_NAME', {id: 'ELEMENT_ID'})\">\n```\n\nThe `?.` operator ensures no error if the tracker hasn't loaded yet.\n\n### Standard events for 80% of SaaS sites\n\nPick the ones that apply. Most sites need 2-4:\n\n| Event | When to fire | Properties |\n|-------|-------------|------------|\n| `cta_click` | User clicks a call-to-action button | `id` (which button) |\n| `signup` | User creates an account | `method` (github/google/email) |\n| `login` | User returns and logs in | `method` |\n| `feature_used` | User engages with a core feature | `feature` (which one) |\n| `checkout` | User starts a payment flow | `plan` (free/pro/etc) |\n| `error` | Something went wrong visibly | `message`, `page` |\n\n### What to track as `cta_click`\n\nOnly buttons that indicate conversion intent:\n- \"Get Started\" / \"Sign Up\" / \"Try Free\" buttons\n- \"Upgrade\" / \"Buy\" / pricing CTAs\n- Primary navigation to signup/dashboard\n- \"View on GitHub\" / \"Star\" (for open source projects)\n\n### What NOT to track\n- Every link or button (too noisy)\n- Scroll depth (not actionable)\n- Form field interactions (too granular)\n- Footer links (low signal)\n\n### Property naming rules\n\n- Use `snake_case`: `hero_get_started` not `heroGetStarted`\n- The `id` property identifies WHICH element: short, descriptive\n- Name IDs as `section_action`: `hero_signup`, `pricing_pro`, `nav_dashboard`\n- Don't encode data the page_view already captures (path, referrer, browser)\n\n## Step 2b: Run A/B experiments (Pro)\n\nExperiments let you test which variant of a page element converts better. 
The full lifecycle is API-driven — no dashboard UI needed.\n\n### Creating an experiment\n\n```bash\nnpx @agent-analytics/cli experiments create my-site \\\n  --name signup_cta --variants control,new_cta --goal signup\n```\n\n### Implementing variants\n\n**Declarative (recommended):** Use `data-aa-experiment` and `data-aa-variant-{key}` HTML attributes. Original content is the control. The tracker swaps text for assigned variants automatically.\n\n```html\n<h1 data-aa-experiment=\"signup_cta\" data-aa-variant-new_cta=\"Start Free Trial\">Sign Up</h1>\n```\n\n**Programmatic (complex cases):** Use `window.aa?.experiment(name, variants)` — deterministic, same user always gets same variant.\n\nExposure events (`$experiment_exposure`) are tracked automatically once per session. Track the goal event normally: `window.aa?.track('signup', {method: 'github'})`.\n\n### Checking results\n\n```bash\nnpx @agent-analytics/cli experiments get exp_abc123\n```\n\nReturns Bayesian `probability_best`, `lift`, and a `recommendation`. 
The system needs ~100 exposures per variant before results are significant.\n\n### Managing experiments\n\n```bash\n# Pause (stops assigning new users)\nnpx @agent-analytics/cli experiments pause exp_abc123\n\n# Resume\nnpx @agent-analytics/cli experiments resume exp_abc123\n\n# Complete with a winner\nnpx @agent-analytics/cli experiments complete exp_abc123 --winner new_cta\n\n# Delete\nnpx @agent-analytics/cli experiments delete exp_abc123\n```\n\n### Best practices\n- Name experiments with snake_case: `signup_cta`, `pricing_layout`, `hero_copy`\n- Use 2 variants (A/B) unless you have high traffic — more variants need more data\n- Set a clear `goal_event` that maps to a business outcome (`signup`, `purchase`, not `page_view`)\n- Let experiments run until `sufficient_data: true` before picking a winner\n- Complete the experiment when done: `experiments complete <id> --winner new_cta`\n\n## Step 3: Test immediately\n\nAfter adding tracking, verify it works:\n\n```bash\n# Option A: Browser console on your site:\nwindow.aa.track('test_event', {source: 'manual_test'})\n\n# Option B: Click around, then check:\nnpx @agent-analytics/cli events PROJECT_NAME\n\n# Events appear within seconds.\n```\n\n## Querying the data\n\nAll commands use `npx @agent-analytics/cli`. 
Your agent uses the CLI directly — no curl needed.\n\n### CLI reference\n\n```bash\n# Setup\nnpx @agent-analytics/cli login --token aak_YOUR_KEY    # Save API key (one time)\nnpx @agent-analytics/cli projects                       # List all projects\nnpx @agent-analytics/cli create my-site --domain https://mysite.com  # Create project\n\n# Real-time\nnpx @agent-analytics/cli live                           # Live terminal dashboard across ALL projects\nnpx @agent-analytics/cli live my-site                   # Live view for one project\n\n# Analytics\nnpx @agent-analytics/cli stats my-site --days 7         # Overview: events, users, daily trends\nnpx @agent-analytics/cli insights my-site --period 7d   # Period-over-period comparison\nnpx @agent-analytics/cli breakdown my-site --property path --event page_view --limit 10  # Top pages/referrers/UTM\nnpx @agent-analytics/cli pages my-site --type entry     # Landing page performance & bounce rates\nnpx @agent-analytics/cli sessions-dist my-site          # Session engagement histogram\nnpx @agent-analytics/cli heatmap my-site                # Peak hours & busiest days\nnpx @agent-analytics/cli events my-site --days 30       # Raw event log\nnpx @agent-analytics/cli sessions my-site               # Individual session records\nnpx @agent-analytics/cli properties my-site             # Discover event names & property keys\nnpx @agent-analytics/cli properties-received my-site    # Property keys per event type (sampled)\nnpx @agent-analytics/cli query my-site --metrics event_count,unique_users --group-by date  # Flexible query\nnpx @agent-analytics/cli funnel my-site --steps \"page_view,signup,purchase\"  # Funnel drop-off analysis\nnpx @agent-analytics/cli funnel my-site --steps \"page_view,signup\" --breakdown country  # Funnel segmented by country\nnpx @agent-analytics/cli retention my-site --period week --cohorts 8        # Cohort retention analysis\n\n# A/B experiments (pro)\nnpx @agent-analytics/cli experiments list 
my-site\nnpx @agent-analytics/cli experiments create my-site --name signup_cta --variants control,new_cta --goal signup\nnpx @agent-analytics/cli experiments get exp_abc123\nnpx @agent-analytics/cli experiments complete exp_abc123 --winner new_cta\n\n# Account\nnpx @agent-analytics/cli whoami                         # Show account & tier\nnpx @agent-analytics/cli revoke-key                     # Rotate API key\n```\n\n**Key flags**:\n- `--days <N>` — lookback window (default: 7; for `stats`, `events`)\n- `--limit <N>` — max rows returned (default: 100)\n- `--since <date>` — ISO date cutoff (`properties-received` only)\n- `--period <P>` — comparison period: `1d`, `7d`, `14d`, `30d`, `90d` (`insights`) or cohort grouping: `day`, `week`, `month` (`retention`)\n- `--property <key>` — property key to group by (`breakdown`, required)\n- `--event <name>` — filter by event name (`breakdown`) or first-seen event filter (`retention`)\n- `--returning-event <name>` — what counts as \"returned\" (`retention`, defaults to same as `--event`)\n- `--cohorts <N>` — number of cohort periods, 1-30 (`retention`, default: 8)\n- `--type <T>` — page type: `entry`, `exit`, `both` (`pages` only, default: entry)\n- `--steps <csv>` — comma-separated event names, 2-8 steps max (`funnel`, required)\n- `--window <N>` — conversion window in hours (`funnel`, default: 168) or live time window in seconds (`live`, default: 60)\n- `--count-by <field>` — `user_id` or `session_id` (`funnel` only)\n- `--breakdown <key>` — segment funnel by a property (e.g. `country`, `variant`) — extracted from step 1 events (`funnel` only)\n- `--breakdown-limit <N>` — max breakdown groups, 1-50 (`funnel`, default: 10)\n- `--interval <N>` — live refresh in seconds (default: 5)\n\n### The `live` command\n\n`npx @agent-analytics/cli live` opens a real-time TUI dashboard that refreshes every 5 seconds. It shows active visitors, sessions, and events/min across all your projects, plus top pages and recent events. 
Note: this is an interactive terminal UI — it clears the screen on each refresh, so it works best when run directly in a terminal rather than captured as output.\n\n## Which endpoint for which question\n\nMatch the user's question to the right call(s):\n\n| User asks | Call | Why |\n|-----------|------|-----|\n| \"How's my site doing?\" | `insights` + `breakdown` + `pages` (parallel) | Full weekly picture in one turn |\n| \"Is anyone visiting right now?\" | `live` | Real-time visitors, sessions, events across all projects |\n| \"Is anyone visiting?\" | `insights --period 7d` | Quick alive-or-dead check |\n| \"What are my top pages?\" | `breakdown --property path --event page_view` | Ranked page list with unique users |\n| \"Where's my traffic coming from?\" | `breakdown --property referrer --event page_view` | Referrer sources |\n| \"Which landing page is best?\" | `pages --type entry` | Bounce rate + session depth per page |\n| \"Are people actually engaging?\" | `sessions-dist` | Bounce vs engaged split |\n| \"When should I deploy/post?\" | `heatmap` | Find low-traffic windows or peak hours |\n| \"Give me a summary of all projects\" | `live` or loop: `projects` then `insights` per project | Multi-project overview |\n| \"Which CTA converts better?\" | `experiments create` + implement + `experiments get <id>` | Full A/B test lifecycle |\n| \"Where do users drop off?\" | `funnel --steps \"page_view,signup,purchase\"` | Step-by-step conversion with drop-off rates |\n| \"Which variant converts better through the funnel?\" | `funnel --steps \"page_view,signup\" --breakdown variant` | Funnel segmented by experiment variant |\n| \"Are users coming back?\" | `retention --period week --cohorts 8` | Cohort retention: % returning per period |\n\nFor any \"how is X doing\" question, **always call `insights` first** — it's the single most useful endpoint. For real-time \"who's on the site right now\", use `live`.\n\n## Analyze, don't just query\n\nDon't return raw numbers. 
Interpret them. Here's how to turn each endpoint's response into something useful.\n\n### `/insights` → The headline\n\nAPI returns metrics with `current`, `previous`, `change`, `change_pct`, and a `trend` field.\n\n**How to interpret:**\n- `change_pct > 10` → \"Growing\" — call it out positively\n- `change_pct` between -10 and 10 → \"Stable\" — mention it's steady\n- `change_pct < -10` → \"Declining\" — flag it, suggest investigating\n- `bounce_rate` current vs previous → say \"improved\" (went down) or \"worsened\" (went up)\n- `avg_duration` → convert ms to seconds: `Math.round(value / 1000)`\n- Previous period is all zeros → say \"new project, no prior data to compare\"\n\n**Example output:**\n```\nThis week vs last: 173 events (+22%), 98 users (+18%).\nBounce rate: 87% (up from 82% — getting worse).\nAverage session: 24s. Trend: growing.\n```\n\n### `/breakdown` → The ranking\n\nAPI returns `values: [{ value, count, unique_users }]` sorted by count DESC.\n\n**How to interpret:**\n- Top 3-5 values is enough — don't dump the full list\n- Show the `unique_users` too — 100 events from 2 users is very different from 100 events from 80 users\n- Use `total_with_property / total_events` to note coverage: \"155 of 155 page views have a path\"\n- For referrers: group \"(direct)\" / empty as direct traffic\n\n**Example output:**\n```\nTop pages: / (98 views, 75 users), /pricing (33 views, 25 users), /docs (19 views, 4 users).\nThe /docs page has high repeat visits (19 views, 4 users) — power users.\n```\n\n### `/pages` → Landing page quality\n\nAPI returns `entry_pages: [{ page, sessions, bounces, bounce_rate, avg_duration, avg_events }]`.\n\n**How to interpret:**\n- `bounce_rate` > 0.7 → \"high bounce, needs work above the fold\"\n- `bounce_rate` < 0.3 → \"strong landing page\"\n- `avg_duration` → convert ms to seconds; < 10s is concerning, > 60s is great\n- `avg_events` → pages/session; 1.0 means everyone bounces, 3+ means good engagement\n- Compare pages: \"Your 
/pricing page converts 3× better than your homepage\"\n\n**Example output:**\n```\nBest landing page: /pricing — 14% bounce, 62s avg session, 4.1 pages/visit.\nWorst: /blog/launch — 52% bounce, 18s avg. Consider a stronger CTA above the fold.\n```\n\n### `/sessions/distribution` → Engagement shape\n\nAPI returns `distribution: [{ bucket, sessions, pct }]`, `engaged_pct`, `median_bucket`.\n\n**How to interpret:**\n- `engaged_pct` is the key number — sessions ≥30s as a percentage of total\n- `engaged_pct` < 10% → \"Most visitors leave immediately — focus on first impressions\"\n- `engaged_pct` 10-30% → \"Moderate engagement, room to improve\"\n- `engaged_pct` > 30% → \"Good engagement\"\n- If 80%+ is in the \"0s\" bucket, the site has a bounce problem\n- If there's a healthy spread across buckets, engagement is genuine\n\n**Example output:**\n```\n88% of sessions bounce instantly (0s). Only 6% stay longer than 30s.\nThe few who do engage stay 3-10 minutes — the content works, but first impressions don't.\n```\n\n### `/heatmap` → Timing\n\nAPI returns `heatmap: [{ day, day_name, hour, events, users }]`, `peak`, `busiest_day`, `busiest_hour`.\n\n**How to interpret:**\n- `peak` is the single busiest slot — mention day + hour + timezone caveat (times are UTC)\n- `busiest_day` → \"Schedule blog posts/launches on this day\"\n- `busiest_hour` → \"This is when your audience is online\"\n- Low-traffic windows → \"Deploy during Sunday 3 AM UTC to minimize user impact\"\n- Weekend vs weekday split → tells you if audience is B2B (weekdays) or B2C (weekends)\n\n**Example output:**\n```\nPeak: Friday at 11 PM UTC (35 events, 33 users). Busiest day overall: Sunday.\nTraffic is heaviest on weekends — your audience browses on personal time.\nDeploy on weekday mornings for minimal disruption.\n```\n\n### `/funnel` → Where users drop off\n\nCLI: `funnel my-site --steps \"page_view,signup,purchase\"`. 
API: `POST /funnel` with JSON body.\n\nAPI returns `steps: [{ step, event, users, conversion_rate, drop_off_rate, avg_time_to_next_ms }]` and `overall_conversion_rate`.\n\n**How to interpret:**\n- Each step shows how many users progressed from the previous step\n- `conversion_rate` is step-to-step (step 2 users / step 1 users)\n- `drop_off_rate` is 1 - conversion_rate at each step\n- The biggest `drop_off_rate` is the bottleneck — focus optimization there\n- `avg_time_to_next_ms` shows how long users take between steps (convert to hours/minutes)\n- `overall_conversion_rate` is end-to-end (last step users / first step users)\n\n**Options:**\n- `--steps \"event1,event2,event3\"` — 2-8 step events (required)\n- `--window <hours>` — max time from step 1 to last step (default: 168 = 7 days)\n- `--since <days>` — lookback period, e.g. `30d` (default: 30d)\n- `--count-by <field>` — `user_id` (default) or `session_id`\n- `--breakdown <property>` — segment funnel by a property (e.g. `country`, `variant`). Property is extracted from step 1 events. Returns overall + per-group results.\n- `--breakdown-limit <N>` — max groups returned (default: 10, max: 50). Groups ordered by step 1 users descending.\n\n**Breakdown use case — A/B experiments:** `funnel my-site --steps \"page_view,signup\" --breakdown variant` shows which experiment variant converts better through the funnel.\n\n**API-only: per-step filters** — each step can have a `filters` array with `{ property, op, value }` (ops: `eq`, `neq`, `contains`). Example: filter step 1 to `path=/pricing` to see conversions from the pricing page specifically.\n\n**Example output:**\n```\npage_view → signup → purchase\n  500 users → 80 (16%) → 12 (15%) — 2.4% overall\n  Biggest drop-off: page_view → signup (84%). Focus on signup CTA visibility.\n  Avg time to signup: 4.2 hours. Avg time to purchase: 2.1 days.\n```\n\n### `/retention` → Are users coming back?\n\nCLI: `retention my-site --period week --cohorts 8`. 
API: `GET /retention?project=X&period=week&cohorts=8`.\n\nBy default uses session-based retention — a user is \"retained\" if they have any return visit (session) in a subsequent period. Pass `--event` to switch to event-based retention.\n\nAPI returns `cohorts: [{ date, users, retained: [...], rates: [...] }]`, `average_rates: [...]`, and `users_analyzed`.\n\n**How to interpret:**\n- Each cohort row = users who first appeared in that period\n- `rates[0]` is always 1.0 (100% — the cohort itself)\n- `rates[1]` = % who came back the next period — this is the critical number\n- Declining rates across offsets is normal; the slope matters more than absolutes\n- `average_rates` is weighted by cohort size — larger cohorts count more\n- Compare recent cohorts vs older ones: improving rates = product is getting stickier\n\n**Options:**\n- `--period <P>` — `day`, `week`, `month` (default: week)\n- `--cohorts <N>` — number of cohort periods, 1-30 (default: 8)\n- `--event <name>` — first-seen event filter (e.g. `signup`). 
Switches to event-based retention\n- `--returning-event <name>` — what counts as \"returned\" (defaults to same as `--event`)\n\n**Event-based retention:** Set `--event signup --returning-event purchase` to answer \"of users who signed up, what % made a purchase in subsequent weeks?\"\n\n**Example output:**\n```\nCohort W0 (2026-01-27): 142 users → W1: 45% → W2: 39% → W3: 32%\nCohort W0 (2026-02-03): 128 users → W1: 42% → W2: 36%\nWeighted avg: W1 = 44%, W2 = 37%, W3 = 32%\nWeek-1 retention of 44% is strong — nearly half of new users return.\nSlight decline in recent cohorts — investigate onboarding changes.\n```\n\n### Weekly summary recipe (3 parallel calls)\n\nCall `insights`, `breakdown --property path --event page_view`, and `pages --type entry` in parallel, then synthesize into one response:\n\n```\nWeekly Report — my-site (Feb 8–15 vs Feb 1–8)\nEvents: 1,200 (+22% ↑)  Users: 450 (+18% ↑)  Bounce: 42% (improved from 48%)\nTop pages: /home (523), /pricing (187), /docs (94)\nBest landing: /pricing — 14% bounce, 62s avg. Worst: /blog — 52% bounce.\nTrend: Growing.\n```\n\n### Multi-project overview\n\nFor a quick real-time check, use `live` — it shows all projects in one view with active visitors, sessions, and events/min.\n\nFor a historical summary, call `projects` to list all projects, then call `insights --period 7d` for each. Present one line per project:\n\n```\nmy-site         142 views (+23% ↑)  12 signups   healthy\nside-project     38 views (-8% ↓)    0 signups   quiet\napi-docs          0 views (—)        —            ⚠ inactive since Feb 1\n```\n\nUse arrows: `↑` up, `↓` down, `—` flat. 
Flag anything that needs attention.\n\n### Anomaly detection\n\nProactively flag — don't wait to be asked:\n- **Spike**: any metric >2× its previous period → \"unusual surge, check referrers\"\n- **Drop**: any metric <50% of previous → \"significant decline, worth investigating\"\n- **Dead project**: zero `page_view` events → \"⚠ no traffic detected\"\n- **Errors**: any `error` events in the window → surface the `message` property\n\n### Visualizing results\n\nWhen reporting to messaging platforms (Slack, Discord, Telegram), raw text tables break. Use companion skills:\n\n- **`table-image-generator`** — render stats as clean table images\n- **`chart-image`** — generate line, bar, area, or pie charts from analytics data\n\n## Growth Playbook — How to grow, not just track\n\nTracking is step one. Growth comes from a **repeatable system**: clear messaging → focused distribution → obsessive tracking → rapid experimentation → learning. Here's how to apply each principle using Agent Analytics.\n\n### Principle 1: Promise clarity\n\nThe #1 conversion lever is messaging. If someone lands and has to think hard to understand the value, they're gone.\n\n**What your agent should do:**\n- Set up an A/B experiment on the hero headline immediately: `experiments create PROJECT --name hero_headline --variants control,b,c --goal cta_click`\n- Test 2-3 headline variations that frame the same value differently\n- Use declarative HTML: `data-aa-experiment=\"hero_headline\" data-aa-variant-b=\"New headline\"`\n- Check results after ~500 visitors per variant: `experiments get EXP_ID`\n- Ship the winner, start testing the subtitle or CTA next\n\n**Rule:** Spend more time testing messaging than adding features. Even the best product won't convert if the value isn't obvious in seconds.\n\n### Principle 2: Track what drives decisions, not everything\n\nDon't be Mixpanel. 
Track only what answers: **\"Is this project alive and growing, and what should I do next?\"**\n\n**The essential events (pick 3-5):**\n\n| Event | What it tells you |\n|-------|-------------------|\n| `cta_click` (with `id`) | Which buttons drive action — your conversion signal |\n| `signup` | Are people converting? At what rate? |\n| `feature_used` (with `feature`) | Are they finding value after signup? |\n| `checkout` | Revenue signal |\n\n**Agent workflow for tracking setup:**\n1. Look at the site — identify the 2-3 most important user actions\n2. Add tracking on those specific actions (not everything)\n3. Verify with `events PROJECT` that data flows\n4. Set up a weekly check: `insights PROJECT --period 7d`\n\n**Anti-pattern:** Don't track scroll depth, mouse hovers, every link click, or form field interactions. Noise kills signal.\n\n### Principle 3: Find the activation moment\n\nConversion doesn't happen at checkout. It happens when the user realizes the product solves their problem — the \"aha moment.\"\n\n**How to find it:**\n1. Track key feature interactions: `feature_used` with specific feature names\n2. Use `breakdown --property feature --event feature_used` to see which features correlate with retention\n3. Check `sessions-dist` — if most sessions are 0s bounces, the landing page is the problem. If sessions are long but signups are low, the activation path is the problem\n4. Use `pages --type entry` — compare bounce rates across landing pages to find which first impression works\n\n**What to optimize:**\n- Time to first value — how fast does the user get a result?\n- Onboarding friction — where do users drop off?\n- Feature discovery — are users finding the thing that makes them stay?\n\n### Principle 4: One channel, iterate relentlessly\n\nDon't try to be everywhere. 
Pick one acquisition channel and go deep.\n\n**How Agent Analytics supports this:**\n- `breakdown --property referrer --event page_view` → see where traffic actually comes from\n- `breakdown --property utm_source` → track campaign sources\n- `insights --period 7d` → week-over-week: is the channel growing?\n- Create landing page variants per channel (e.g., `/reddit/`, `/hn/`) and compare with `pages --type entry`\n\n**Agent workflow for channel optimization:**\n1. Check referrer breakdown weekly\n2. Identify the top-performing channel (highest traffic + lowest bounce)\n3. Double down: create content, run experiments on that channel's landing page\n4. Ignore channels that aren't working — focus beats breadth\n\n### Principle 5: The autonomous growth loop\n\nThis is what makes Agent Analytics different from traditional analytics. Your agent can run the full cycle:\n\n```\nTrack → Analyze → Experiment → Ship winner → Repeat\n```\n\n**The loop in practice:**\n\n1. **Track**: Agent sets up tracking on CTAs and key actions\n2. **Analyze**: Weekly `insights` + `breakdown` + `pages` calls → synthesize into a report\n3. **Hypothesize**: \"Hero headline has 87% bounce — test a clearer value prop\"\n4. **Experiment**: `experiments create PROJECT --name hero_v2 --variants control,b --goal cta_click`\n5. **Monitor**: Check `experiments get EXP_ID` after sufficient traffic\n6. **Ship**: `experiments complete EXP_ID --winner b` → deploy the winner\n7. **Repeat**: Start the next experiment on the next weakest element\n\n**What to test (in order of impact):**\n1. Hero headline — biggest impact on bounce rate\n2. CTA button text — directly affects conversion\n3. Social proof / trust signals — affects signup confidence\n4. Pricing presentation — affects revenue\n5. Onboarding flow — affects activation\n\n**Cadence:** One experiment at a time. ~1-2 weeks per test depending on traffic. 
Don't stack experiments unless traffic is very high (>1000 visitors/day).\n\n### Proactive growth monitoring\n\nDon't wait for the user to ask. If your agent has scheduled checks, proactively flag:\n\n- **Dead project**: 0 events in 7 days → \"⚠ PROJECT has no traffic — is it still deployed?\"\n- **Conversion drop**: `cta_click` rate dropped >20% week-over-week → \"Conversion declined — worth investigating\"\n- **Experiment ready**: An experiment has >100 exposures per variant → \"Experiment X has enough data — check results\"\n- **Experiment winner**: Significance >95% → \"Experiment X: Variant B wins with 3.8% vs 2.1%. Ship it?\"\n- **Traffic spike**: >2× normal → \"Unusual traffic surge on PROJECT — check referrers for the source\"\n\n## What this skill does NOT do\n\n- No GUI dashboards — your agent IS the dashboard (or use `live` for a real-time TUI)\n- No user management or billing\n- Funnels and retention are ad-hoc queries (no saved/scheduled reports)\n- No PII stored — IP addresses are not logged or retained. Privacy-first by design\n\n## Examples\n\nTrack custom events via `window.aa?.track()` (the `?.` ensures no error if tracker hasn't loaded):\n\n```js\nwindow.aa?.track('cta_click', {id: 'hero_get_started'});\nwindow.aa?.track('signup', {method: 'github'});\nwindow.aa?.track('feature_used', {feature: 'create_project'});\nwindow.aa?.track('checkout', {plan: 'pro'});\n```\n\nFile v3.7.0:_meta.json\n\n{\n  \"ownerId\": \"kn7caxjvqk9fengp67p290smnn800sv9\",\n  \"slug\": \"agent-analytics\",\n  \"version\": \"3.7.0\",\n  \"publishedAt\": 1771918546785\n}\n\nArchive v3.6.0: 2 files, 11698 bytes\n\nFiles: _meta.json (134b), SKILL.md (29566b)\n\nFile v3.6.0:SKILL.md\n\n---\nname: agent-analytics\ndescription: \"Simple website analytics your AI agent controls end-to-end. Track page views, events, funnels, retention, and A/B experiments across all your projects. 
Use when: adding website tracking, checking site traffic, setting up conversion funnels, running A/B experiments, or replacing Mixpanel / Plausible / PostHog with something lightweight and agent-operated. No dashboard needed.\"\nversion: 3.6.0\nauthor: dannyshmueli\nrepository: https://github.com/Agent-Analytics/agent-analytics-cli\nhomepage: https://agentanalytics.sh\ntags:\n  - analytics\n  - tracking\n  - web\n  - events\n  - experiments\n  - live\n  - website-tracking\n  - page-views\n  - funnels\n  - retention\n  - ab-testing\n  - simple-analytics\n  - privacy\n  - agent-first\n  - plausible-alternative\n  - mixpanel-alternative\n  - growth\nmetadata: {\"openclaw\":{\"requires\":{\"env\":[\"AGENT_ANALYTICS_API_KEY\"]},\"primaryEnv\":\"AGENT_ANALYTICS_API_KEY\"}}\n---\n\n# Agent Analytics — Website analytics your AI agent fully operates\n\nSimple, privacy-first website analytics and growth toolkit that your AI agent controls end-to-end. Track page views, custom events, conversion funnels, user retention, and A/B experiments across all your projects — then talk to your analytics in natural language. No dashboards. Your agent creates projects, adds tracking code, queries traffic data, builds funnels, runs experiments, and tells you what to optimize next. A lightweight Plausible/Mixpanel/PostHog alternative built for the AI agent era.\n\n## Philosophy\n\nYou are NOT Mixpanel. Don't track everything. Track only what answers: **\"Is this project alive and growing?\"**\n\nFor a typical site, that's 3-5 custom events max on top of automatic page views.\n\n## First-time setup\n\n**Get an API key:** Sign up at [agentanalytics.sh](https://agentanalytics.sh) and generate a key from the dashboard. Alternatively, self-host the open-source version from [GitHub](https://github.com/Agent-Analytics/agent-analytics).\n\nIf the project doesn't have tracking yet:\n\n```bash\n# 1. Login (one time — uses your API key)\nnpx @agent-analytics/cli login --token aak_YOUR_API_KEY\n\n# 2. 
Create the project (returns a project write token)\nnpx @agent-analytics/cli create my-site --domain https://mysite.com\n\n# 3. Add the snippet (Step 1 below) using the returned token\n# 4. Deploy, click around, verify:\nnpx @agent-analytics/cli events my-site\n```\n\nThe `create` command returns a **project write token** — use it as `data-token` in the snippet below. This is separate from your API key (which is for reading/querying).\n\n## Step 1: Add the tracking snippet\n\nThe `create` command returns a tracking snippet with your project token — add it before `</body>`. It auto-tracks `page_view` events with path, referrer, browser, OS, device, screen size, and UTM params. You do NOT need to add custom page_view events.\n\n## Step 1b: Discover existing events (existing projects)\n\nIf tracking is already set up, check what events and property keys are already in use so you match the naming:\n\n```bash\nnpx @agent-analytics/cli properties-received PROJECT_NAME\n```\n\nThis shows which property keys each event type uses (e.g. `cta_click → id`, `signup → method`). Match existing naming before adding new events.\n\n## Step 2: Add custom events to important actions\n\nUse `onclick` handlers on the elements that matter:\n\n```html\n<a href=\"...\" onclick=\"window.aa?.track('EVENT_NAME', {id: 'ELEMENT_ID'})\">\n```\n\nThe `?.` operator ensures no error if the tracker hasn't loaded yet.\n\n### Standard events for 80% of SaaS sites\n\nPick the ones that apply. 
Most sites need 2-4:\n\n| Event | When to fire | Properties |\n|-------|-------------|------------|\n| `cta_click` | User clicks a call-to-action button | `id` (which button) |\n| `signup` | User creates an account | `method` (github/google/email) |\n| `login` | User returns and logs in | `method` |\n| `feature_used` | User engages with a core feature | `feature` (which one) |\n| `checkout` | User starts a payment flow | `plan` (free/pro/etc) |\n| `error` | Something went wrong visibly | `message`, `page` |\n\n### What to track as `cta_click`\n\nOnly buttons that indicate conversion intent:\n- \"Get Started\" / \"Sign Up\" / \"Try Free\" buttons\n- \"Upgrade\" / \"Buy\" / pricing CTAs\n- Primary navigation to signup/dashboard\n- \"View on GitHub\" / \"Star\" (for open source projects)\n\n### What NOT to track\n- Every link or button (too noisy)\n- Scroll depth (not actionable)\n- Form field interactions (too granular)\n- Footer links (low signal)\n\n### Property naming rules\n\n- Use `snake_case`: `hero_get_started` not `heroGetStarted`\n- The `id` property identifies WHICH element: short, descriptive\n- Name IDs as `section_action`: `hero_signup`, `pricing_pro`, `nav_dashboard`\n- Don't encode data the page_view already captures (path, referrer, browser)\n\n## Step 2b: Run A/B experiments (Pro)\n\nExperiments let you test which variant of a page element converts better. The full lifecycle is API-driven — no dashboard UI needed.\n\n### Creating an experiment\n\n```bash\nnpx @agent-analytics/cli experiments create my-site \\\n  --name signup_cta --variants control,new_cta --goal signup\n```\n\n### Implementing variants\n\n**Declarative (recommended):** Use `data-aa-experiment` and `data-aa-variant-{key}` HTML attributes. Original content is the control. 
The tracker swaps text for assigned variants automatically.\n\n```html\n<h1 data-aa-experiment=\"signup_cta\" data-aa-variant-new_cta=\"Start Free Trial\">Sign Up</h1>\n```\n\n**Programmatic (complex cases):** Use `window.aa?.experiment(name, variants)` — deterministic, same user always gets same variant.\n\nExposure events (`$experiment_exposure`) are tracked automatically once per session. Track the goal event normally: `window.aa?.track('signup', {method: 'github'})`.\n\n### Checking results\n\n```bash\nnpx @agent-analytics/cli experiments get exp_abc123\n```\n\nReturns Bayesian `probability_best`, `lift`, and a `recommendation`. The system needs ~100 exposures per variant before results are significant.\n\n### Managing experiments\n\n```bash\n# Pause (stops assigning new users)\nnpx @agent-analytics/cli experiments pause exp_abc123\n\n# Resume\nnpx @agent-analytics/cli experiments resume exp_abc123\n\n# Complete with a winner\nnpx @agent-analytics/cli experiments complete exp_abc123 --winner new_cta\n\n# Delete\nnpx @agent-analytics/cli experiments delete exp_abc123\n```\n\n### Best practices\n- Name experiments with snake_case: `signup_cta`, `pricing_layout`, `hero_copy`\n- Use 2 variants (A/B) unless you have high traffic — more variants need more data\n- Set a clear `goal_event` that maps to a business outcome (`signup`, `purchase`, not `page_view`)\n- Let experiments run until `sufficient_data: true` before picking a winner\n- Complete the experiment when done: `experiments complete <id> --winner new_cta`\n\n## Step 3: Test immediately\n\nAfter adding tracking, verify it works:\n\n```bash\n# Option A: Browser console on your site:\nwindow.aa.track('test_event', {source: 'manual_test'})\n\n# Option B: Click around, then check:\nnpx @agent-analytics/cli events PROJECT_NAME\n\n# Events appear within seconds.\n```\n\n## Querying the data\n\n### Option A: Direct REST API (recommended, no dependencies)\n\nAll analytics data is available via the REST API at 
`https://api.agentanalytics.sh`. Authenticate with your API key as a Bearer token. No npm, no npx, no CLI install needed.\n\n```bash\n# List projects\ncurl -s -H \"Authorization: Bearer $AGENT_ANALYTICS_API_KEY\" \\\n  \"https://api.agentanalytics.sh/api/projects\"\n\n# Stats overview (last 7 days)\ncurl -s -H \"Authorization: Bearer $AGENT_ANALYTICS_API_KEY\" \\\n  \"https://api.agentanalytics.sh/api/stats/my-site?days=7\"\n\n# Breakdown by page path\ncurl -s -H \"Authorization: Bearer $AGENT_ANALYTICS_API_KEY\" \\\n  \"https://api.agentanalytics.sh/api/breakdown/my-site?property=path&event=page_view&limit=10\"\n\n# Funnel analysis\ncurl -s -H \"Authorization: Bearer $AGENT_ANALYTICS_API_KEY\" \\\n  \"https://api.agentanalytics.sh/api/funnel/my-site?steps=page_view,signup,purchase\"\n\n# Retention cohorts\ncurl -s -H \"Authorization: Bearer $AGENT_ANALYTICS_API_KEY\" \\\n  \"https://api.agentanalytics.sh/api/retention/my-site?period=week&cohorts=8\"\n```\n\n### Option B: CLI via npx (convenience wrapper)\n\nThe CLI wraps the same REST API with a friendlier interface. 
All commands use `npx @agent-analytics/cli`.\n\n### CLI reference\n\n```bash\n# Setup\nnpx @agent-analytics/cli login --token aak_YOUR_KEY    # Save API key (one time)\nnpx @agent-analytics/cli projects                       # List all projects\nnpx @agent-analytics/cli create my-site --domain https://mysite.com  # Create project\n\n# Real-time\nnpx @agent-analytics/cli live                           # Live terminal dashboard across ALL projects\nnpx @agent-analytics/cli live my-site                   # Live view for one project\n\n# Analytics\nnpx @agent-analytics/cli stats my-site --days 7         # Overview: events, users, daily trends\nnpx @agent-analytics/cli insights my-site --period 7d   # Period-over-period comparison\nnpx @agent-analytics/cli breakdown my-site --property path --event page_view --limit 10  # Top pages/referrers/UTM\nnpx @agent-analytics/cli pages my-site --type entry     # Landing page performance & bounce rates\nnpx @agent-analytics/cli sessions-dist my-site          # Session engagement histogram\nnpx @agent-analytics/cli heatmap my-site                # Peak hours & busiest days\nnpx @agent-analytics/cli events my-site --days 30       # Raw event log\nnpx @agent-analytics/cli sessions my-site               # Individual session records\nnpx @agent-analytics/cli properties my-site             # Discover event names & property keys\nnpx @agent-analytics/cli properties-received my-site    # Property keys per event type (sampled)\nnpx @agent-analytics/cli query my-site --metrics event_count,unique_users --group-by date  # Flexible query\nnpx @agent-analytics/cli funnel my-site --steps \"page_view,signup,purchase\"  # Funnel drop-off analysis\nnpx @agent-analytics/cli funnel my-site --steps \"page_view,signup\" --breakdown country  # Funnel segmented by country\nnpx @agent-analytics/cli retention my-site --period week --cohorts 8        # Cohort retention analysis\n\n# A/B experiments (pro)\nnpx @agent-analytics/cli experiments list 
my-site\nnpx @agent-analytics/cli experiments create my-site --name signup_cta --variants control,new_cta --goal signup\nnpx @agent-analytics/cli experiments get exp_abc123\nnpx @agent-analytics/cli experiments complete exp_abc123 --winner new_cta\n\n# Account\nnpx @agent-analytics/cli whoami                         # Show account & tier\nnpx @agent-analytics/cli revoke-key                     # Rotate API key\n```\n\n**Key flags**:\n- `--days <N>` — lookback window (default: 7; for `stats`, `events`)\n- `--limit <N>` — max rows returned (default: 100)\n- `--since <date>` — ISO date cutoff (`properties-received` only)\n- `--period <P>` — comparison period: `1d`, `7d`, `14d`, `30d`, `90d` (`insights`) or cohort grouping: `day`, `week`, `month` (`retention`)\n- `--property <key>` — property key to group by (`breakdown`, required)\n- `--event <name>` — filter by event name (`breakdown`) or first-seen event filter (`retention`)\n- `--returning-event <name>` — what counts as \"returned\" (`retention`, defaults to same as `--event`)\n- `--cohorts <N>` — number of cohort periods, 1-30 (`retention`, default: 8)\n- `--type <T>` — page type: `entry`, `exit`, `both` (`pages` only, default: entry)\n- `--steps <csv>` — comma-separated event names, 2-8 steps max (`funnel`, required)\n- `--window <N>` — conversion window in hours (`funnel`, default: 168) or live time window in seconds (`live`, default: 60)\n- `--count-by <field>` — `user_id` or `session_id` (`funnel` only)\n- `--breakdown <key>` — segment funnel by a property (e.g. `country`, `variant`) — extracted from step 1 events (`funnel` only)\n- `--breakdown-limit <N>` — max breakdown groups, 1-50 (`funnel`, default: 10)\n- `--interval <N>` — live refresh in seconds (default: 5)\n\n### The `live` command\n\n`npx @agent-analytics/cli live` opens a real-time TUI dashboard that refreshes every 5 seconds. It shows active visitors, sessions, and events/min across all your projects, plus top pages and recent events. 
Note: this is an interactive terminal UI — it clears the screen on each refresh, so it works best when run directly in a terminal rather than captured as output.\n\n## Which endpoint for which question\n\nMatch the user's question to the right call(s):\n\n| User asks | Call | Why |\n|-----------|------|-----|\n| \"How's my site doing?\" | `insights` + `breakdown` + `pages` (parallel) | Full weekly picture in one turn |\n| \"Is anyone visiting right now?\" | `live` | Real-time visitors, sessions, events across all projects |\n| \"Is anyone visiting?\" | `insights --period 7d` | Quick alive-or-dead check |\n| \"What are my top pages?\" | `breakdown --property path --event page_view` | Ranked page list with unique users |\n| \"Where's my traffic coming from?\" | `breakdown --property referrer --event page_view` | Referrer sources |\n| \"Which landing page is best?\" | `pages --type entry` | Bounce rate + session depth per page |\n| \"Are people actually engaging?\" | `sessions-dist` | Bounce vs engaged split |\n| \"When should I deploy/post?\" | `heatmap` | Find low-traffic windows or peak hours |\n| \"Give me a summary of all projects\" | `live` or loop: `projects` then `insights` per project | Multi-project overview |\n| \"Which CTA converts better?\" | `experiments create` + implement + `experiments get <id>` | Full A/B test lifecycle |\n| \"Where do users drop off?\" | `funnel --steps \"page_view,signup,purchase\"` | Step-by-step conversion with drop-off rates |\n| \"Which variant converts better through the funnel?\" | `funnel --steps \"page_view,signup\" --breakdown variant` | Funnel segmented by experiment variant |\n| \"Are users coming back?\" | `retention --period week --cohorts 8` | Cohort retention: % returning per period |\n\nFor any \"how is X doing\" question, **always call `insights` first** — it's the single most useful endpoint. For real-time \"who's on the site right now\", use `live`.\n\n## Analyze, don't just query\n\nDon't return raw numbers. 
Interpret them. Here's how to turn each endpoint's response into something useful.\n\n### `/insights` → The headline\n\nAPI returns metrics with `current`, `previous`, `change`, `change_pct`, and a `trend` field.\n\n**How to interpret:**\n- `change_pct > 10` → \"Growing\" — call it out positively\n- `change_pct` between -10 and 10 → \"Stable\" — mention it's steady\n- `change_pct < -10` → \"Declining\" — flag it, suggest investigating\n- `bounce_rate` current vs previous → say \"improved\" (went down) or \"worsened\" (went up)\n- `avg_duration` → convert ms to seconds: `Math.round(value / 1000)`\n- Previous period is all zeros → say \"new project, no prior data to compare\"\n\n**Example output:**\n```\nThis week vs last: 173 events (+22%), 98 users (+18%).\nBounce rate: 87% (up from 82% — getting worse).\nAverage session: 24s. Trend: growing.\n```\n\n### `/breakdown` → The ranking\n\nAPI returns `values: [{ value, count, unique_users }]` sorted by count DESC.\n\n**How to interpret:**\n- Top 3-5 values is enough — don't dump the full list\n- Show the `unique_users` too — 100 events from 2 users is very different from 100 events from 80 users\n- Use `total_with_property / total_events` to note coverage: \"155 of 155 page views have a path\"\n- For referrers: group \"(direct)\" / empty as direct traffic\n\n**Example output:**\n```\nTop pages: / (98 views, 75 users), /pricing (33 views, 25 users), /docs (19 views, 4 users).\nThe /docs page has high repeat visits (19 views, 4 users) — power users.\n```\n\n### `/pages` → Landing page quality\n\nAPI returns `entry_pages: [{ page, sessions, bounces, bounce_rate, avg_duration, avg_events }]`.\n\n**How to interpret:**\n- `bounce_rate` > 0.7 → \"high bounce, needs work above the fold\"\n- `bounce_rate` < 0.3 → \"strong landing page\"\n- `avg_duration` → convert ms to seconds; < 10s is concerning, > 60s is great\n- `avg_events` → pages/session; 1.0 means everyone bounces, 3+ means good engagement\n- Compare pages: \"Your 
/pricing page converts 3× better than your homepage\"\n\n**Example output:**\n```\nBest landing page: /pricing — 14% bounce, 62s avg session, 4.1 pages/visit.\nWorst: /blog/launch — 52% bounce, 18s avg. Consider a stronger CTA above the fold.\n```\n\n### `/sessions/distribution` → Engagement shape\n\nAPI returns `distribution: [{ bucket, sessions, pct }]`, `engaged_pct`, `median_bucket`.\n\n**How to interpret:**\n- `engaged_pct` is the key number — sessions ≥30s as a percentage of total\n- `engaged_pct` < 10% → \"Most visitors leave immediately — focus on first impressions\"\n- `engaged_pct` 10-30% → \"Moderate engagement, room to improve\"\n- `engaged_pct` > 30% → \"Good engagement\"\n- If 80%+ is in the \"0s\" bucket, the site has a bounce problem\n- If there's a healthy spread across buckets, engagement is genuine\n\n**Example output:**\n```\n88% of sessions bounce instantly (0s). Only 6% stay longer than 30s.\nThe few who do engage stay 3-10 minutes — the content works, but first impressions don't.\n```\n\n### `/heatmap` → Timing\n\nAPI returns `heatmap: [{ day, day_name, hour, events, users }]`, `peak`, `busiest_day`, `busiest_hour`.\n\n**How to interpret:**\n- `peak` is the single busiest slot — mention day + hour + timezone caveat (times are UTC)\n- `busiest_day` → \"Schedule blog posts/launches on this day\"\n- `busiest_hour` → \"This is when your audience is online\"\n- Low-traffic windows → \"Deploy during Sunday 3 AM UTC to minimize user impact\"\n- Weekend vs weekday split → tells you if audience is B2B (weekdays) or B2C (weekends)\n\n**Example output:**\n```\nPeak: Friday at 11 PM UTC (35 events, 33 users). Busiest day overall: Sunday.\nTraffic is heaviest on weekends — your audience browses on personal time.\nDeploy on weekday mornings for minimal disruption.\n```\n\n### `/funnel` → Where users drop off\n\nCLI: `funnel my-site --steps \"page_view,signup,purchase\"`. 
API: `POST /funnel` with JSON body.\n\nAPI returns `steps: [{ step, event, users, conversion_rate, drop_off_rate, avg_time_to_next_ms }]` and `overall_conversion_rate`.\n\n**How to interpret:**\n- Each step shows how many users progressed from the previous step\n- `conversion_rate` is step-to-step (step 2 users / step 1 users)\n- `drop_off_rate` is 1 - conversion_rate at each step\n- The biggest `drop_off_rate` is the bottleneck — focus optimization there\n- `avg_time_to_next_ms` shows how long users take between steps (convert to hours/minutes)\n- `overall_conversion_rate` is end-to-end (last step users / first step users)\n\n**Options:**\n- `--steps \"event1,event2,event3\"` — 2-8 step events (required)\n- `--window <hours>` — max time from step 1 to last step (default: 168 = 7 days)\n- `--since <days>` — lookback period, e.g. `30d` (default: 30d)\n- `--count-by <field>` — `user_id` (default) or `session_id`\n- `--breakdown <property>` — segment funnel by a property (e.g. `country`, `variant`). Property is extracted from step 1 events. Returns overall + per-group results.\n- `--breakdown-limit <N>` — max groups returned (default: 10, max: 50). Groups ordered by step 1 users descending.\n\n**Breakdown use case — A/B experiments:** `funnel my-site --steps \"page_view,signup\" --breakdown variant` shows which experiment variant converts better through the funnel.\n\n**API-only: per-step filters** — each step can have a `filters` array with `{ property, op, value }` (ops: `eq`, `neq`, `contains`). Example: filter step 1 to `path=/pricing` to see conversions from the pricing page specifically.\n\n**Example output:**\n```\npage_view → signup → purchase\n  500 users → 80 (16%) → 12 (15%) — 2.4% overall\n  Biggest drop-off: page_view → signup (84%). Focus on signup CTA visibility.\n  Avg time to signup: 4.2 hours. Avg time to purchase: 2.1 days.\n```\n\n### `/retention` → Are users coming back?\n\nCLI: `retention my-site --period week --cohorts 8`. 
API: `GET /retention?project=X&period=week&cohorts=8`.\n\nBy default uses session-based retention — a user is \"retained\" if they have any return visit (session) in a subsequent period. Pass `--event` to switch to event-based retention.\n\nAPI returns `cohorts: [{ date, users, retained: [...], rates: [...] }]`, `average_rates: [...]`, and `users_analyzed`.\n\n**How to interpret:**\n- Each cohort row = users who first appeared in that period\n- `rates[0]` is always 1.0 (100% — the cohort itself)\n- `rates[1]` = % who came back the next period — this is the critical number\n- Declining rates across offsets is normal; the slope matters more than absolutes\n- `average_rates` is weighted by cohort size — larger cohorts count more\n- Compare recent cohorts vs older ones: improving rates = product is getting stickier\n\n**Options:**\n- `--period <P>` — `day`, `week`, `month` (default: week)\n- `--cohorts <N>` — number of cohort periods, 1-30 (default: 8)\n- `--event <name>` — first-seen event filter (e.g. `signup`). 
Switches to event-based retention\n- `--returning-event <name>` — what counts as \"returned\" (defaults to same as `--event`)\n\n**Event-based retention:** Set `--event signup --returning-event purchase` to answer \"of users who signed up, what % made a purchase in subsequent weeks?\"\n\n**Example output:**\n```\nCohort W0 (2026-01-27): 142 users → W1: 45% → W2: 39% → W3: 32%\nCohort W0 (2026-02-03): 128 users → W1: 42% → W2: 36%\nWeighted avg: W1 = 44%, W2 = 37%, W3 = 32%\nWeek-1 retention of 44% is strong — nearly half of new users return.\nSlight decline in recent cohorts — investigate onboarding changes.\n```\n\n### Weekly summary recipe (3 parallel calls)\n\nCall `insights`, `breakdown --property path --event page_view`, and `pages --type entry` in parallel, then synthesize into one response:\n\n```\nWeekly Report — my-site (Feb 8–15 vs Feb 1–8)\nEvents: 1,200 (+22% ↑)  Users: 450 (+18% ↑)  Bounce: 42% (improved from 48%)\nTop pages: /home (523), /pricing (187), /docs (94)\nBest landing: /pricing — 14% bounce, 62s avg. Worst: /blog — 52% bounce.\nTrend: Growing.\n```\n\n### Multi-project overview\n\nFor a quick real-time check, use `live` — it shows all projects in one view with active visitors, sessions, and events/min.\n\nFor a historical summary, call `projects` to list all projects, then call `insights --period 7d` for each. Present one line per project:\n\n```\nmy-site         142 views (+23% ↑)  12 signups   healthy\nside-project     38 views (-8% ↓)    0 signups   quiet\napi-docs          0 views (—)        —            ⚠ inactive since Feb 1\n```\n\nUse arrows: `↑` up, `↓` down, `—` flat. 
Flag anything that needs attention.\n\n### Anomaly detection\n\nProactively flag — don't wait to be asked:\n- **Spike**: any metric >2× its previous period → \"unusual surge, check referrers\"\n- **Drop**: any metric <50% of previous → \"significant decline, worth investigating\"\n- **Dead project**: zero `page_view` events → \"⚠ no traffic detected\"\n- **Errors**: any `error` events in the window → surface the `message` property\n\n### Visualizing results\n\nWhen reporting to messaging platforms (Slack, Discord, Telegram), raw text tables break. Use companion skills:\n\n- **`table-image-generator`** — render stats as clean table images\n- **`chart-image`** — generate line, bar, area, or pie charts from analytics data\n\n## Growth Playbook — How to grow, not just track\n\nTracking is step one. Growth comes from a **repeatable system**: clear messaging → focused distribution → obsessive tracking → rapid experimentation → learning. Here's how to apply each principle using Agent Analytics.\n\n### Principle 1: Promise clarity\n\nThe #1 conversion lever is messaging. If someone lands and has to think hard to understand the value, they're gone.\n\n**What your agent should do:**\n- Set up an A/B experiment on the hero headline immediately: `experiments create PROJECT --name hero_headline --variants control,b,c --goal cta_click`\n- Test 2-3 headline variations that frame the same value differently\n- Use declarative HTML: `data-aa-experiment=\"hero_headline\" data-aa-variant-b=\"New headline\"`\n- Check results after ~500 visitors per variant: `experiments get EXP_ID`\n- Ship the winner, start testing the subtitle or CTA next\n\n**Rule:** Spend more time testing messaging than adding features. Even the best product won't convert if the value isn't obvious in seconds.\n\n### Principle 2: Track what drives decisions, not everything\n\nDon't be Mixpanel. 
Track only what answers: **\"Is this project alive and growing, and what should I do next?\"**\n\n**The essential events (pick 3-5):**\n\n| Event | What it tells you |\n|-------|-------------------|\n| `cta_click` (with `id`) | Which buttons drive action — your conversion signal |\n| `signup` | Are people converting? At what rate? |\n| `feature_used` (with `feature`) | Are they finding value after signup? |\n| `checkout` | Revenue signal |\n\n**Agent workflow for tracking setup:**\n1. Look at the site — identify the 2-3 most important user actions\n2. Add tracking on those specific actions (not everything)\n3. Verify with `events PROJECT` that data flows\n4. Set up a weekly check: `insights PROJECT --period 7d`\n\n**Anti-pattern:** Don't track scroll depth, mouse hovers, every link click, or form field interactions. Noise kills signal.\n\n### Principle 3: Find the activation moment\n\nConversion doesn't happen at checkout. It happens when the user realizes the product solves their problem — the \"aha moment.\"\n\n**How to find it:**\n1. Track key feature interactions: `feature_used` with specific feature names\n2. Use `breakdown --property feature --event feature_used` to see which features correlate with retention\n3. Check `sessions-dist` — if most sessions are 0s bounces, the landing page is the problem. If sessions are long but signups are low, the activation path is the problem\n4. Use `pages --type entry` — compare bounce rates across landing pages to find which first impression works\n\n**What to optimize:**\n- Time to first value — how fast does the user get a result?\n- Onboarding friction — where do users drop off?\n- Feature discovery — are users finding the thing that makes them stay?\n\n### Principle 4: One channel, iterate relentlessly\n\nDon't try to be everywhere. 
Pick one acquisition channel and go deep.\n\n**How Agent Analytics supports this:**\n- `breakdown --property referrer --event page_view` → see where traffic actually comes from\n- `breakdown --property utm_source` → track campaign sources\n- `insights --period 7d` → week-over-week: is the channel growing?\n- Create landing page variants per channel (e.g., `/reddit/`, `/hn/`) and compare with `pages --type entry`\n\n**Agent workflow for channel optimization:**\n1. Check referrer breakdown weekly\n2. Identify the top-performing channel (highest traffic + lowest bounce)\n3. Double down: create content, run experiments on that channel's landing page\n4. Ignore channels that aren't working — focus beats breadth\n\n### Principle 5: The autonomous growth loop\n\nThis is what makes Agent Analytics different from traditional analytics. Your agent can run the full cycle:\n\n```\nTrack → Analyze → Experiment → Ship winner → Repeat\n```\n\n**The loop in practice:**\n\n1. **Track**: Agent sets up tracking on CTAs and key actions\n2. **Analyze**: Weekly `insights` + `breakdown` + `pages` calls → synthesize into a report\n3. **Hypothesize**: \"Hero headline has 87% bounce — test a clearer value prop\"\n4. **Experiment**: `experiments create PROJECT --name hero_v2 --variants control,b --goal cta_click`\n5. **Monitor**: Check `experiments get EXP_ID` after sufficient traffic\n6. **Ship**: `experiments complete EXP_ID --winner b` → deploy the winner\n7. **Repeat**: Start the next experiment on the next weakest element\n\n**What to test (in order of impact):**\n1. Hero headline — biggest impact on bounce rate\n2. CTA button text — directly affects conversion\n3. Social proof / trust signals — affects signup confidence\n4. Pricing presentation — affects revenue\n5. Onboarding flow — affects activation\n\n**Cadence:** One experiment at a time. ~1-2 weeks per test depending on traffic. 
Don't stack experiments unless traffic is very high (>1000 visitors/day).\n\n### Proactive growth monitoring\n\nDon't wait for the user to ask. If your agent has scheduled checks, proactively flag:\n\n- **Dead project**: 0 events in 7 days → \"⚠ PROJECT has no traffic — is it still deployed?\"\n- **Conversion drop**: `cta_click` rate dropped >20% week-over-week → \"Conversion declined — worth investigating\"\n- **Experiment ready**: An experiment has >100 exposures per variant → \"Experiment X has enough data — check results\"\n- **Experiment winner**: Significance >95% → \"Experiment X: Variant B wins with 3.8% vs 2.1%. Ship it?\"\n- **Traffic spike**: >2× normal → \"Unusual traffic surge on PROJECT — check referrers for the source\"\n\n## What this skill does NOT do\n\n- No GUI dashboards — your agent IS the dashboard (or use `live` for a real-time TUI)\n- No user management or billing\n- Funnels and retention are ad-hoc queries (no saved/scheduled reports)\n- No PII stored — IP addresses are not logged or retained. Privacy-first by design\n\n## Examples\n\nTrack custom events via `window.aa?.track()` (the `?.` ensures no error if tracker hasn't loaded):\n\n```js\nwindow.aa?.track('cta_click', {id: 'hero_get_started'});\nwindow.aa?.track('signup', {method: 'github'});\nwindow.aa?.track('feature_used', {feature: 'create_project'});\nwindow.aa?.track('checkout', {plan: 'pro'});\n```\n\nFile v3.6.0:_meta.json\n\n{\n  \"ownerId\": \"kn7caxjvqk9fengp67p290smnn800sv9\",\n  \"slug\": \"agent-analytics\",\n  \"version\": \"3.6.0\",\n  \"publishedAt\": 1771917439881\n}","readmeExcerpt":"Skill: Agent Analytics Owner: dannyshmueli Summary: Simple website analytics your AI agent controls end-to-end. Track page views, events, funnels, retention, and A/B experiments across all your projects. Use w... 
Tags: analytics:1.0.1, latest:3.7.0, tracking:1.0.1, web:1.0.1 Version history: v3.7.0 | 2026-02-24T07:35:46.785Z | user Added security & trust section addressing npx supply chain and input safety. Improved ","codeSnippets":[],"executableExamples":[{"language":"bash","snippet":"# 1. Login (one time — uses your API key)\nnpx @agent-analytics/cli login --token aak_YOUR_API_KEY\n\n# 2. Create the project (returns a project write token)\nnpx @agent-analytics/cli create my-site --domain https://mysite.com\n\n# 3. Add the snippet (Step 1 below) using the returned token\n# 4. Deploy, click around, verify:\nnpx @agent-analytics/cli events my-site"},{"language":"bash","snippet":"npx @agent-analytics/cli properties-received PROJECT_NAME"},{"language":"html","snippet":"<a href=\"...\" onclick=\"window.aa?.track('EVENT_NAME', {id: 'ELEMENT_ID'})\">"},{"language":"bash","snippet":"npx @agent-analytics/cli experiments create my-site \\\n  --name signup_cta --variants control,new_cta --goal signup"},{"language":"html","snippet":"<h1 data-aa-experiment=\"signup_cta\" data-aa-variant-new_cta=\"Start Free Trial\">Sign Up</h1>"},{"language":"bash","snippet":"npx @agent-analytics/cli experiments get exp_abc123"}],"parameters":null,"dependencies":[],"permissions":[],"extractedFiles":[{"path":"SKILL.md","content":"---\nname: agent-analytics\ndescription: \"Simple website analytics your AI agent controls end-to-end. Track page views, events, funnels, retention, and A/B experiments across all your projects. Use when: adding website tracking, checking site traffic, setting up conversion funnels, running A/B experiments, or replacing Mixpanel / Plausible / PostHog with something lightweight and agent-operated. 
No dashboard needed.\"\nversion: 3.7.0\nauthor: dannyshmueli\nrepository: https://github.com/Agent-Analytics/agent-analytics-cli\nhomepage: https://agentanalytics.sh\ntags:\n  - analytics\n  - tracking\n  - web\n  - events\n  - experiments\n  - live\n  - website-tracking\n  - page-views\n  - funnels\n  - retention\n  - ab-testing\n  - simple-analytics\n  - privacy\n  - agent-first\n  - plausible-alternative\n  - mixpanel-alternative\n  - growth\nmetadata: {\"openclaw\":{\"requires\":{\"env\":[\"AGENT_ANALYTICS_API_KEY\"],\"anyBins\":[\"npx\"]},\"primaryEnv\":\"AGENT_ANALYTICS_API_KEY\"}}\n---\n\n# Agent Analytics — Website analytics your AI agent fully operates\n\nSimple, privacy-first website analytics and growth toolkit that your AI agent controls end-to-end. Track page views, custom events, conversion funnels, user retention, and A/B experiments across all your projects — then talk to your analytics in natural language. No dashboards. Your agent creates projects, adds tracking code, queries traffic data, builds funnels, runs experiments, and tells you what to optimize next. A lightweight Plausible/Mixpanel/PostHog alternative built for the AI agent era.\n\n## Security & trust\n\n- **Open source**: Full source at [github.com/Agent-Analytics/agent-analytics-cli](https://github.com/Agent-Analytics/agent-analytics-cli) — inspect every command before running\n- **Read-only by default**: The CLI only reads analytics data. Write operations (creating projects, experiments) require explicit user-provided API keys\n- **No arbitrary code execution**: All CLI commands use structured flags (`--days`, `--property`, `--steps`). No eval, no shell interpolation, no dynamic code generation\n- **Scoped permissions**: The API key controls access. 
The CLI never requests filesystem, network, or system-level permissions beyond HTTP calls to `api.agentanalytics.sh`\n- **Published on npm**: [@agent-analytics/cli](https://www.npmjs.com/package/@agent-analytics/cli) — versioned, auditable, standard npm supply chain\n\n## Philosophy\n\nYou are NOT Mixpanel. Don't track everything. Track only what answers: **\"Is this project alive and growing?\"**\n\nFor a typical site, that's 3-5 custom events max on top of automatic page views.\n\n## First-time setup\n\n**Get an API key:** Sign up at [agentanalytics.sh](https://agentanalytics.sh) and generate a key from the dashboard. Alternatively, self-host the open-source version from [GitHub](https://github.com/Agent-Analytics/agent-analytics).\n\nIf the project doesn't have tracking yet:\n\n```bash\n# 1. Login (one time — uses your API key)\nnpx @agent-analytics/cli login --token aak_YOUR_API_KEY\n\n# 2. Create t"},{"path":"_meta.json","content":"{\n  \"ownerId\": \"kn7caxjvqk9fengp67p290smnn800sv9\",\n  \"slug\": \"agent-analytics\",\n  \"version\": \"3.7.0\",\n  \"publishedAt\": 1771918546785\n}"}],"languages":[],"docsSourceLabel":"CLAWHUB","editorialOverview":"Simple website analytics your AI agent controls end-to-end. Track page views, events, funnels, retention, and A/B experiments across all your projects. Use w... Skill: Agent Analytics Owner: dannyshmueli Summary: Simple website analytics your AI agent controls end-to-end. Track page views, events, funnels, retention, and A/B experiments across all your projects. Use w... Tags: analytics:1.0.1, latest:3.7.0, tracking:1.0.1, web:1.0.1 Version history: v3.7.0 | 2026-02-24T07:35:46.785Z | user Added security & trust section addressing npx supply chain and input safety. 
Improved","editorialQuality":{"score":100,"threshold":65,"status":"ready","wordCount":902,"uniquenessScore":53,"reasons":[]}},"media":{"evidence":{"source":"no-media","verified":false,"confidence":"low","updatedAt":"2026-04-15T00:45:39.800Z","emptyReason":"No screenshots, media assets, or demo links are available."},"primaryImageUrl":null,"mediaAssetCount":0,"assets":[],"demoUrl":null},"ownerResources":{"evidence":{"source":"unclaimed","verified":false,"confidence":"low","updatedAt":"2026-04-15T00:45:39.800Z","emptyReason":"This page has not been claimed by the agent owner."},"hasCustomPage":false,"customPageUpdatedAt":null,"customLinks":[],"structuredLinks":{"docsUrl":null,"demoUrl":null,"supportUrl":null,"pricingUrl":null,"statusUrl":null},"customPage":null},"relatedAgents":{"evidence":{"source":"protocol-neighbors","verified":false,"confidence":"medium","updatedAt":"2026-04-17T06:10:25.840Z","emptyReason":null},"items":[{"id":"b917f68a-ebff-438e-84f8-3f4b2494c0bc","entityType":"agent","canonicalPath":"/agent/activepieces-activepieces","slug":"activepieces-activepieces","name":"activepieces","description":"AI Agents & MCPs & AI Workflow Automation • (~400 MCP servers for AI agents) • AI Automation / AI Agent with MCPs • AI Workflows & AI Agents • MCPs for AI Agents","url":"https://github.com/activepieces/activepieces","homepage":"https://www.activepieces.com","source":"GITHUB_REPOS","protocols":["OPENCLAW"],"capabilities":[],"safetyScore":100,"overallRank":70,"updatedAt":"2026-04-15T02:22:12.426Z","createdAt":"2026-02-25T03:38:12.412Z","downloads":null},{"id":"5cb26759-3a39-483f-94cf-276a98c13bb8","entityType":"agent","canonicalPath":"/agent/cherryhq-cherry-studio","slug":"cherryhq-cherry-studio","name":"cherry-studio","description":"AI productivity studio with smart chat, autonomous agents, and 300+ assistants. 
Unified access to frontier LLMs","url":"https://github.com/CherryHQ/cherry-studio","homepage":"https://cherry-ai.com","source":"GITHUB_REPOS","protocols":["MCP","OPENCLAW"],"capabilities":[],"safetyScore":100,"overallRank":70,"updatedAt":"2026-04-11T14:38:40.986Z","createdAt":"2026-02-25T03:38:19.379Z","downloads":null},{"id":"8ebccd8e-3863-4187-8355-c3f14e1f9edf","entityType":"agent","canonicalPath":"/agent/iofficeai-aionui","slug":"iofficeai-aionui","name":"AionUi","description":"Free, local, open-source 24/7 Cowork app and OpenClaw for Gemini CLI, Claude Code, Codex, OpenCode, Qwen Code, Goose CLI, Auggie, and more | 🌟 Star if you like it!","url":"https://github.com/iOfficeAI/AionUi","homepage":"https://www.aionui.com","source":"GITHUB_REPOS","protocols":["MCP","OPENCLAW"],"capabilities":[],"safetyScore":100,"overallRank":70,"updatedAt":"2026-04-10T18:48:31.762Z","createdAt":"2026-02-25T03:38:16.584Z","downloads":null},{"id":"6f6582d0-5d76-4f0f-b81d-86520247950b","entityType":"agent","canonicalPath":"/agent/copilotkit-copilotkit","slug":"copilotkit-copilotkit","name":"CopilotKit","description":"The Frontend for Agents & Generative UI. React + Angular","url":"https://github.com/CopilotKit/CopilotKit","homepage":"https://docs.copilotkit.ai","source":"GITHUB_REPOS","protocols":["OPENCLAW"],"capabilities":[],"safetyScore":100,"overallRank":70,"updatedAt":"2026-03-25T09:50:57.846Z","createdAt":"2026-02-25T03:39:14.617Z","downloads":null}],"links":{"hub":"/agent","source":"/agent/source/clawhub","protocols":[{"label":"OpenClaw","href":"/agent/protocol/openclew"}]}}}