{
  "schemaVersion": "1.0",
  "item": {
    "slug": "powerdrill-data-analysis-skill",
    "name": "powerdrill-data-analysis",
    "source": "tencent",
    "type": "skill",
    "category": "数据分析",
    "sourceUrl": "https://clawhub.ai/javainthinking/powerdrill-data-analysis-skill",
    "canonicalUrl": "https://clawhub.ai/javainthinking/powerdrill-data-analysis-skill",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/powerdrill-data-analysis-skill",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=powerdrill-data-analysis-skill",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "README.md",
      "SKILL.md",
      "scripts/powerdrill_client.py"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "slug": "powerdrill-data-analysis-skill",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T15:33:36.412Z",
      "expiresAt": "2026-05-07T15:33:36.412Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=powerdrill-data-analysis-skill",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=powerdrill-data-analysis-skill",
        "contentDisposition": "attachment; filename=\"powerdrill-data-analysis-skill-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null,
        "slug": "powerdrill-data-analysis-skill"
      },
      "scope": "item",
      "summary": "Item download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this item.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/powerdrill-data-analysis-skill"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/powerdrill-data-analysis-skill",
    "agentPageUrl": "https://openagent3.xyz/skills/powerdrill-data-analysis-skill/agent",
    "manifestUrl": "https://openagent3.xyz/skills/powerdrill-data-analysis-skill/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/powerdrill-data-analysis-skill/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Powerdrill Data Analysis Skill",
        "body": "Analyze data using the Powerdrill API via the Python client at scripts/powerdrill_client.py. All operations use the Powerdrill REST API v2 (https://ai.data.cloud/api)."
      },
      {
        "title": "Prerequisites & Setup",
        "body": "Before using any Powerdrill functions, the user must have:\n\nA Powerdrill Teamspace - Created by following: https://www.youtube.com/watch?v=I-0yGD9HeDw\nAPI Credentials - Obtained by following: https://www.youtube.com/watch?v=qs-GsUgjb1g\n\nSet these environment variables before running any script:\n\nexport POWERDRILL_USER_ID=\"your_user_id\"\nexport POWERDRILL_PROJECT_API_KEY=\"your_project_api_key\"\n\nThe only Python dependency is requests. Install with: pip install requests\n\nIf a call fails with an authentication error, verify the two environment variables are set and the API key is valid."
      },
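      {
        "title": "Verifying credentials before any call (sketch)",
        "body": "A small pre-flight check, not part of the original package, that uses only the standard library to confirm the two documented environment variables are set before calling the client. The variable names come from the setup steps above; everything else here is an illustrative choice.\n\nimport os\n\n# Fail fast if the documented Powerdrill credentials are missing\nmissing = [name for name in (\"POWERDRILL_USER_ID\", \"POWERDRILL_PROJECT_API_KEY\") if not os.environ.get(name)]\nif missing:\n    raise SystemExit(\"Set these environment variables first: \" + \", \".join(missing))"
      },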
      {
        "title": "How to Use",
        "body": "Import the client module and call functions directly. All functions read credentials from the environment automatically.\n\nimport sys\nsys.path.insert(0, \"/absolute/path/to/scripts\")  # adjust to actual location\nfrom powerdrill_client import *\n\nOr run via CLI:\n\npython scripts/powerdrill_client.py <command> [args]"
      },
      {
        "title": "Datasets",
        "body": "list_datasets(page_number=1, page_size=10, search=None) -> dict\n\nList datasets in the user's account. Typically the first step in any workflow.\n\nresult = list_datasets(search=\"sales\")\nfor ds in result[\"data\"][\"records\"]:\n    print(ds[\"id\"], ds[\"name\"])\n\ncreate_dataset(name, description=\"\") -> dict\n\nCreate a new empty dataset. Returns {\"data\": {\"id\": \"dset-...\"}}.\n\nds = create_dataset(\"Q4 Sales Data\", \"Quarterly sales analysis\")\ndataset_id = ds[\"data\"][\"id\"]\n\nget_dataset_overview(dataset_id) -> dict\n\nGet dataset summary, exploration questions, and keywords. Use after data sources are synced.\n\noverview = get_dataset_overview(dataset_id)\nprint(overview[\"data\"][\"summary\"])\nfor q in overview[\"data\"][\"exploration_questions\"]:\n    print(f\"  - {q}\")\n\nget_dataset_status(dataset_id) -> dict\n\nCheck how many data sources are synced/syncing/invalid.\n\nstatus = get_dataset_status(dataset_id)\n# status[\"data\"] = {\"synched_count\": 3, \"synching_count\": 0, \"invalid_count\": 0}\n\ndelete_dataset(dataset_id) -> dict\n\nPermanently delete a dataset and all its data sources. Irreversible - always confirm with the user first."
      },
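      {
        "title": "Confirming before delete_dataset (sketch)",
        "body": "delete_dataset() is documented above as irreversible, with a note to always confirm with the user first. This interactive guard is not part of the original package; the prompt wording and the use of input() are illustrative.\n\nfrom powerdrill_client import delete_dataset, get_dataset_overview\n\ndef delete_dataset_with_confirmation(dataset_id):\n    # Show what would be lost, then delete only after an explicit \"yes\"\n    overview = get_dataset_overview(dataset_id)\n    print(overview[\"data\"][\"summary\"])\n    answer = input(\"Permanently delete dataset \" + dataset_id + \"? Type yes to confirm: \")\n    if answer.strip().lower() == \"yes\":\n        return delete_dataset(dataset_id)\n    print(\"Aborted; dataset kept.\")"
      },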
      {
        "title": "Data Sources",
        "body": "list_data_sources(dataset_id, page_number=1, page_size=10, status=None) -> dict\n\nList files within a dataset. Filter by status: synched, synching, invalid.\n\nsources = list_data_sources(dataset_id, status=\"synched\")\n\ncreate_data_source(dataset_id, name, *, url=None, file_object_key=None) -> dict\n\nCreate a data source from a public URL or an uploaded file key. Provide exactly one of url or file_object_key.\n\n# From public URL\nds = create_data_source(dataset_id, \"report.pdf\", url=\"https://example.com/report.pdf\")\n\n# From uploaded file (see upload_local_file)\nds = create_data_source(dataset_id, \"data.csv\", file_object_key=key)\n\nupload_local_file(file_path) -> str\n\nUpload a local file via multipart upload. Returns file_object_key for use with create_data_source().\n\nSupported formats: .csv, .tsv, .md, .mdx, .json, .txt, .pdf, .pptx, .docx, .xls, .xlsx\n\nupload_and_create_data_source(dataset_id, file_path) -> dict\n\nConvenience function: uploads a local file then creates the data source in one call.\n\nresult = upload_and_create_data_source(dataset_id, \"/path/to/sales.csv\")\ndatasource_id = result[\"data\"][\"id\"]\n\nwait_for_dataset_sync(dataset_id, max_attempts=30, delay_seconds=3.0) -> dict\n\nPoll until all data sources in the dataset are synced. Raises RuntimeError on timeout or if invalid sources are detected.\n\nupload_and_create_data_source(dataset_id, \"data.csv\")\nwait_for_dataset_sync(dataset_id)  # blocks until synced"
      },
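      {
        "title": "Uploading several files, then syncing once (sketch)",
        "body": "wait_for_dataset_sync() polls the whole dataset, so multiple uploads only need one wait at the end. A short sketch, not part of the original package; the dataset name and file paths are illustrative, and each file must be in one of the supported formats listed above.\n\nfrom powerdrill_client import create_dataset, upload_and_create_data_source, wait_for_dataset_sync\n\ndataset_id = create_dataset(\"Multi-file Analysis\")[\"data\"][\"id\"]\n\nfor path in [\"/path/to/sales.csv\", \"/path/to/notes.md\"]:  # illustrative paths\n    upload_and_create_data_source(dataset_id, path)\n\n# One poll covers every data source; raises RuntimeError on timeout or invalid sources\nwait_for_dataset_sync(dataset_id)"
      },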
      {
        "title": "Sessions",
        "body": "create_session(name, output_language=\"AUTO\", job_mode=\"AUTO\", max_contextual_job_history=10) -> dict\n\nCreate an analysis session. Required before running jobs.\n\nsession = create_session(\"Sales Analysis Session\")\nsession_id = session[\"data\"][\"id\"]\n\nlist_sessions(page_number=1, page_size=10, search=None) -> dict\n\nList existing sessions. Use to find a previous session for resumption.\n\ndelete_session(session_id) -> dict\n\nDelete a session. Use during cleanup after analysis is complete."
      },
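      {
        "title": "Resuming a previous session (sketch)",
        "body": "list_sessions() is documented above for finding a session to resume, but its response shape is not shown. This sketch assumes it mirrors list_datasets() (records under result[\"data\"][\"records\"], each with an id); treat that shape as an assumption and verify it against a real response.\n\nfrom powerdrill_client import list_sessions, create_session\n\nsessions = list_sessions(search=\"Sales Analysis\")\nrecords = sessions[\"data\"][\"records\"]  # assumed shape, mirroring list_datasets()\nif records:\n    session_id = records[0][\"id\"]  # reuse the existing session to keep its context\nelse:\n    session_id = create_session(\"Sales Analysis Session\")[\"data\"][\"id\"]"
      },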
      {
        "title": "Jobs (Data Analysis)",
        "body": "create_job(session_id, question, dataset_id=None, datasource_ids=None, stream=False, output_language=\"AUTO\", job_mode=\"AUTO\") -> dict\n\nRun a natural-language analysis query. This is the core analysis function.\n\nNon-streaming (default): returns full response with all blocks.\n\nresult = create_job(session_id, \"What are the top 5 products by revenue?\", dataset_id=dataset_id)\nfor block in result[\"data\"][\"blocks\"]:\n    if block[\"type\"] == \"MESSAGE\":\n        print(block[\"content\"])\n    elif block[\"type\"] == \"TABLE\":\n        print(f\"Table: {block['content']['url']}\")\n    elif block[\"type\"] == \"IMAGE\":\n        print(f\"Chart: {block['content']['url']}\")\n\nStreaming: returns parsed result with accumulated text and separate blocks.\n\nresult = create_job(session_id, \"Summarize trends\", dataset_id=dataset_id, stream=True)\nprint(result[\"text\"])        # accumulated MESSAGE text\nfor b in result[\"blocks\"]:   # TABLE, IMAGE, etc.\n    print(b[\"type\"], b[\"content\"])\n\nResponse block types:\n\nMESSAGE - Analytical text\nCODE - Code snippets (Markdown)\nTABLE - {name, url, expires_at} - download before expiration\nIMAGE - {name, url, expires_at} - download before expiration\nSOURCES - Citation references\nQUESTIONS - Suggested follow-up questions\nCHART_INFO - Chart configuration and data"
      },
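      {
        "title": "Saving TABLE and IMAGE outputs before they expire (sketch)",
        "body": "TABLE and IMAGE blocks are documented as {name, url, expires_at}, and their URLs expire. This helper, not part of the original package, downloads them right away using requests (the skill's only dependency); the output directory and filename handling are illustrative choices.\n\nimport os\nimport requests\n\ndef save_artifacts(result, out_dir=\"powerdrill_outputs\"):\n    # Download TABLE/IMAGE block URLs from a non-streaming create_job() result\n    os.makedirs(out_dir, exist_ok=True)\n    for block in result[\"data\"][\"blocks\"]:\n        if block[\"type\"] in (\"TABLE\", \"IMAGE\"):\n            resp = requests.get(block[\"content\"][\"url\"], timeout=60)\n            resp.raise_for_status()\n            with open(os.path.join(out_dir, block[\"content\"][\"name\"]), \"wb\") as fh:\n                fh.write(resp.content)"
      },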
      {
        "title": "Cleanup",
        "body": "cleanup(session_id=None, dataset_id=None) -> None\n\nDelete session and/or dataset after analysis. Always call this when done.\n\ncleanup(session_id=session_id, dataset_id=dataset_id)\n\ncleanup_session(session_id) -> None / cleanup_dataset(dataset_id) -> None\n\nDelete individual resources. Errors are logged but not raised."
      },
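      {
        "title": "Guaranteeing cleanup with try/finally (sketch)",
        "body": "The docs say to always call cleanup() once analysis is complete. A small pattern, not part of the original package, that makes this hold even when a step raises: wrap the analysis in try/finally so cleanup() runs on both success and failure. The names and question are illustrative.\n\nfrom powerdrill_client import *\n\ndataset_id = create_dataset(\"Throwaway Analysis\")[\"data\"][\"id\"]\nsession_id = create_session(\"Throwaway Session\")[\"data\"][\"id\"]\n\ntry:\n    upload_and_create_data_source(dataset_id, \"/path/to/data.csv\")\n    wait_for_dataset_sync(dataset_id)\n    result = create_job(session_id, \"What are the key trends?\", dataset_id=dataset_id)\nfinally:\n    # Runs even if the sync times out or the job fails; cleanup errors are logged, not raised\n    cleanup(session_id=session_id, dataset_id=dataset_id)"
      },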
      {
        "title": "Full analysis workflow (upload, analyze, cleanup)",
        "body": "from powerdrill_client import *\n\n# 1. Create dataset and upload data\nds = create_dataset(\"My Analysis\")\ndataset_id = ds[\"data\"][\"id\"]\n\nupload_and_create_data_source(dataset_id, \"/path/to/data.csv\")\nwait_for_dataset_sync(dataset_id)\n\n# 2. Create session and run analysis\nsession = create_session(\"Analysis Session\")\nsession_id = session[\"data\"][\"id\"]\n\nresult = create_job(session_id, \"What are the key trends?\", dataset_id=dataset_id)\nfor block in result[\"data\"][\"blocks\"]:\n    if block[\"type\"] == \"MESSAGE\":\n        print(block[\"content\"])\n\n# 3. Ask follow-up questions (same session for context)\nresult = create_job(session_id, \"Break this down by region\", dataset_id=dataset_id)\n\n# 4. Cleanup when done\ncleanup(session_id=session_id, dataset_id=dataset_id)"
      },
      {
        "title": "Analyze existing dataset",
        "body": "from powerdrill_client import *\n\n# 1. Find the dataset\ndatasets = list_datasets(search=\"sales\")\ndataset_id = datasets[\"data\"][\"records\"][0][\"id\"]\n\n# 2. Explore it\noverview = get_dataset_overview(dataset_id)\nprint(overview[\"data\"][\"summary\"])\n\n# 3. Create session and analyze\nsession = create_session(\"Quick Analysis\")\nsession_id = session[\"data\"][\"id\"]\n\nresult = create_job(session_id, overview[\"data\"][\"exploration_questions\"][0], dataset_id=dataset_id)\n\n# 4. Cleanup session when done (keep dataset)\ncleanup_session(session_id)"
      },
      {
        "title": "CLI usage",
        "body": "# List datasets\npython scripts/powerdrill_client.py list-datasets --search \"sales\"\n\n# Create dataset + upload file\npython scripts/powerdrill_client.py create-dataset \"Test Data\"\npython scripts/powerdrill_client.py upload-file dset-xxx /path/to/file.csv\npython scripts/powerdrill_client.py wait-sync dset-xxx\n\n# Create session and run a job\npython scripts/powerdrill_client.py create-session \"My Session\"\npython scripts/powerdrill_client.py create-job SESSION_ID \"Summarize the data\" --dataset-id dset-xxx\n\n# Cleanup\npython scripts/powerdrill_client.py cleanup --session-id SESSION_ID --dataset-id dset-xxx"
      },
      {
        "title": "Error Handling",
        "body": "Authentication errors: Verify POWERDRILL_USER_ID and POWERDRILL_PROJECT_API_KEY. Direct the user to the setup videos above.\nDataset not found: Re-run list_datasets() to verify the ID. The dataset may have been deleted.\nJob execution failure: Ensure the dataset has at least one synced data source (wait_for_dataset_sync()). Retry with a rephrased question.\nUpload timeout: wait_for_dataset_sync() polls up to 30 attempts (90s). Use get_dataset_status() to check manually.\nInvalid data sources: Check file format is supported. Re-upload with correct file type.\nRate limiting: Wait before retrying. Space out rapid sequential API calls."
      },
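      {
        "title": "Retrying transient failures (sketch)",
        "body": "The error-handling notes above suggest waiting before retrying rate-limited calls and retrying failed jobs. This wrapper is an illustrative pattern, not part of the original package: the attempt count and backoff are arbitrary choices, and it re-raises after the final attempt.\n\nimport time\n\nfrom powerdrill_client import create_job\n\ndef create_job_with_retry(session_id, question, attempts=3, backoff_seconds=5.0, **kwargs):\n    # Space out rapid sequential calls and retry on any failure up to the attempt limit\n    for attempt in range(1, attempts + 1):\n        try:\n            return create_job(session_id, question, **kwargs)\n        except Exception:\n            if attempt == attempts:\n                raise\n            time.sleep(backoff_seconds * attempt)"
      },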
      {
        "title": "Important Notes",
        "body": "Always create a session before running analysis jobs\nAlways call cleanup() to delete sessions and datasets after analysis is complete\nSessions maintain conversational context - reuse the same session for related follow-up questions\nTABLE and IMAGE URLs in job responses expire - download or present results promptly\nCall wait_for_dataset_sync() after uploading files, before running analysis\nDataset and session names are limited to 128 characters\nSupported file formats: .csv, .tsv, .md, .mdx, .json, .txt, .pdf, .pptx, .docx, .xls, .xlsx"
      }
    ]
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/javainthinking/powerdrill-data-analysis-skill",
    "publisherUrl": "https://clawhub.ai/javainthinking/powerdrill-data-analysis-skill",
    "owner": "javainthinking",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/powerdrill-data-analysis-skill",
    "downloadUrl": "https://openagent3.xyz/downloads/powerdrill-data-analysis-skill",
    "agentUrl": "https://openagent3.xyz/skills/powerdrill-data-analysis-skill/agent",
    "manifestUrl": "https://openagent3.xyz/skills/powerdrill-data-analysis-skill/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/powerdrill-data-analysis-skill/agent.md"
  }
}