Requirements
- Target platform: OpenClaw
- Install method: Manual import
- Extraction: Extract archive
- Prerequisites: OpenClaw
- Primary doc: SKILL.md
Scan websites and content to identify SEO gaps, analyze meta tags, technical factors, keyword use, and provide competitor comparison insights.
Scan websites and content to identify SEO gaps, analyze meta tags, technical factors, keyword use, and provide competitor comparison insights.
Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.
I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete.
I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run.
Comprehensive SEO analysis for content and websites.
# Using PageSpeed Insights API curl "https://www.googleapis.com/pagespeedonline/v5/runPagespeed?url=https://example.com&strategy=mobile" # Using lighthouse locally npx lighthouse https://example.com --view
curl https://example.com/robots.txt
curl https://example.com/sitemap.xml | xmllint --format -
openssl s_client -connect example.com:443 -servername example.com 2>/dev/null | openssl x509 -noout -dates
# Google's Mobile-Friendly Test API curl "https://searchconsole.googleapis.com/v1/urlTestingTools/mobileFriendlyTest:run?key=YOUR_API_KEY" \ -H "Content-Type: application/json" \ -d '{"url":"https://example.com"}'
import re


def keyword_density(text, keyword):
    """Compute how often *keyword* appears in *text* as a percentage of words.

    Matching is case-insensitive and anchored on word boundaries, so the
    keyword is only counted as a whole word/phrase ("cat" does not match
    inside "category"). Text with no words yields a 0.00% density instead
    of raising ZeroDivisionError.

    Returns a dict with the keyword, its occurrence count, the total word
    count, and the density formatted as a percentage string.
    """
    normalized = text.lower()
    words = re.findall(r'\b\w+\b', normalized)
    # Word-boundary match avoids counting the keyword inside longer words,
    # which the previous substring count did.
    pattern = r'\b' + re.escape(keyword.lower()) + r'\b'
    keyword_count = len(re.findall(pattern, normalized))
    # Guard against empty/word-free input: density is zero, not a crash.
    density = (keyword_count / len(words)) * 100 if words else 0.0
    return {
        "keyword": keyword,
        "count": keyword_count,
        "total_words": len(words),
        "density": f"{density:.2f}%",  # SEO guidance: target roughly 1-2%
    }
# Readability scoring demo using the third-party `textstat` package.
import textstat

# Replace with the content you want to score.
text = "Your content here..."

# Flesch Reading Ease: higher scores mean easier-to-read text.
flesch = textstat.flesch_reading_ease(text)
# 90-100: Very Easy
# 60-70: Standard
# 0-30: Very Difficult

# Flesch-Kincaid grade level of the text.
grade = textstat.flesch_kincaid_grade(text)
# Target: 8-9 for general audience
from bs4 import BeautifulSoup


def analyze_headings(html):
    """Count h1/h2/h3 tags in *html* and flag common H1 problems.

    Returns a dict with per-tag counts and a list of issue strings
    (missing H1, or more than one H1).
    """
    parsed = BeautifulSoup(html, 'html.parser')
    found = {tag: parsed.find_all(tag) for tag in ('h1', 'h2', 'h3')}

    issues = []
    h1_total = len(found['h1'])
    if h1_total == 0:
        issues.append("Missing H1 tag")
    elif h1_total > 1:
        issues.append("Multiple H1 tags (should be one)")

    return {
        "counts": {tag: len(elements) for tag, elements in found.items()},
        "issues": issues,
    }
from bs4 import BeautifulSoup
import requests


def audit_meta_tags(url):
    """Fetch *url* and audit its title and meta tags against common SEO limits.

    Checks: title present and 30-60 chars; meta description present and
    120-160 chars. Returns a dict with the title text, the description
    and keywords contents (None when absent), and a list of issue strings.
    """
    # A timeout keeps a slow or unresponsive server from hanging the audit.
    response = requests.get(url, timeout=10)
    soup = BeautifulSoup(response.text, 'html.parser')
    title = soup.find('title')
    description = soup.find('meta', attrs={'name': 'description'})
    keywords = soup.find('meta', attrs={'name': 'keywords'})
    issues = []
    if not title or len(title.text) < 30:
        issues.append("Title too short or missing")
    elif len(title.text) > 60:
        issues.append("Title too long (>60 chars)")
    if not description:
        issues.append("Meta description missing")
    elif len(description.get('content', '')) < 120:
        issues.append("Meta description too short")
    elif len(description.get('content', '')) > 160:
        issues.append("Meta description too long")
    return {
        "title": title.text if title else None,
        "description": description.get('content') if description else None,
        # Previously fetched but never reported; exposed as a new key
        # (backward-compatible addition to the result dict).
        "keywords": keywords.get('content') if keywords else None,
        "issues": issues,
    }
# Using Google's Rich Results Test curl "https://searchconsole.googleapis.com/v1/urlTestingTools/richResultsTest:run" \ -H "Authorization: Bearer YOUR_TOKEN" \ -H "Content-Type: application/json" \ -d '{"url":"https://example.com"}'
// Article — basic article markup with headline, author, and publish date
{
  "@context": "https://schema.org",
  "@type": "Article",
  "headline": "Title",
  "author": {"@type": "Person", "name": "Author"},
  "datePublished": "2026-01-01"
}

// Local Business — name, postal address, and phone number
{
  "@context": "https://schema.org",
  "@type": "LocalBusiness",
  "name": "Business Name",
  "address": {"@type": "PostalAddress", "streetAddress": "123 Main"},
  "telephone": "+1-555-555-5555"
}

// FAQ — question/answer pairs eligible for FAQ rich results
{
  "@context": "https://schema.org",
  "@type": "FAQPage",
  "mainEntity": [{
    "@type": "Question",
    "name": "Question?",
    "acceptedAnswer": {"@type": "Answer", "text": "Answer"}
  }]
}
import requests
from bs4 import BeautifulSoup


def compare_seo(target_url, competitor_url):
    """Fetch two pages and compare basic on-page SEO metrics side by side.

    Returns {"target": metrics, "competitor": metrics} where each metrics
    dict holds title length, h1/h2 counts, word count, image count, and
    the number of images missing alt text.
    """
    def get_metrics(url):
        # Timeout so an unresponsive site cannot hang the comparison.
        r = requests.get(url, timeout=10)
        soup = BeautifulSoup(r.text, 'html.parser')
        # Hoisted: the original parsed <title> twice and walked the
        # tree twice for <img> tags.
        title = soup.find('title')
        images = soup.find_all('img')
        return {
            "title_len": len(title.text) if title else 0,
            "h1_count": len(soup.find_all('h1')),
            "h2_count": len(soup.find_all('h2')),
            "word_count": len(soup.get_text().split()),
            "images": len(images),
            "images_no_alt": len([i for i in images if not i.get('alt')]),
        }

    return {
        "target": get_metrics(target_url),
        "competitor": get_metrics(competitor_url),
    }
Data access, storage, extraction, analysis, reporting, and insight generation.
Largest current source with strong distribution and engagement signals.