Requirements
- Target platform: OpenClaw
- Install method: Manual import
- Extraction: Extract archive
- Prerequisites: OpenClaw
- Primary doc: SKILL.md
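The package arrives as an archive you extract yourself. A minimal extraction sketch in Python, assuming a zip download named `website-health-check.zip` (a hypothetical filename; use whatever Yavira actually served you):

```python
import zipfile
from pathlib import Path

# Hypothetical filename; substitute the archive you downloaded.
archive = Path("website-health-check.zip")
dest = Path("website-health-check")

with zipfile.ZipFile(archive) as zf:
    zf.extractall(dest)

# SKILL.md at the extracted root is the primary doc to hand to the agent.
print((dest / "SKILL.md").read_text()[:500])
```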
Perform comprehensive website health checks covering performance, broken links, security headers, accessibility, and SEO issues.
Hand the extracted package to your coding agent with a concrete install brief rather than working through the steps manually.
I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete.
I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run.
Comprehensive website health check for performance, accessibility, security, and user experience.
```bash
# One-command overview
curl -I https://example.com && \
curl -w "DNS: %{time_namelookup}s\nConnect: %{time_connect}s\nTTFB: %{time_starttransfer}s\nTotal: %{time_total}s\n" \
  -o /dev/null -s https://example.com
```
```bash
# Using curl for timing
curl -w "DNS: %{time_namelookup}s\nConnect: %{time_connect}s\nSSL: %{time_appconnect}s\nTTFB: %{time_starttransfer}s\nTotal: %{time_total}s\nSize: %{size_download} bytes\n" \
  -o /dev/null -s https://example.com

# Using lighthouse
npx lighthouse https://example.com --only-categories=performance --output=json
```
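The Lighthouse JSON report can be inspected programmatically. A sketch assuming the report was saved as `report.json` (for example via `--output-path=report.json`); the key paths match recent Lighthouse versions but are worth verifying against your own report:

```python
import json

with open("report.json") as f:
    report = json.load(f)

# Category scores are normalized to 0.0-1.0.
score = report["categories"]["performance"]["score"]
print(f"Performance score: {score * 100:.0f}/100")

# Individual audits are keyed by id under "audits".
fcp = report["audits"]["first-contentful-paint"]["displayValue"]
print(f"First Contentful Paint: {fcp}")
```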
```python
import requests
from bs4 import BeautifulSoup

def analyze_resources(url):
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    resources = []

    # Images
    for img in soup.find_all('img'):
        resources.append({
            'type': 'image',
            'url': img.get('src'),
            'size_estimate': 'unknown'
        })

    # Scripts
    for script in soup.find_all('script', src=True):
        resources.append({'type': 'script', 'url': script.get('src')})

    # Stylesheets
    for link in soup.find_all('link', rel='stylesheet'):
        resources.append({'type': 'stylesheet', 'url': link.get('href')})

    return resources
```
```bash
# Using web-vitals CLI
npx web-vitals https://example.com

# LCP (Largest Contentful Paint): < 2.5s
# FID (First Input Delay): < 100ms
# CLS (Cumulative Layout Shift): < 0.1
```
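To turn those thresholds into a pass/fail signal, a small sketch; the measured values here are made-up placeholders you would replace with field or lab data:

```python
# 'Good' thresholds for Core Web Vitals, matching the targets above.
THRESHOLDS = {
    "LCP": 2.5,  # seconds
    "FID": 0.1,  # seconds (100 ms)
    "CLS": 0.1,  # unitless layout-shift score
}

def rate_vitals(measured):
    """Flag each metric as good or needing improvement."""
    return {
        name: "good" if value <= THRESHOLDS[name] else "needs improvement"
        for name, value in measured.items() if name in THRESHOLDS
    }

# Example with made-up measurements.
print(rate_vitals({"LCP": 1.9, "FID": 0.05, "CLS": 0.24}))
# {'LCP': 'good', 'FID': 'good', 'CLS': 'needs improvement'}
```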
```python
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse

def find_broken_links(base_url, max_depth=2):
    visited = set()
    broken = []

    def check_page(url, depth):
        if depth > max_depth or url in visited:
            return
        visited.add(url)
        try:
            response = requests.get(url, timeout=10)
            if response.status_code >= 400:
                broken.append({'url': url, 'status': response.status_code})
                return
            soup = BeautifulSoup(response.text, 'html.parser')
            for link in soup.find_all('a', href=True):
                href = urljoin(url, link['href'])
                if urlparse(href).netloc == urlparse(base_url).netloc:
                    check_page(href, depth + 1)
        except Exception as e:
            broken.append({'url': url, 'error': str(e)})

    check_page(base_url, 0)
    return broken
```
```bash
# Using wget
wget --spider -r -l 2 https://example.com 2>&1 | grep -E "(broken|failed|error)"

# Using linkchecker
pip install LinkChecker
linkchecker https://example.com
```
```bash
# Fetch and analyze headers
curl -I https://example.com

# Expected headers:
# - Strict-Transport-Security (HSTS)
# - X-Content-Type-Options: nosniff
# - X-Frame-Options: DENY or SAMEORIGIN
# - Content-Security-Policy
# - X-XSS-Protection
```
```python
import requests

def audit_security_headers(url):
    response = requests.head(url)
    headers = response.headers
    recommended = {
        'Strict-Transport-Security': 'Enable HSTS',
        'X-Content-Type-Options': 'Set to nosniff',
        'X-Frame-Options': 'Set to DENY or SAMEORIGIN',
        'Content-Security-Policy': 'Define CSP',
        'X-XSS-Protection': 'Enable XSS filter',
        'Referrer-Policy': 'Set referrer policy',
        'Permissions-Policy': 'Define permissions'
    }
    issues = []
    for header, recommendation in recommended.items():
        if header not in headers:
            issues.append(f"Missing {header}: {recommendation}")
    return {
        "present": {h: headers.get(h) for h in recommended if h in headers},
        "missing": issues
    }
```
```bash
# Check SSL details
# (</dev/null closes stdin so s_client exits after the handshake)
openssl s_client -connect example.com:443 -servername example.com </dev/null 2>/dev/null | \
  openssl x509 -noout -text | grep -E "(Issuer|Not After|Subject)"

# Quick expiry check
openssl s_client -connect example.com:443 -servername example.com </dev/null 2>/dev/null | \
  openssl x509 -noout -dates
```
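The same expiry check can be scripted with Python's standard library, which avoids parsing openssl's text output. A minimal sketch:

```python
import socket
import ssl
import time

def days_until_expiry(host, port=443):
    """Complete a TLS handshake and report days until the cert expires."""
    context = ssl.create_default_context()
    with socket.create_connection((host, port), timeout=10) as sock:
        with context.wrap_socket(sock, server_hostname=host) as tls:
            cert = tls.getpeercert()
    # cert["notAfter"] looks like 'Jun  1 12:00:00 2025 GMT'.
    expires = ssl.cert_time_to_seconds(cert["notAfter"])
    return int((expires - time.time()) // 86400)

print(days_until_expiry("example.com"))
```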
```python
from bs4 import BeautifulSoup

def accessibility_audit(html):
    soup = BeautifulSoup(html, 'html.parser')
    issues = []

    # Check images for alt text
    for img in soup.find_all('img'):
        if not img.get('alt'):
            issues.append(f"Image missing alt: {img.get('src', 'unknown')}")

    # Check for lang attribute
    if not soup.find('html', lang=True):
        issues.append("Missing lang attribute on <html>")

    # Check heading hierarchy
    headings = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
    prev_level = 0
    for h in soup.find_all(headings):
        level = int(h.name[1])
        if level > prev_level + 1:
            issues.append(f"Skipped heading level: h{prev_level} to h{level}")
        prev_level = level

    # Check for form labels
    for input_tag in soup.find_all('input'):
        if not input_tag.get('id') or not soup.find('label', attrs={'for': input_tag.get('id')}):
            if not input_tag.get('aria-label'):
                issues.append(f"Input missing label: {input_tag.get('name', 'unknown')}")

    return issues
```
```bash
# Using @axe-core/cli
npx @axe-core/cli https://example.com

# Using pa11y
npx pa11y https://example.com
```
```python
from bs4 import BeautifulSoup

def seo_quick_check(html, url):
    soup = BeautifulSoup(html, 'html.parser')
    issues = []

    # Title
    title = soup.find('title')
    if not title:
        issues.append("Missing <title> tag")
    elif len(title.text) < 30 or len(title.text) > 60:
        issues.append(f"Title length suboptimal: {len(title.text)} chars (30-60 ideal)")

    # Meta description
    desc = soup.find('meta', attrs={'name': 'description'})
    if not desc:
        issues.append("Missing meta description")

    # H1
    h1_tags = soup.find_all('h1')
    if len(h1_tags) == 0:
        issues.append("Missing H1 tag")
    elif len(h1_tags) > 1:
        issues.append("Multiple H1 tags found")

    # Canonical
    if not soup.find('link', rel='canonical'):
        issues.append("Missing canonical tag")

    # Robots meta
    robots = soup.find('meta', attrs={'name': 'robots'})
    if robots and 'noindex' in robots.get('content', ''):
        issues.append("Page is set to noindex")

    return issues
```
| Check | Command |
| --- | --- |
| Response headers | `curl -I URL` |
| Load timing | `curl -w "%{time_total}s" -o /dev/null -s URL` |
| SSL check | `openssl s_client -connect HOST:443` |
| Broken links | `linkchecker URL` |
| Accessibility | `npx pa11y URL` |
| Performance | `npx lighthouse URL` |
| Security headers | `curl -I URL \| grep -iE "x-\|strict"` |
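The Python helpers above can be wired into a single report. A sketch assuming the functions from the earlier snippets are defined in, or imported into, the same module:

```python
import requests

def run_health_check(url):
    """Run the audits defined above and collect their findings."""
    html = requests.get(url, timeout=10).text
    return {
        "security": audit_security_headers(url),
        "accessibility": accessibility_audit(html),
        "seo": seo_quick_check(html, url),
        "broken_links": find_broken_links(url, max_depth=1),
    }

for section, findings in run_health_check("https://example.com").items():
    print(f"== {section} ==")
    print(findings)
```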