{
  "schemaVersion": "1.0",
  "item": {
    "slug": "infra-as-code",
    "name": "Infrastructure as Code",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/gitgoodordietrying/infra-as-code",
    "canonicalUrl": "https://clawhub.ai/gitgoodordietrying/infra-as-code",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/infra-as-code",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=infra-as-code",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "slug": "infra-as-code",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-05-04T09:24:27.541Z",
      "expiresAt": "2026-05-11T09:24:27.541Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=infra-as-code",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=infra-as-code",
        "contentDisposition": "attachment; filename=\"infra-as-code-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null,
        "slug": "infra-as-code"
      },
      "scope": "item",
      "summary": "Item download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this item.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/infra-as-code"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/infra-as-code",
    "agentPageUrl": "https://openagent3.xyz/skills/infra-as-code/agent",
    "manifestUrl": "https://openagent3.xyz/skills/infra-as-code/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/infra-as-code/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Infrastructure as Code",
        "body": "Define, deploy, and manage cloud infrastructure using declarative configuration. Covers Terraform (multi-cloud), AWS CloudFormation, and Pulumi (code-first), with patterns for compute, networking, storage, databases, and state management."
      },
      {
        "title": "When to Use",
        "body": "Setting up cloud infrastructure (VPCs, EC2, Lambda, S3, RDS, etc.)\nWriting or modifying Terraform configurations\nManaging Terraform state (remote backends, workspaces, imports)\nCreating CloudFormation templates\nUsing Pulumi for infrastructure in TypeScript/Python/Go\nPlanning and previewing infrastructure changes safely\nDebugging drift between declared state and actual resources\nSetting up multi-environment deployments (dev/staging/prod)"
      },
      {
        "title": "Quick Start",
        "body": "# Install: https://developer.hashicorp.com/terraform/install\n\n# Initialize a project\nmkdir infra && cd infra\nterraform init\n\n# Core workflow\nterraform plan        # Preview changes (safe, read-only)\nterraform apply       # Apply changes (creates/modifies resources)\nterraform destroy     # Tear down all resources\n\n# Format and validate\nterraform fmt -recursive    # Auto-format all .tf files\nterraform validate          # Check syntax and config validity"
      },
      {
        "title": "Project Structure",
        "body": "infra/\n  main.tf              # Primary resources\n  variables.tf         # Input variable declarations\n  outputs.tf           # Output values\n  providers.tf         # Provider configuration\n  terraform.tfvars     # Variable values (don't commit secrets)\n  backend.tf           # Remote state configuration\n  modules/\n    vpc/\n      main.tf\n      variables.tf\n      outputs.tf\n    compute/\n      main.tf\n      variables.tf\n      outputs.tf"
      },
      {
        "title": "Provider Configuration",
        "body": "# providers.tf\nterraform {\n  required_version = \">= 1.5\"\n  required_providers {\n    aws = {\n      source  = \"hashicorp/aws\"\n      version = \"~> 5.0\"\n    }\n  }\n}\n\nprovider \"aws\" {\n  region = var.aws_region\n\n  default_tags {\n    tags = {\n      Project     = var.project_name\n      Environment = var.environment\n      ManagedBy   = \"terraform\"\n    }\n  }\n}"
      },
      {
        "title": "Variables and Outputs",
        "body": "# variables.tf\nvariable \"aws_region\" {\n  type        = string\n  default     = \"us-east-1\"\n  description = \"AWS region for all resources\"\n}\n\nvariable \"environment\" {\n  type        = string\n  description = \"Deployment environment\"\n  validation {\n    condition     = contains([\"dev\", \"staging\", \"prod\"], var.environment)\n    error_message = \"Environment must be dev, staging, or prod.\"\n  }\n}\n\nvariable \"instance_type\" {\n  type    = string\n  default = \"t3.micro\"\n}\n\nvariable \"db_password\" {\n  type      = string\n  sensitive = true\n  description = \"Database password (pass via TF_VAR_db_password env var)\"\n}\n\n# outputs.tf\noutput \"vpc_id\" {\n  value       = aws_vpc.main.id\n  description = \"VPC ID\"\n}\n\noutput \"api_endpoint\" {\n  value = aws_lb.api.dns_name\n}"
      },
      {
        "title": "VPC + Networking",
        "body": "# Networking module\nresource \"aws_vpc\" \"main\" {\n  cidr_block           = \"10.0.0.0/16\"\n  enable_dns_support   = true\n  enable_dns_hostnames = true\n\n  tags = { Name = \"${var.project_name}-vpc\" }\n}\n\nresource \"aws_subnet\" \"public\" {\n  count             = 2\n  vpc_id            = aws_vpc.main.id\n  cidr_block        = \"10.0.${count.index + 1}.0/24\"\n  availability_zone = data.aws_availability_zones.available.names[count.index]\n\n  map_public_ip_on_launch = true\n  tags = { Name = \"${var.project_name}-public-${count.index + 1}\" }\n}\n\nresource \"aws_subnet\" \"private\" {\n  count             = 2\n  vpc_id            = aws_vpc.main.id\n  cidr_block        = \"10.0.${count.index + 10}.0/24\"\n  availability_zone = data.aws_availability_zones.available.names[count.index]\n\n  tags = { Name = \"${var.project_name}-private-${count.index + 1}\" }\n}\n\nresource \"aws_internet_gateway\" \"main\" {\n  vpc_id = aws_vpc.main.id\n}\n\nresource \"aws_route_table\" \"public\" {\n  vpc_id = aws_vpc.main.id\n\n  route {\n    cidr_block = \"0.0.0.0/0\"\n    gateway_id = aws_internet_gateway.main.id\n  }\n}\n\nresource \"aws_route_table_association\" \"public\" {\n  count          = 2\n  subnet_id      = aws_subnet.public[count.index].id\n  route_table_id = aws_route_table.public.id\n}\n\nresource \"aws_security_group\" \"web\" {\n  name_prefix = \"${var.project_name}-web-\"\n  vpc_id      = aws_vpc.main.id\n\n  ingress {\n    from_port   = 80\n    to_port     = 80\n    protocol    = \"tcp\"\n    cidr_blocks = [\"0.0.0.0/0\"]\n  }\n\n  ingress {\n    from_port   = 443\n    to_port     = 443\n    protocol    = \"tcp\"\n    cidr_blocks = [\"0.0.0.0/0\"]\n  }\n\n  egress {\n    from_port   = 0\n    to_port     = 0\n    protocol    = \"-1\"\n    cidr_blocks = [\"0.0.0.0/0\"]\n  }\n}\n\ndata \"aws_availability_zones\" \"available\" {\n  state = \"available\"\n}"
      },
      {
        "title": "Compute (EC2)",
        "body": "resource \"aws_instance\" \"app\" {\n  ami                    = data.aws_ami.ubuntu.id\n  instance_type          = var.instance_type\n  subnet_id              = aws_subnet.public[0].id\n  vpc_security_group_ids = [aws_security_group.web.id]\n  key_name               = var.key_pair_name\n\n  user_data = <<-EOF\n    #!/bin/bash\n    apt-get update\n    apt-get install -y docker.io\n    systemctl start docker\n    docker run -d -p 80:8080 ${var.docker_image}\n  EOF\n\n  tags = { Name = \"${var.project_name}-app\" }\n}\n\ndata \"aws_ami\" \"ubuntu\" {\n  most_recent = true\n  owners      = [\"099720109477\"] # Canonical\n\n  filter {\n    name   = \"name\"\n    values = [\"ubuntu/images/hvm-ssd-gp3/ubuntu-*-24.04-amd64-server-*\"]\n  }\n}"
      },
      {
        "title": "S3 + Static Website",
        "body": "resource \"aws_s3_bucket\" \"website\" {\n  bucket = \"${var.project_name}-website\"\n}\n\nresource \"aws_s3_bucket_website_configuration\" \"website\" {\n  bucket = aws_s3_bucket.website.id\n\n  index_document { suffix = \"index.html\" }\n  error_document { key = \"error.html\" }\n}\n\nresource \"aws_s3_bucket_public_access_block\" \"website\" {\n  bucket = aws_s3_bucket.website.id\n\n  block_public_acls       = false\n  block_public_policy     = false\n  ignore_public_acls      = false\n  restrict_public_buckets = false\n}\n\nresource \"aws_s3_bucket_policy\" \"website\" {\n  bucket = aws_s3_bucket.website.id\n\n  policy = jsonencode({\n    Version = \"2012-10-17\"\n    Statement = [{\n      Sid       = \"PublicRead\"\n      Effect    = \"Allow\"\n      Principal = \"*\"\n      Action    = \"s3:GetObject\"\n      Resource  = \"${aws_s3_bucket.website.arn}/*\"\n    }]\n  })\n\n  depends_on = [aws_s3_bucket_public_access_block.website]\n}"
      },
      {
        "title": "RDS Database",
        "body": "resource \"aws_db_subnet_group\" \"main\" {\n  name       = \"${var.project_name}-db\"\n  subnet_ids = aws_subnet.private[*].id\n}\n\nresource \"aws_security_group\" \"db\" {\n  name_prefix = \"${var.project_name}-db-\"\n  vpc_id      = aws_vpc.main.id\n\n  ingress {\n    from_port       = 5432\n    to_port         = 5432\n    protocol        = \"tcp\"\n    security_groups = [aws_security_group.web.id]\n  }\n}\n\nresource \"aws_db_instance\" \"main\" {\n  identifier        = \"${var.project_name}-db\"\n  engine            = \"postgres\"\n  engine_version    = \"16.1\"\n  instance_class    = \"db.t3.micro\"\n  allocated_storage = 20\n\n  db_name  = var.db_name\n  username = var.db_username\n  password = var.db_password\n\n  db_subnet_group_name   = aws_db_subnet_group.main.name\n  vpc_security_group_ids = [aws_security_group.db.id]\n\n  backup_retention_period = 7\n  skip_final_snapshot     = var.environment != \"prod\"\n  deletion_protection     = var.environment == \"prod\"\n}"
      },
      {
        "title": "Lambda Function",
        "body": "data \"archive_file\" \"lambda_zip\" {\n  type        = \"zip\"\n  source_dir  = \"${path.module}/lambda\"\n  output_path = \"${path.module}/lambda.zip\"\n}\n\nresource \"aws_lambda_function\" \"api\" {\n  function_name    = \"${var.project_name}-api\"\n  filename         = data.archive_file.lambda_zip.output_path\n  source_code_hash = data.archive_file.lambda_zip.output_base64sha256\n  handler          = \"index.handler\"\n  runtime          = \"nodejs20.x\"\n  timeout          = 30\n\n  role = aws_iam_role.lambda_exec.arn\n\n  environment {\n    variables = {\n      DB_HOST     = aws_db_instance.main.endpoint\n      ENVIRONMENT = var.environment\n    }\n  }\n}\n\nresource \"aws_iam_role\" \"lambda_exec\" {\n  name = \"${var.project_name}-lambda-exec\"\n\n  assume_role_policy = jsonencode({\n    Version = \"2012-10-17\"\n    Statement = [{\n      Action = \"sts:AssumeRole\"\n      Effect = \"Allow\"\n      Principal = { Service = \"lambda.amazonaws.com\" }\n    }]\n  })\n}\n\nresource \"aws_iam_role_policy_attachment\" \"lambda_basic\" {\n  role       = aws_iam_role.lambda_exec.name\n  policy_arn = \"arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole\"\n}"
      },
      {
        "title": "State Management",
        "body": "# backend.tf - Remote state in S3\nterraform {\n  backend \"s3\" {\n    bucket         = \"my-terraform-state\"\n    key            = \"project/terraform.tfstate\"\n    region         = \"us-east-1\"\n    dynamodb_table = \"terraform-locks\"\n    encrypt        = true\n  }\n}\n\n# State operations\nterraform state list                    # List all resources in state\nterraform state show aws_instance.app   # Show resource details\nterraform state mv aws_instance.app aws_instance.web  # Rename resource\nterraform state rm aws_instance.old     # Remove from state (doesn't destroy)\n\n# Import existing resource into Terraform\nterraform import aws_instance.app i-1234567890abcdef0\n\n# Workspaces (multiple environments, same config)\nterraform workspace new dev\nterraform workspace new staging\nterraform workspace new prod\nterraform workspace select dev\nterraform workspace list"
      },
      {
        "title": "Multi-Environment Pattern",
        "body": "# Use workspaces + tfvars files\n# terraform.tfvars (default)\n# env/dev.tfvars\n# env/staging.tfvars\n# env/prod.tfvars\n\n# Apply for specific environment\n# terraform apply -var-file=env/prod.tfvars\n\n# Environment-specific apply\nENV=${1:-dev}\nterraform workspace select \"$ENV\" || terraform workspace new \"$ENV\"\nterraform apply -var-file=\"env/$ENV.tfvars\""
      },
      {
        "title": "Template Structure",
        "body": "# cloudformation.yaml\nAWSTemplateFormatVersion: '2010-09-09'\nDescription: My application stack\n\nParameters:\n  Environment:\n    Type: String\n    AllowedValues: [dev, staging, prod]\n    Default: dev\n  InstanceType:\n    Type: String\n    Default: t3.micro\n  DBPassword:\n    Type: String\n    NoEcho: true\n\nConditions:\n  IsProd: !Equals [!Ref Environment, prod]\n\nMappings:\n  RegionAMI:\n    us-east-1:\n      ubuntu: ami-0123456789abcdef0\n\nResources:\n  VPC:\n    Type: AWS::EC2::VPC\n    Properties:\n      CidrBlock: 10.0.0.0/16\n      EnableDnsSupport: true\n      EnableDnsHostnames: true\n      Tags:\n        - Key: Name\n          Value: !Sub \"${AWS::StackName}-vpc\"\n\n  PublicSubnet:\n    Type: AWS::EC2::Subnet\n    Properties:\n      VpcId: !Ref VPC\n      CidrBlock: 10.0.1.0/24\n      MapPublicIpOnLaunch: true\n      Tags:\n        - Key: Name\n          Value: !Sub \"${AWS::StackName}-public\"\n\n  AppInstance:\n    Type: AWS::EC2::Instance\n    Properties:\n      InstanceType: !Ref InstanceType\n      SubnetId: !Ref PublicSubnet\n      ImageId: !FindInMap [RegionAMI, !Ref \"AWS::Region\", ubuntu]\n\n  Database:\n    Type: AWS::RDS::DBInstance\n    Condition: IsProd\n    DeletionPolicy: Snapshot\n    Properties:\n      Engine: postgres\n      DBInstanceClass: db.t3.micro\n      AllocatedStorage: 20\n      MasterUsername: admin\n      MasterUserPassword: !Ref DBPassword\n\nOutputs:\n  VpcId:\n    Value: !Ref VPC\n    Export:\n      Name: !Sub \"${AWS::StackName}-VpcId\"\n  InstanceIP:\n    Value: !GetAtt AppInstance.PublicIp"
      },
      {
        "title": "CloudFormation CLI",
        "body": "# Validate template\naws cloudformation validate-template --template-body file://cloudformation.yaml\n\n# Create stack\naws cloudformation create-stack \\\n  --stack-name myapp-dev \\\n  --template-body file://cloudformation.yaml \\\n  --parameters ParameterKey=Environment,ParameterValue=dev \\\n  --capabilities CAPABILITY_IAM\n\n# Update stack\naws cloudformation update-stack \\\n  --stack-name myapp-dev \\\n  --template-body file://cloudformation.yaml \\\n  --parameters ParameterKey=Environment,ParameterValue=dev\n\n# Preview changes (changeset)\naws cloudformation create-change-set \\\n  --stack-name myapp-dev \\\n  --change-set-name update-1 \\\n  --template-body file://cloudformation.yaml\n\naws cloudformation describe-change-set \\\n  --stack-name myapp-dev \\\n  --change-set-name update-1\n\n# Delete stack\naws cloudformation delete-stack --stack-name myapp-dev\n\n# List stacks\naws cloudformation list-stacks --stack-status-filter CREATE_COMPLETE UPDATE_COMPLETE\n\n# Stack events (debugging)\naws cloudformation describe-stack-events --stack-name myapp-dev | head -50"
      },
      {
        "title": "Quick Start (TypeScript)",
        "body": "# Install: https://www.pulumi.com/docs/install/\npulumi new aws-typescript\n\n# Core workflow\npulumi preview    # Preview changes\npulumi up         # Apply changes\npulumi destroy    # Tear down\npulumi stack ls   # List stacks"
      },
      {
        "title": "TypeScript Example",
        "body": "// index.ts\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\n\nconst config = new pulumi.Config();\nconst environment = config.require(\"environment\");\n\n// VPC\nconst vpc = new aws.ec2.Vpc(\"main\", {\n  cidrBlock: \"10.0.0.0/16\",\n  enableDnsSupport: true,\n  enableDnsHostnames: true,\n  tags: { Name: `myapp-${environment}-vpc` },\n});\n\n// Public subnet\nconst publicSubnet = new aws.ec2.Subnet(\"public\", {\n  vpcId: vpc.id,\n  cidrBlock: \"10.0.1.0/24\",\n  mapPublicIpOnLaunch: true,\n  tags: { Name: `myapp-${environment}-public` },\n});\n\n// S3 bucket\nconst bucket = new aws.s3.Bucket(\"data\", {\n  bucket: `myapp-${environment}-data`,\n  versioning: { enabled: true },\n});\n\n// Lambda function\nconst lambdaRole = new aws.iam.Role(\"lambda-role\", {\n  assumeRolePolicy: JSON.stringify({\n    Version: \"2012-10-17\",\n    Statement: [{\n      Action: \"sts:AssumeRole\",\n      Effect: \"Allow\",\n      Principal: { Service: \"lambda.amazonaws.com\" },\n    }],\n  }),\n});\n\nconst lambdaFunc = new aws.lambda.Function(\"api\", {\n  runtime: \"nodejs20.x\",\n  handler: \"index.handler\",\n  role: lambdaRole.arn,\n  code: new pulumi.asset.FileArchive(\"./lambda\"),\n  environment: {\n    variables: {\n      BUCKET_NAME: bucket.id,\n      ENVIRONMENT: environment,\n    },\n  },\n});\n\n// Outputs\nexport const vpcId = vpc.id;\nexport const bucketName = bucket.id;\nexport const lambdaArn = lambdaFunc.arn;"
      },
      {
        "title": "Python Example",
        "body": "# __main__.py\nimport pulumi\nimport pulumi_aws as aws\n\nconfig = pulumi.Config()\nenvironment = config.require(\"environment\")\n\nvpc = aws.ec2.Vpc(\"main\",\n    cidr_block=\"10.0.0.0/16\",\n    enable_dns_support=True,\n    enable_dns_hostnames=True,\n    tags={\"Name\": f\"myapp-{environment}-vpc\"})\n\nbucket = aws.s3.Bucket(\"data\",\n    bucket=f\"myapp-{environment}-data\",\n    versioning=aws.s3.BucketVersioningArgs(enabled=True))\n\npulumi.export(\"vpc_id\", vpc.id)\npulumi.export(\"bucket_name\", bucket.id)"
      },
      {
        "title": "Pulumi State and Stacks",
        "body": "# Create per-environment stacks\npulumi stack init dev\npulumi stack init staging\npulumi stack init prod\n\n# Switch stack\npulumi stack select dev\n\n# Set config per stack\npulumi config set environment dev\npulumi config set aws:region us-east-1\npulumi config set --secret dbPassword 'my-secret-pass'\n\n# Stack references (cross-stack)\n# In consuming stack:\nconst infra = new pulumi.StackReference(\"org/infra/prod\");\nconst vpcId = infra.getOutput(\"vpcId\");"
      },
      {
        "title": "Terraform plan issues",
        "body": "# Detailed plan output\nterraform plan -out=plan.tfplan\nterraform show plan.tfplan\nterraform show -json plan.tfplan | jq '.resource_changes[] | {address, change: .change.actions}'\n\n# Debug mode\nTF_LOG=DEBUG terraform plan 2> debug.log\n\n# Check for drift\nterraform plan -refresh-only\n\n# Force refresh state\nterraform apply -refresh-only"
      },
      {
        "title": "Common issues",
        "body": "# Resource stuck in \"tainted\" state\nterraform untaint aws_instance.app\n\n# State locked (another apply running or crashed)\nterraform force-unlock LOCK_ID\n\n# Provider version conflict\nterraform providers lock    # Generate lock file\nterraform init -upgrade     # Upgrade providers\n\n# Circular dependency\n# Error: \"Cycle\" in terraform plan\n# Fix: use depends_on explicitly, or break the cycle with data sources"
      },
      {
        "title": "Cost estimation",
        "body": "# Infracost (estimates monthly cost from Terraform plans)\n# Install: https://www.infracost.io/docs/\ninfracost breakdown --path .\ninfracost diff --path . --compare-to infracost-base.json"
      },
      {
        "title": "Tips",
        "body": "Always run terraform plan before apply. Read the plan output carefully — especially lines showing destroy or replace.\nUse remote state from day one. Local state files get lost, can't be shared, and have no locking.\nTag everything. At minimum: Project, Environment, ManagedBy. Tags make cost tracking and cleanup possible.\nNever store secrets in .tf files or terraform.tfvars. Use environment variables (TF_VAR_name), secrets managers, or Vault.\nUse prevent_destroy lifecycle rules on stateful resources (databases, S3 buckets with data) to prevent accidental deletion.\nPin provider versions (~> 5.0 not >= 5.0) to avoid surprise breaking changes.\nFor multi-environment setups, prefer workspaces + var files over duplicated configurations.\nCloudFormation change sets are the equivalent of terraform plan — always create one before updating a stack.\nPulumi's advantage is using real programming languages (loops, conditionals, type checking). Use it when Terraform's HCL feels limiting."
      }
    ],
    "body": "Infrastructure as Code\n\nDefine, deploy, and manage cloud infrastructure using declarative configuration. Covers Terraform (multi-cloud), AWS CloudFormation, and Pulumi (code-first), with patterns for compute, networking, storage, databases, and state management.\n\nWhen to Use\nSetting up cloud infrastructure (VPCs, EC2, Lambda, S3, RDS, etc.)\nWriting or modifying Terraform configurations\nManaging Terraform state (remote backends, workspaces, imports)\nCreating CloudFormation templates\nUsing Pulumi for infrastructure in TypeScript/Python/Go\nPlanning and previewing infrastructure changes safely\nDebugging drift between declared state and actual resources\nSetting up multi-environment deployments (dev/staging/prod)\nTerraform\nQuick Start\n# Install: https://developer.hashicorp.com/terraform/install\n\n# Initialize a project\nmkdir infra && cd infra\nterraform init\n\n# Core workflow\nterraform plan        # Preview changes (safe, read-only)\nterraform apply       # Apply changes (creates/modifies resources)\nterraform destroy     # Tear down all resources\n\n# Format and validate\nterraform fmt -recursive    # Auto-format all .tf files\nterraform validate          # Check syntax and config validity\n\nProject Structure\ninfra/\n  main.tf              # Primary resources\n  variables.tf         # Input variable declarations\n  outputs.tf           # Output values\n  providers.tf         # Provider configuration\n  terraform.tfvars     # Variable values (don't commit secrets)\n  backend.tf           # Remote state configuration\n  modules/\n    vpc/\n      main.tf\n      variables.tf\n      outputs.tf\n    compute/\n      main.tf\n      variables.tf\n      outputs.tf\n\nProvider Configuration\n# providers.tf\nterraform {\n  required_version = \">= 1.5\"\n  required_providers {\n    aws = {\n      source  = \"hashicorp/aws\"\n      version = \"~> 5.0\"\n    }\n  }\n}\n\nprovider \"aws\" {\n  region = var.aws_region\n\n  default_tags {\n    tags = {\n 
     Project     = var.project_name\n      Environment = var.environment\n      ManagedBy   = \"terraform\"\n    }\n  }\n}\n\nVariables and Outputs\n# variables.tf\nvariable \"aws_region\" {\n  type        = string\n  default     = \"us-east-1\"\n  description = \"AWS region for all resources\"\n}\n\nvariable \"environment\" {\n  type        = string\n  description = \"Deployment environment\"\n  validation {\n    condition     = contains([\"dev\", \"staging\", \"prod\"], var.environment)\n    error_message = \"Environment must be dev, staging, or prod.\"\n  }\n}\n\nvariable \"instance_type\" {\n  type    = string\n  default = \"t3.micro\"\n}\n\nvariable \"db_password\" {\n  type      = string\n  sensitive = true\n  description = \"Database password (pass via TF_VAR_db_password env var)\"\n}\n\n# outputs.tf\noutput \"vpc_id\" {\n  value       = aws_vpc.main.id\n  description = \"VPC ID\"\n}\n\noutput \"api_endpoint\" {\n  value = aws_lb.api.dns_name\n}\n\nVPC + Networking\n# Networking module\nresource \"aws_vpc\" \"main\" {\n  cidr_block           = \"10.0.0.0/16\"\n  enable_dns_support   = true\n  enable_dns_hostnames = true\n\n  tags = { Name = \"${var.project_name}-vpc\" }\n}\n\nresource \"aws_subnet\" \"public\" {\n  count             = 2\n  vpc_id            = aws_vpc.main.id\n  cidr_block        = \"10.0.${count.index + 1}.0/24\"\n  availability_zone = data.aws_availability_zones.available.names[count.index]\n\n  map_public_ip_on_launch = true\n  tags = { Name = \"${var.project_name}-public-${count.index + 1}\" }\n}\n\nresource \"aws_subnet\" \"private\" {\n  count             = 2\n  vpc_id            = aws_vpc.main.id\n  cidr_block        = \"10.0.${count.index + 10}.0/24\"\n  availability_zone = data.aws_availability_zones.available.names[count.index]\n\n  tags = { Name = \"${var.project_name}-private-${count.index + 1}\" }\n}\n\nresource \"aws_internet_gateway\" \"main\" {\n  vpc_id = aws_vpc.main.id\n}\n\nresource \"aws_route_table\" \"public\" {\n  
vpc_id = aws_vpc.main.id\n\n  route {\n    cidr_block = \"0.0.0.0/0\"\n    gateway_id = aws_internet_gateway.main.id\n  }\n}\n\nresource \"aws_route_table_association\" \"public\" {\n  count          = 2\n  subnet_id      = aws_subnet.public[count.index].id\n  route_table_id = aws_route_table.public.id\n}\n\nresource \"aws_security_group\" \"web\" {\n  name_prefix = \"${var.project_name}-web-\"\n  vpc_id      = aws_vpc.main.id\n\n  ingress {\n    from_port   = 80\n    to_port     = 80\n    protocol    = \"tcp\"\n    cidr_blocks = [\"0.0.0.0/0\"]\n  }\n\n  ingress {\n    from_port   = 443\n    to_port     = 443\n    protocol    = \"tcp\"\n    cidr_blocks = [\"0.0.0.0/0\"]\n  }\n\n  egress {\n    from_port   = 0\n    to_port     = 0\n    protocol    = \"-1\"\n    cidr_blocks = [\"0.0.0.0/0\"]\n  }\n}\n\ndata \"aws_availability_zones\" \"available\" {\n  state = \"available\"\n}\n\nCompute (EC2)\nresource \"aws_instance\" \"app\" {\n  ami                    = data.aws_ami.ubuntu.id\n  instance_type          = var.instance_type\n  subnet_id              = aws_subnet.public[0].id\n  vpc_security_group_ids = [aws_security_group.web.id]\n  key_name               = var.key_pair_name\n\n  user_data = <<-EOF\n    #!/bin/bash\n    apt-get update\n    apt-get install -y docker.io\n    systemctl start docker\n    docker run -d -p 80:8080 ${var.docker_image}\n  EOF\n\n  tags = { Name = \"${var.project_name}-app\" }\n}\n\ndata \"aws_ami\" \"ubuntu\" {\n  most_recent = true\n  owners      = [\"099720109477\"] # Canonical\n\n  filter {\n    name   = \"name\"\n    values = [\"ubuntu/images/hvm-ssd/ubuntu-*-24.04-amd64-server-*\"]\n  }\n}\n\nS3 + Static Website\nresource \"aws_s3_bucket\" \"website\" {\n  bucket = \"${var.project_name}-website\"\n}\n\nresource \"aws_s3_bucket_website_configuration\" \"website\" {\n  bucket = aws_s3_bucket.website.id\n\n  index_document { suffix = \"index.html\" }\n  error_document { key = \"error.html\" }\n}\n\nresource 
\"aws_s3_bucket_public_access_block\" \"website\" {\n  bucket = aws_s3_bucket.website.id\n\n  block_public_acls       = false\n  block_public_policy     = false\n  ignore_public_acls      = false\n  restrict_public_buckets = false\n}\n\nresource \"aws_s3_bucket_policy\" \"website\" {\n  bucket = aws_s3_bucket.website.id\n\n  policy = jsonencode({\n    Version = \"2012-10-17\"\n    Statement = [{\n      Sid       = \"PublicRead\"\n      Effect    = \"Allow\"\n      Principal = \"*\"\n      Action    = \"s3:GetObject\"\n      Resource  = \"${aws_s3_bucket.website.arn}/*\"\n    }]\n  })\n\n  depends_on = [aws_s3_bucket_public_access_block.website]\n}\n\nRDS Database\nresource \"aws_db_subnet_group\" \"main\" {\n  name       = \"${var.project_name}-db\"\n  subnet_ids = aws_subnet.private[*].id\n}\n\nresource \"aws_security_group\" \"db\" {\n  name_prefix = \"${var.project_name}-db-\"\n  vpc_id      = aws_vpc.main.id\n\n  ingress {\n    from_port       = 5432\n    to_port         = 5432\n    protocol        = \"tcp\"\n    security_groups = [aws_security_group.web.id]\n  }\n}\n\nresource \"aws_db_instance\" \"main\" {\n  identifier        = \"${var.project_name}-db\"\n  engine            = \"postgres\"\n  engine_version    = \"16.1\"\n  instance_class    = \"db.t3.micro\"\n  allocated_storage = 20\n\n  db_name  = var.db_name\n  username = var.db_username\n  password = var.db_password\n\n  db_subnet_group_name   = aws_db_subnet_group.main.name\n  vpc_security_group_ids = [aws_security_group.db.id]\n\n  backup_retention_period = 7\n  skip_final_snapshot     = var.environment != \"prod\"\n  deletion_protection     = var.environment == \"prod\"\n}\n\nLambda Function\ndata \"archive_file\" \"lambda_zip\" {\n  type        = \"zip\"\n  source_dir  = \"${path.module}/lambda\"\n  output_path = \"${path.module}/lambda.zip\"\n}\n\nresource \"aws_lambda_function\" \"api\" {\n  function_name    = \"${var.project_name}-api\"\n  filename         = 
data.archive_file.lambda_zip.output_path\n  source_code_hash = data.archive_file.lambda_zip.output_base64sha256\n  handler          = \"index.handler\"\n  runtime          = \"nodejs20.x\"\n  timeout          = 30\n\n  role = aws_iam_role.lambda_exec.arn\n\n  environment {\n    variables = {\n      DB_HOST     = aws_db_instance.main.endpoint\n      ENVIRONMENT = var.environment\n    }\n  }\n}\n\nresource \"aws_iam_role\" \"lambda_exec\" {\n  name = \"${var.project_name}-lambda-exec\"\n\n  assume_role_policy = jsonencode({\n    Version = \"2012-10-17\"\n    Statement = [{\n      Action = \"sts:AssumeRole\"\n      Effect = \"Allow\"\n      Principal = { Service = \"lambda.amazonaws.com\" }\n    }]\n  })\n}\n\nresource \"aws_iam_role_policy_attachment\" \"lambda_basic\" {\n  role       = aws_iam_role.lambda_exec.name\n  policy_arn = \"arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole\"\n}\n\nState Management\n# backend.tf - Remote state in S3\nterraform {\n  backend \"s3\" {\n    bucket         = \"my-terraform-state\"\n    key            = \"project/terraform.tfstate\"\n    region         = \"us-east-1\"\n    dynamodb_table = \"terraform-locks\"\n    encrypt        = true\n  }\n}\n\n# State operations\nterraform state list                    # List all resources in state\nterraform state show aws_instance.app   # Show resource details\nterraform state mv aws_instance.app aws_instance.web  # Rename resource\nterraform state rm aws_instance.old     # Remove from state (doesn't destroy)\n\n# Import existing resource into Terraform\nterraform import aws_instance.app i-1234567890abcdef0\n\n# Workspaces (multiple environments, same config)\nterraform workspace new dev\nterraform workspace new staging\nterraform workspace new prod\nterraform workspace select dev\nterraform workspace list\n\nMulti-Environment Pattern\n# Use workspaces + tfvars files\n# terraform.tfvars (default)\n# env/dev.tfvars\n# env/staging.tfvars\n# env/prod.tfvars\n\n# Apply for specific 
environment\n# terraform apply -var-file=env/prod.tfvars\n\n# Environment-specific apply\nENV=${1:-dev}\nterraform workspace select \"$ENV\" || terraform workspace new \"$ENV\"\nterraform apply -var-file=\"env/$ENV.tfvars\"\n\nAWS CloudFormation\nTemplate Structure\n# cloudformation.yaml\nAWSTemplateFormatVersion: '2010-09-09'\nDescription: My application stack\n\nParameters:\n  Environment:\n    Type: String\n    AllowedValues: [dev, staging, prod]\n    Default: dev\n  InstanceType:\n    Type: String\n    Default: t3.micro\n\nConditions:\n  IsProd: !Equals [!Ref Environment, prod]\n\nResources:\n  VPC:\n    Type: AWS::EC2::VPC\n    Properties:\n      CidrBlock: 10.0.0.0/16\n      EnableDnsSupport: true\n      EnableDnsHostnames: true\n      Tags:\n        - Key: Name\n          Value: !Sub \"${AWS::StackName}-vpc\"\n\n  PublicSubnet:\n    Type: AWS::EC2::Subnet\n    Properties:\n      VpcId: !Ref VPC\n      CidrBlock: 10.0.1.0/24\n      MapPublicIpOnLaunch: true\n      Tags:\n        - Key: Name\n          Value: !Sub \"${AWS::StackName}-public\"\n\n  AppInstance:\n    Type: AWS::EC2::Instance\n    Properties:\n      InstanceType: !Ref InstanceType\n      SubnetId: !Ref PublicSubnet\n      ImageId: !FindInMap [RegionAMI, !Ref \"AWS::Region\", ubuntu]\n\n  Database:\n    Type: AWS::RDS::DBInstance\n    Condition: IsProd\n    DeletionPolicy: Snapshot\n    Properties:\n      Engine: postgres\n      DBInstanceClass: db.t3.micro\n      AllocatedStorage: 20\n      MasterUsername: admin\n      MasterUserPassword: !Ref DBPassword\n\nOutputs:\n  VpcId:\n    Value: !Ref VPC\n    Export:\n      Name: !Sub \"${AWS::StackName}-VpcId\"\n  InstanceIP:\n    Value: !GetAtt AppInstance.PublicIp\n\nCloudFormation CLI\n# Validate template\naws cloudformation validate-template --template-body file://cloudformation.yaml\n\n# Create stack\naws cloudformation create-stack \\\n  --stack-name myapp-dev \\\n  --template-body file://cloudformation.yaml \\\n  --parameters 
ParameterKey=Environment,ParameterValue=dev \\\n  --capabilities CAPABILITY_IAM\n\n# Update stack\naws cloudformation update-stack \\\n  --stack-name myapp-dev \\\n  --template-body file://cloudformation.yaml \\\n  --parameters ParameterKey=Environment,ParameterValue=dev\n\n# Preview changes (changeset)\naws cloudformation create-change-set \\\n  --stack-name myapp-dev \\\n  --change-set-name update-1 \\\n  --template-body file://cloudformation.yaml\n\naws cloudformation describe-change-set \\\n  --stack-name myapp-dev \\\n  --change-set-name update-1\n\n# Delete stack\naws cloudformation delete-stack --stack-name myapp-dev\n\n# List stacks\naws cloudformation list-stacks --stack-status-filter CREATE_COMPLETE UPDATE_COMPLETE\n\n# Stack events (debugging)\naws cloudformation describe-stack-events --stack-name myapp-dev | head -50\n\nPulumi (Code-First IaC)\nQuick Start (TypeScript)\n# Install: https://www.pulumi.com/docs/install/\npulumi new aws-typescript\n\n# Core workflow\npulumi preview    # Preview changes\npulumi up         # Apply changes\npulumi destroy    # Tear down\npulumi stack ls   # List stacks\n\nTypeScript Example\n// index.ts\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\n\nconst config = new pulumi.Config();\nconst environment = config.require(\"environment\");\n\n// VPC\nconst vpc = new aws.ec2.Vpc(\"main\", {\n  cidrBlock: \"10.0.0.0/16\",\n  enableDnsSupport: true,\n  enableDnsHostnames: true,\n  tags: { Name: `myapp-${environment}-vpc` },\n});\n\n// Public subnet\nconst publicSubnet = new aws.ec2.Subnet(\"public\", {\n  vpcId: vpc.id,\n  cidrBlock: \"10.0.1.0/24\",\n  mapPublicIpOnLaunch: true,\n  tags: { Name: `myapp-${environment}-public` },\n});\n\n// S3 bucket\nconst bucket = new aws.s3.Bucket(\"data\", {\n  bucket: `myapp-${environment}-data`,\n  versioning: { enabled: true },\n});\n\n// Lambda function\nconst lambdaRole = new aws.iam.Role(\"lambda-role\", {\n  assumeRolePolicy: JSON.stringify({\n    
Version: \"2012-10-17\",\n    Statement: [{\n      Action: \"sts:AssumeRole\",\n      Effect: \"Allow\",\n      Principal: { Service: \"lambda.amazonaws.com\" },\n    }],\n  }),\n});\n\nconst lambdaFunc = new aws.lambda.Function(\"api\", {\n  runtime: \"nodejs20.x\",\n  handler: \"index.handler\",\n  role: lambdaRole.arn,\n  code: new pulumi.asset.FileArchive(\"./lambda\"),\n  environment: {\n    variables: {\n      BUCKET_NAME: bucket.id,\n      ENVIRONMENT: environment,\n    },\n  },\n});\n\n// Outputs\nexport const vpcId = vpc.id;\nexport const bucketName = bucket.id;\nexport const lambdaArn = lambdaFunc.arn;\n\nPython Example\n# __main__.py\nimport pulumi\nimport pulumi_aws as aws\n\nconfig = pulumi.Config()\nenvironment = config.require(\"environment\")\n\nvpc = aws.ec2.Vpc(\"main\",\n    cidr_block=\"10.0.0.0/16\",\n    enable_dns_support=True,\n    enable_dns_hostnames=True,\n    tags={\"Name\": f\"myapp-{environment}-vpc\"})\n\nbucket = aws.s3.Bucket(\"data\",\n    bucket=f\"myapp-{environment}-data\",\n    versioning=aws.s3.BucketVersioningArgs(enabled=True))\n\npulumi.export(\"vpc_id\", vpc.id)\npulumi.export(\"bucket_name\", bucket.id)\n\nPulumi State and Stacks\n# Create per-environment stacks\npulumi stack init dev\npulumi stack init staging\npulumi stack init prod\n\n# Switch stack\npulumi stack select dev\n\n# Set config per stack\npulumi config set environment dev\npulumi config set aws:region us-east-1\npulumi config set --secret dbPassword 'my-secret-pass'\n\n# Stack references (cross-stack)\n# In consuming stack:\nconst infra = new pulumi.StackReference(\"org/infra/prod\");\nconst vpcId = infra.getOutput(\"vpcId\");\n\nDebugging Infrastructure\nTerraform plan issues\n# Detailed plan output\nterraform plan -out=plan.tfplan\nterraform show plan.tfplan\nterraform show -json plan.tfplan | jq '.resource_changes[] | {address, change: .change.actions}'\n\n# Debug mode\nTF_LOG=DEBUG terraform plan 2> debug.log\n\n# Check for drift\nterraform plan 
-refresh-only\n\n# Force refresh state\nterraform apply -refresh-only\n\nCommon issues\n# Resource stuck in \"tainted\" state\nterraform untaint aws_instance.app\n\n# State locked (another apply running or crashed)\nterraform force-unlock LOCK_ID\n\n# Provider version conflict\nterraform providers lock    # Generate lock file\nterraform init -upgrade     # Upgrade providers\n\n# Circular dependency\n# Error: \"Cycle\" in terraform plan\n# Fix: use depends_on explicitly, or break the cycle with data sources\n\nCost estimation\n# Infracost (estimates monthly cost from Terraform plans)\n# Install: https://www.infracost.io/docs/\ninfracost breakdown --path .\ninfracost diff --path . --compare-to infracost-base.json\n\nTips\nAlways run terraform plan before apply. Read the plan output carefully — especially lines showing destroy or replace.\nUse remote state from day one. Local state files get lost, can't be shared, and have no locking.\nTag everything. At minimum: Project, Environment, ManagedBy. Tags make cost tracking and cleanup possible.\nNever store secrets in .tf files or terraform.tfvars. Use environment variables (TF_VAR_name), secrets managers, or Vault.\nUse prevent_destroy lifecycle rules on stateful resources (databases, S3 buckets with data) to prevent accidental deletion.\nPin provider versions (~> 5.0 not >= 5.0) to avoid surprise breaking changes.\nFor multi-environment setups, prefer workspaces + var files over duplicated configurations.\nCloudFormation change sets are the equivalent of terraform plan — always create one before updating a stack.\nPulumi's advantage is using real programming languages (loops, conditionals, type checking). Use it when Terraform's HCL feels limiting."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/gitgoodordietrying/infra-as-code",
    "publisherUrl": "https://clawhub.ai/gitgoodordietrying/infra-as-code",
    "owner": "gitgoodordietrying",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/infra-as-code",
    "downloadUrl": "https://openagent3.xyz/downloads/infra-as-code",
    "agentUrl": "https://openagent3.xyz/skills/infra-as-code/agent",
    "manifestUrl": "https://openagent3.xyz/skills/infra-as-code/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/infra-as-code/agent.md"
  }
}