Infrastructure as Code Security: Terraform Security Best Practices
Infrastructure as Code (IaC) makes infrastructure reproducible and version-controlled, but a misconfiguration in code is replicated everywhere that code is applied. This guide covers security practices for Terraform deployments: static scanning, policy as code, secrets management, state file protection, and secure module design.
Security Scanning with tfsec
Basic tfsec Configuration
# .tfsec.yml
minimum_severity: MEDIUM
exclude:
  - aws-vpc-no-excessive-port-access # Handled by security groups
severity_overrides:
  AWS001: ERROR   # S3 bucket ACL allows public access
  AWS002: ERROR   # S3 bucket without logging
  AWS017: WARNING # S3 bucket without encryption
custom_checks_dir: .tfsec/custom_checks

Common Security Findings and Fixes
# INSECURE: S3 bucket without encryption
resource "aws_s3_bucket" "data" {
bucket = "my-data-bucket"
}
# SECURE: S3 bucket with encryption and security controls
resource "aws_s3_bucket" "data" {
bucket = "my-data-bucket"
tags = {
Environment = "production"
DataClass = "confidential"
}
}
resource "aws_s3_bucket_server_side_encryption_configuration" "data" {
bucket = aws_s3_bucket.data.id
rule {
apply_server_side_encryption_by_default {
kms_master_key_id = aws_kms_key.s3_key.arn
sse_algorithm = "aws:kms"
}
bucket_key_enabled = true
}
}
resource "aws_s3_bucket_public_access_block" "data" {
bucket = aws_s3_bucket.data.id
block_public_acls = true
block_public_policy = true
ignore_public_acls = true
restrict_public_buckets = true
}
resource "aws_s3_bucket_versioning" "data" {
bucket = aws_s3_bucket.data.id
versioning_configuration {
status = "Enabled"
}
}
resource "aws_s3_bucket_logging" "data" {
bucket = aws_s3_bucket.data.id
target_bucket = aws_s3_bucket.logs.id
target_prefix = "s3-access-logs/data-bucket/"
}
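# The secure example above references a customer-managed KMS key and a log
# bucket that are not shown; minimal sketches of those supporting resources
# (names and values are illustrative) might look like:
resource "aws_kms_key" "s3_key" {
  description             = "KMS key for S3 data bucket encryption"
  deletion_window_in_days = 30
  enable_key_rotation     = true
}

resource "aws_s3_bucket" "logs" {
  bucket = "my-data-bucket-access-logs"
}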
# INSECURE: Security group with wide open ingress
resource "aws_security_group" "web" {
name = "web-sg"
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"] # tfsec:ignore:aws-vpc-no-public-ingress-sgr
}
}
# SECURE: Security group with specific rules
resource "aws_security_group" "web" {
name = "web-sg"
description = "Security group for web tier"
vpc_id = aws_vpc.main.id
tags = {
Name = "web-security-group"
}
}
resource "aws_security_group_rule" "web_https" {
type = "ingress"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = var.allowed_cidrs
security_group_id = aws_security_group.web.id
description = "HTTPS from allowed networks"
}
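# var.allowed_cidrs above is assumed to be declared along these lines, with
# a guard against accidentally opening the rule to the world:
variable "allowed_cidrs" {
  description = "CIDR blocks permitted to reach the web tier"
  type        = list(string)

  validation {
    condition     = !contains(var.allowed_cidrs, "0.0.0.0/0")
    error_message = "0.0.0.0/0 is not an acceptable entry in allowed_cidrs."
  }
}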
resource "aws_security_group_rule" "web_egress" {
type = "egress"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.web.id
description = "Allow all outbound traffic"
}

Policy as Code with OPA
Terraform Policy Definition
# policy/terraform/security.rego
package terraform.security
import future.keywords.in
# Deny unencrypted S3 buckets
deny[msg] {
resource := input.resource_changes[_]
resource.type == "aws_s3_bucket"
resource.change.after.server_side_encryption_configuration == null
msg := sprintf(
"S3 bucket '%s' must have server-side encryption enabled",
[resource.address]
)
}
# Deny public S3 buckets
deny[msg] {
resource := input.resource_changes[_]
resource.type == "aws_s3_bucket"
not has_public_access_block(resource.address)
msg := sprintf(
"S3 bucket '%s' must have public access block configured",
[resource.address]
)
}
has_public_access_block(bucket_address) {
resource := input.resource_changes[_]
resource.type == "aws_s3_bucket_public_access_block"
contains(resource.address, bucket_address)
}
# Deny unencrypted RDS instances
deny[msg] {
resource := input.resource_changes[_]
resource.type == "aws_db_instance"
resource.change.after.storage_encrypted != true
msg := sprintf(
"RDS instance '%s' must have storage encryption enabled",
[resource.address]
)
}
# Deny RDS instances without deletion protection
deny[msg] {
resource := input.resource_changes[_]
resource.type == "aws_db_instance"
resource.change.after.deletion_protection != true
msg := sprintf(
"RDS instance '%s' must have deletion protection enabled",
[resource.address]
)
}
# Require specific tags
required_tags := {"Environment", "Owner", "CostCenter"}
deny[msg] {
resource := input.resource_changes[_]
resource.change.after.tags != null
missing := required_tags - {tag | resource.change.after.tags[tag]}
count(missing) > 0
msg := sprintf(
"Resource '%s' is missing required tags: %v",
[resource.address, missing]
)
}
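# The rule above only fires when a tags map is present, so a resource created
# with no tags at all would slip through. A companion rule closes that gap;
# taggable_types is an assumed, deliberately incomplete allow-list:
taggable_types := {"aws_s3_bucket", "aws_db_instance", "aws_instance"}

deny[msg] {
    resource := input.resource_changes[_]
    resource.type in taggable_types
    resource.change.after.tags == null
    msg := sprintf(
        "Resource '%s' has no tags; required tags: %v",
        [resource.address, required_tags]
    )
}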
# Deny overly permissive IAM policies
deny[msg] {
resource := input.resource_changes[_]
resource.type == "aws_iam_policy"
policy := json.unmarshal(resource.change.after.policy)
statement := policy.Statement[_]
statement.Effect == "Allow"
statement.Action[_] == "*"
statement.Resource == "*"
msg := sprintf(
"IAM policy '%s' grants overly permissive access",
[resource.address]
)
}
# Deny EC2 instances without IMDSv2
deny[msg] {
resource := input.resource_changes[_]
resource.type == "aws_instance"
not resource.change.after.metadata_options[0].http_tokens == "required"
msg := sprintf(
"EC2 instance '%s' must require IMDSv2",
[resource.address]
)
}

OPA Integration in CI/CD
# .github/workflows/terraform-security.yml
name: Terraform Security

on:
  pull_request:
    paths:
      - '**.tf'
      - '**.tfvars'

jobs:
  security-scan:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      security-events: write # required for the SARIF upload step
    steps:
      - uses: actions/checkout@v4

      - name: Setup Terraform
        uses: hashicorp/setup-terraform@v3
        with:
          terraform_version: 1.6.0

      - name: Terraform Init
        run: terraform init -backend=false

      # Assumes provider credentials are available to the job (e.g., via OIDC)
      - name: Terraform Plan
        run: terraform plan -out=tfplan.binary

      - name: Convert Plan to JSON
        run: terraform show -json tfplan.binary > tfplan.json

      - name: Setup OPA
        uses: open-policy-agent/setup-opa@v2
        with:
          version: latest

      # --fail-defined exits non-zero when the deny set is non-empty, so
      # violations are printed and the job fails in a single step
      - name: Run OPA Policy Check
        run: |
          opa eval --format pretty \
            --data policy/terraform/ \
            --input tfplan.json \
            --fail-defined \
            "data.terraform.security.deny[msg]"

      - name: Run tfsec
        uses: aquasecurity/tfsec-action@v1.0.3
        with:
          soft_fail: false
          additional_args: --minimum-severity MEDIUM

      - name: Run Checkov
        uses: bridgecrewio/checkov-action@v12
        with:
          directory: .
          framework: terraform
          soft_fail: false
          output_format: sarif
          output_file_path: checkov-results.sarif

      - name: Upload SARIF
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: checkov-results.sarif

Secrets Management
Using HashiCorp Vault with Terraform
# providers.tf
provider "vault" {
address = var.vault_addr
# Use VAULT_TOKEN environment variable or AppRole
auth_login {
path = "auth/approle/login"
parameters = {
role_id = var.vault_role_id
secret_id = var.vault_secret_id
}
}
}
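# The provider block above assumes these variables; marking the AppRole
# SecretID as sensitive keeps it out of CLI and log output:
variable "vault_addr" {
  description = "Vault server address"
  type        = string
}

variable "vault_role_id" {
  description = "AppRole RoleID"
  type        = string
}

variable "vault_secret_id" {
  description = "AppRole SecretID"
  type        = string
  sensitive   = true
}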
# secrets.tf
data "vault_kv_secret_v2" "database" {
mount = "secret"
name = "database/production"
}
resource "aws_db_instance" "main" {
identifier = "production-db"
engine = "postgres"
engine_version = "15.4"
instance_class = "db.r6g.large"
username = data.vault_kv_secret_v2.database.data["username"]
password = data.vault_kv_secret_v2.database.data["password"]
# Security settings
storage_encrypted = true
deletion_protection = true
vpc_security_group_ids = [aws_security_group.database.id]
db_subnet_group_name = aws_db_subnet_group.main.name
}
# Dynamic database credentials: a read of database/creds/<role> generates
# short-lived credentials; the generic secret data source works for any
# readable Vault path, including dynamic database roles
data "vault_generic_secret" "app_db" {
  path = "database/creds/app-readonly"
}

resource "kubernetes_secret" "db_credentials" {
  metadata {
    name      = "db-credentials"
    namespace = "production"
  }

  # NOTE: values read from Vault are persisted in Terraform state;
  # treat the state file as sensitive (see State File Security below)
  data = {
    username = data.vault_generic_secret.app_db.data["username"]
    password = data.vault_generic_secret.app_db.data["password"]
  }
}

Using AWS Secrets Manager
# Create secret
resource "aws_secretsmanager_secret" "api_key" {
name = "production/api-key"
description = "API key for external service"
recovery_window_in_days = 7
tags = {
Environment = "production"
ManagedBy = "terraform"
}
}
resource "aws_secretsmanager_secret_version" "api_key" {
secret_id = aws_secretsmanager_secret.api_key.id
secret_string = jsonencode({
api_key = var.api_key # Pass via TF_VAR or Vault
api_secret = var.api_secret
})
lifecycle {
ignore_changes = [secret_string] # Prevent drift after rotation
}
}
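# var.api_key and var.api_secret are assumed to be declared as sensitive
# variables (see Sensitive Variable Handling below):
variable "api_key" {
  description = "API key for the external service"
  type        = string
  sensitive   = true
}

variable "api_secret" {
  description = "API secret for the external service"
  type        = string
  sensitive   = true
}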
# Secret rotation
resource "aws_secretsmanager_secret_rotation" "api_key" {
secret_id = aws_secretsmanager_secret.api_key.id
rotation_lambda_arn = aws_lambda_function.rotation.arn
rotation_rules {
automatically_after_days = 30
}
}
# Reference secret in other resources
data "aws_secretsmanager_secret_version" "db_credentials" {
secret_id = "production/database"
}
locals {
db_credentials = jsondecode(data.aws_secretsmanager_secret_version.db_credentials.secret_string)
}

Sensitive Variable Handling
# variables.tf
variable "database_password" {
description = "Database master password"
type = string
sensitive = true
validation {
condition = length(var.database_password) >= 16
error_message = "Database password must be at least 16 characters"
}
}
variable "api_keys" {
description = "API keys for external services"
type = map(object({
key = string
secret = string
}))
sensitive = true
}
# Use random password generation instead of static passwords
resource "random_password" "database" {
length = 32
special = true
# Exclude problematic characters
override_special = "!#$%&*()-_=+[]{}<>:?"
}
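# The version resource below assumes a containing secret; a minimal sketch:
resource "aws_secretsmanager_secret" "database" {
  name                    = "production/database"
  recovery_window_in_days = 7
}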
# Store generated password in Secrets Manager
resource "aws_secretsmanager_secret_version" "database" {
secret_id = aws_secretsmanager_secret.database.id
secret_string = jsonencode({
username = "admin"
password = random_password.database.result
host = aws_db_instance.main.address
port = aws_db_instance.main.port
database = aws_db_instance.main.db_name
})
}

State File Security
Remote State with Encryption
# backend.tf
terraform {
backend "s3" {
bucket = "myorg-terraform-state"
key = "production/infrastructure/terraform.tfstate"
region = "us-east-1"
encrypt = true
kms_key_id = "alias/terraform-state-key"
dynamodb_table = "terraform-state-lock"
# Assume role for state access
role_arn = "arn:aws:iam::123456789012:role/TerraformStateAccess"
}
}
# State bucket configuration (in separate bootstrap config)
resource "aws_s3_bucket" "terraform_state" {
bucket = "myorg-terraform-state"
lifecycle {
prevent_destroy = true
}
}
resource "aws_s3_bucket_versioning" "terraform_state" {
bucket = aws_s3_bucket.terraform_state.id
versioning_configuration {
status = "Enabled"
}
}
resource "aws_s3_bucket_server_side_encryption_configuration" "terraform_state" {
bucket = aws_s3_bucket.terraform_state.id
rule {
apply_server_side_encryption_by_default {
kms_master_key_id = aws_kms_key.terraform_state.arn
sse_algorithm = "aws:kms"
}
bucket_key_enabled = true
}
}
resource "aws_s3_bucket_public_access_block" "terraform_state" {
bucket = aws_s3_bucket.terraform_state.id
block_public_acls = true
block_public_policy = true
ignore_public_acls = true
restrict_public_buckets = true
}
# State lock table
resource "aws_dynamodb_table" "terraform_state_lock" {
name = "terraform-state-lock"
billing_mode = "PAY_PER_REQUEST"
hash_key = "LockID"
attribute {
name = "LockID"
type = "S"
}
server_side_encryption {
enabled = true
kms_key_arn = aws_kms_key.terraform_state.arn
}
}
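# The key policy below interpolates the current AWS account ID
data "aws_caller_identity" "current" {}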
# KMS key for state encryption
resource "aws_kms_key" "terraform_state" {
description = "KMS key for Terraform state encryption"
deletion_window_in_days = 30
enable_key_rotation = true
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Sid = "Enable IAM User Permissions"
Effect = "Allow"
Principal = {
AWS = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"
}
Action = "kms:*"
Resource = "*"
},
{
Sid = "Allow Terraform Role"
Effect = "Allow"
Principal = {
AWS = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/TerraformStateAccess"
}
Action = [
"kms:Encrypt",
"kms:Decrypt",
"kms:GenerateDataKey"
]
Resource = "*"
}
]
})
}

State Access IAM Policy
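The backend configuration earlier assumes a TerraformStateAccess role exists. A minimal sketch of that role and its attachment to the policy defined below (the trust policy shown is an assumption; scope it to your actual CI identity):
resource "aws_iam_role" "terraform_state_access" {
  name = "TerraformStateAccess"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow"
        Principal = {
          AWS = "arn:aws:iam::123456789012:root"
        }
        Action = "sts:AssumeRole"
      }
    ]
  })
}

resource "aws_iam_role_policy_attachment" "terraform_state_access" {
  role       = aws_iam_role.terraform_state_access.name
  policy_arn = aws_iam_policy.terraform_state_access.arn
}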
# IAM policy for state access
resource "aws_iam_policy" "terraform_state_access" {
name = "TerraformStateAccess"
description = "Allow access to Terraform state"
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
  Sid    = "S3StateRead"
  Effect = "Allow"
  Action = [
    "s3:GetObject",
    "s3:DeleteObject"
  ]
  Resource = [
    "${aws_s3_bucket.terraform_state.arn}/*"
  ]
},
{
  # The encryption condition must apply only to writes; the header is never
  # present on GetObject, so a combined statement would deny all reads
  Sid    = "S3StateWrite"
  Effect = "Allow"
  Action = [
    "s3:PutObject"
  ]
  Resource = [
    "${aws_s3_bucket.terraform_state.arn}/*"
  ]
  Condition = {
    StringEquals = {
      "s3:x-amz-server-side-encryption" = "aws:kms"
    }
  }
},
{
Sid = "S3StateBucketAccess"
Effect = "Allow"
Action = [
"s3:ListBucket",
"s3:GetBucketVersioning"
]
Resource = [
aws_s3_bucket.terraform_state.arn
]
},
{
Sid = "DynamoDBLock"
Effect = "Allow"
Action = [
"dynamodb:GetItem",
"dynamodb:PutItem",
"dynamodb:DeleteItem"
]
Resource = [
aws_dynamodb_table.terraform_state_lock.arn
]
},
{
Sid = "KMSAccess"
Effect = "Allow"
Action = [
"kms:Encrypt",
"kms:Decrypt",
"kms:GenerateDataKey"
]
Resource = [
aws_kms_key.terraform_state.arn
]
}
]
})
}

Checkov Security Scanning
Checkov Configuration
# .checkov.yml
branch: main
check:
  - CKV_AWS_18  # S3 access logging
  - CKV_AWS_19  # S3 encryption at rest
  - CKV_AWS_20  # S3 no public read ACL
  - CKV_AWS_21  # S3 versioning
  - CKV_AWS_23  # Security group rule descriptions
  - CKV_AWS_24  # No world-open ingress to port 22
  - CKV_AWS_25  # No world-open ingress to port 3389
skip-check:
  - CKV_AWS_144 # S3 cross-region replication (not needed)
soft-fail: false
framework:
  - terraform
output:
  - cli
  - json
  - sarif
output-file-path: ./reports/
compact: true

Custom Checkov Policy
# custom_policies/aws_s3_require_encryption_key.py
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
from checkov.common.models.enums import CheckCategories, CheckResult


class S3BucketRequireKMSEncryption(BaseResourceCheck):
    def __init__(self):
        name = "Ensure S3 bucket uses KMS encryption (not AES256)"
        id = "CKV_CUSTOM_1"
        supported_resources = ["aws_s3_bucket_server_side_encryption_configuration"]
        categories = [CheckCategories.ENCRYPTION]
        super().__init__(name=name, id=id, categories=categories,
                         supported_resources=supported_resources)

    def scan_resource_conf(self, conf):
        rules = conf.get("rule", [])
        for rule in rules:
            if isinstance(rule, dict):
                default_encryption = rule.get("apply_server_side_encryption_by_default", [])
                for encryption in default_encryption:
                    if isinstance(encryption, dict):
                        algorithm = encryption.get("sse_algorithm", [""])[0]
                        if algorithm == "aws:kms":
                            kms_key = encryption.get("kms_master_key_id")
                            if kms_key and kms_key[0]:
                                return CheckResult.PASSED
        return CheckResult.FAILED


check = S3BucketRequireKMSEncryption()

Module Security
Secure Module Structure
# modules/secure-vpc/main.tf
resource "aws_vpc" "main" {
cidr_block = var.vpc_cidr
enable_dns_hostnames = true
enable_dns_support = true
tags = merge(var.tags, {
Name = var.vpc_name
})
}
# Enable VPC Flow Logs
resource "aws_flow_log" "main" {
vpc_id = aws_vpc.main.id
traffic_type = "ALL"
log_destination_type = "cloud-watch-logs"
log_destination = aws_cloudwatch_log_group.flow_logs.arn
iam_role_arn = aws_iam_role.flow_logs.arn
}
resource "aws_cloudwatch_log_group" "flow_logs" {
name = "/aws/vpc/flow-logs/${var.vpc_name}"
retention_in_days = var.flow_log_retention_days
kms_key_id = var.kms_key_arn
}
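# aws_iam_role.flow_logs, referenced above, is assumed to exist; a minimal
# sketch of the role VPC Flow Logs assumes to write into CloudWatch:
resource "aws_iam_role" "flow_logs" {
  name = "${var.vpc_name}-flow-logs"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow"
        Principal = { Service = "vpc-flow-logs.amazonaws.com" }
        Action = "sts:AssumeRole"
      }
    ]
  })
}

resource "aws_iam_role_policy" "flow_logs" {
  name = "flow-logs-write"
  role = aws_iam_role.flow_logs.id

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow"
        Action = [
          "logs:CreateLogStream",
          "logs:PutLogEvents",
          "logs:DescribeLogGroups",
          "logs:DescribeLogStreams"
        ]
        Resource = "${aws_cloudwatch_log_group.flow_logs.arn}:*"
      }
    ]
  })
}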
# Default security group - deny all
resource "aws_default_security_group" "default" {
vpc_id = aws_vpc.main.id
# No ingress or egress rules - effectively deny all
tags = {
Name = "default-deny-all"
}
}
# modules/secure-vpc/variables.tf
variable "vpc_cidr" {
description = "CIDR block for VPC"
type = string
validation {
condition = can(cidrhost(var.vpc_cidr, 0))
error_message = "VPC CIDR must be a valid CIDR block"
}
validation {
condition = tonumber(split("/", var.vpc_cidr)[1]) <= 24
error_message = "VPC CIDR must be /24 or larger"
}
}
variable "flow_log_retention_days" {
description = "Retention period for VPC flow logs"
type = number
default = 90
validation {
condition = var.flow_log_retention_days >= 30
error_message = "Flow log retention must be at least 30 days"
}
}
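# main.tf above also references var.vpc_name and var.tags; minimal
# declarations (assumed) for completeness:
variable "vpc_name" {
  description = "Name tag for the VPC"
  type        = string
}

variable "tags" {
  description = "Tags applied to all module resources"
  type        = map(string)
  default     = {}
}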
variable "kms_key_arn" {
description = "KMS key ARN for encryption"
type = string
validation {
condition = can(regex("^arn:aws:kms:", var.kms_key_arn))
error_message = "KMS key must be a valid ARN"
}
}

Best Practices Summary
- Scan early and often - Integrate tfsec, Checkov, and OPA in CI/CD
- Encrypt everything - State files, sensitive outputs, all data at rest
- Use remote state - Never commit state files to version control
- Implement least privilege - Minimal IAM permissions for Terraform
- Version pin providers - Prevent supply chain attacks (see the sketch after this list)
- Review plan output - Always review before applying changes
- Use modules - Encapsulate security best practices in reusable modules
- Tag resources - Enable cost tracking and security auditing
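As an illustration of provider pinning, a version-constraints block might look like the following (the versions shown are placeholders; pin to what you have tested):
terraform {
  required_version = ">= 1.6.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }
}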
By following these practices, you can significantly reduce security risks in your Infrastructure as Code deployments.