Terraform Module Design: Reusable Modules, Variable Validation, and Workspace Patterns
Design production-grade Terraform modules: write reusable modules with validated inputs, typed outputs, and sensible defaults; manage multi-environment deployments with workspaces or per-environment directories; and test with Terratest.
Terraform projects start simple and grow into a mess. Repeated resource blocks across environments, hardcoded values that should be variables, no tests, and a root module that's 2,000 lines long. The fix is a module architecture designed before the project grows, not after.
A well-designed Terraform module is a building block with a clear interface: validated inputs, typed outputs, sensible defaults, and no opinions about the caller's environment. The caller brings the environment-specific values; the module brings the infrastructure pattern.
Module Structure
terraform/
├── modules/                    # Reusable modules (no environment knowledge)
│   ├── ecs-service/
│   │   ├── main.tf
│   │   ├── variables.tf
│   │   ├── outputs.tf
│   │   └── README.md
│   ├── rds-postgres/
│   │   ├── main.tf
│   │   ├── variables.tf
│   │   └── outputs.tf
│   └── alb/
│       ├── main.tf
│       ├── variables.tf
│       └── outputs.tf
├── environments/               # Environment-specific root modules
│   ├── production/
│   │   ├── main.tf             # Calls modules with production values
│   │   ├── variables.tf
│   │   ├── terraform.tfvars    # Non-secret values
│   │   └── backend.tf          # Remote state config
│   └── staging/
│       ├── main.tf
│       └── backend.tf
└── tests/
    └── ecs_service_test.go     # Terratest
Writing a Reusable Module
# modules/ecs-service/variables.tf

variable "name" {
  description = "Name of the ECS service. Used as a prefix for all resources."
  type        = string

  validation {
    condition     = can(regex("^[a-z][a-z0-9-]{1,38}[a-z0-9]$", var.name))
    error_message = "Service name must be 3-40 characters, lowercase alphanumeric and hyphens, and must start with a letter."
  }
}

variable "environment" {
  description = "Deployment environment."
  type        = string

  validation {
    condition     = contains(["development", "staging", "production"], var.environment)
    error_message = "Environment must be one of: development, staging, production."
  }
}

variable "docker_image" {
  description = "Full Docker image URI including tag. Example: 123456789.dkr.ecr.us-east-1.amazonaws.com/myapp:1.2.3"
  type        = string

  validation {
    condition     = can(regex("^[^\\s]+:[^\\s]+$", var.docker_image))
    error_message = "docker_image must include a tag (e.g., image:tag)."
  }
}

variable "cpu" {
  description = "CPU units for the task (256 = 0.25 vCPU). Valid values: 256, 512, 1024, 2048, 4096."
  type        = number
  default     = 256

  validation {
    condition     = contains([256, 512, 1024, 2048, 4096], var.cpu)
    error_message = "CPU must be one of: 256, 512, 1024, 2048, 4096."
  }
}

variable "memory" {
  description = "Memory in MiB. Must be compatible with the cpu value (see AWS Fargate task size docs)."
  type        = number
  default     = 512
}

variable "desired_count" {
  description = "Desired number of tasks. Use 0 to stop the service."
  type        = number
  default     = 1

  validation {
    condition     = var.desired_count >= 0 && var.desired_count <= 100
    error_message = "desired_count must be between 0 and 100."
  }
}

variable "health_check_path" {
  description = "HTTP path for ALB health checks."
  type        = string
  default     = "/health"

  validation {
    condition     = startswith(var.health_check_path, "/")
    error_message = "health_check_path must start with '/'."
  }
}

variable "environment_variables" {
  description = "Non-secret environment variables for the container."
  type        = map(string)
  default     = {}
}

variable "secrets" {
  description = "Secret environment variables. Values are Secrets Manager ARNs or SSM Parameter Store ARNs."
  type        = map(string)
  default     = {}
  sensitive   = true
}

variable "vpc_id" {
  description = "VPC ID where the service will be deployed."
  type        = string
}

variable "private_subnet_ids" {
  description = "List of private subnet IDs for the ECS tasks."
  type        = list(string)

  validation {
    condition     = length(var.private_subnet_ids) >= 2
    error_message = "At least 2 private subnets are required for high availability."
  }
}

variable "tags" {
  description = "Tags to apply to all resources. Merged with module-generated tags."
  type        = map(string)
  default     = {}
}
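The memory variable above only documents its compatibility constraint with cpu. Since the root modules below pin required_version = ">= 1.9.0", and Terraform 1.9 allows validation conditions to reference other variables, the constraint can be enforced directly by replacing the memory variable with something like this sketch (the size table is a simplified subset of the AWS Fargate task size documentation):

# Sketch: cross-variable validation (requires Terraform >= 1.9).
# The map covers only a subset of valid Fargate cpu/memory pairs.
variable "memory" {
  description = "Memory in MiB. Must be compatible with the cpu value."
  type        = number
  default     = 512

  validation {
    condition = contains(
      lookup({
        "256"  = [512, 1024, 2048]
        "512"  = [1024, 2048, 3072, 4096]
        "1024" = [2048, 3072, 4096, 5120, 6144, 7168, 8192]
      }, tostring(var.cpu), []),
      var.memory
    )
    error_message = "memory is not a valid Fargate size for the selected cpu (or the cpu value is not covered by this table)."
  }
}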
# modules/ecs-service/main.tf

locals {
  # Consistent naming: {name}-{environment}
  full_name = "${var.name}-${var.environment}"

  # Merge caller tags with module-required tags
  common_tags = merge(var.tags, {
    Module      = "ecs-service"
    Service     = var.name
    Environment = var.environment
    ManagedBy   = "terraform"
  })

  # Build container environment from variables
  container_environment = [
    for k, v in var.environment_variables : {
      name  = k
      value = v
    }
  ]

  # Build secrets from ARNs
  container_secrets = [
    for k, v in var.secrets : {
      name      = k
      valueFrom = v
    }
  ]
}

# ECS Task Definition
resource "aws_ecs_task_definition" "this" {
  family                   = local.full_name
  requires_compatibilities = ["FARGATE"]
  network_mode             = "awsvpc"
  cpu                      = var.cpu
  memory                   = var.memory
  execution_role_arn       = aws_iam_role.execution.arn
  task_role_arn            = aws_iam_role.task.arn

  container_definitions = jsonencode([
    {
      name      = var.name
      image     = var.docker_image
      essential = true

      # NOTE: the container port (3000) is hardcoded here and in the health
      # check below; expose it as a variable if callers use different ports.
      portMappings = [
        {
          containerPort = 3000
          protocol      = "tcp"
        }
      ]

      environment = local.container_environment
      secrets     = local.container_secrets

      logConfiguration = {
        logDriver = "awslogs"
        options = {
          "awslogs-group"         = "/ecs/${local.full_name}"
          "awslogs-region"        = data.aws_region.current.name
          "awslogs-stream-prefix" = "ecs"
        }
      }

      healthCheck = {
        command     = ["CMD-SHELL", "curl -f http://localhost:3000${var.health_check_path} || exit 1"]
        interval    = 30
        timeout     = 5
        retries     = 3
        startPeriod = 60
      }
    }
  ])

  tags = local.common_tags
}

# ECS Service
resource "aws_ecs_service" "this" {
  name            = local.full_name
  cluster         = data.aws_ecs_cluster.main.id
  task_definition = aws_ecs_task_definition.this.arn
  desired_count   = var.desired_count
  launch_type     = "FARGATE"

  network_configuration {
    subnets          = var.private_subnet_ids
    security_groups  = [aws_security_group.ecs_tasks.id]
    assign_public_ip = false
  }

  # Prevent desired_count drift when autoscaling is enabled
  lifecycle {
    ignore_changes = [desired_count]
  }

  tags = local.common_tags
}
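The excerpt above references companion resources defined elsewhere in the module: the execution and task IAM roles, the task security group, and the CloudWatch log group. For completeness, a minimal sketch of the latter two, with illustrative settings (retention and egress rules would be tuned per project):

# Sketch of supporting resources the excerpt references; settings are illustrative.
resource "aws_cloudwatch_log_group" "this" {
  name              = "/ecs/${local.full_name}"
  retention_in_days = 30
  tags              = local.common_tags
}

resource "aws_security_group" "ecs_tasks" {
  name_prefix = "${local.full_name}-tasks-"
  description = "ECS tasks for ${local.full_name}"
  vpc_id      = var.vpc_id

  # Allow all outbound traffic; inbound rules are added by callers
  # (e.g., from the ALB security group) via the security_group_id output.
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = local.common_tags
}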
# modules/ecs-service/outputs.tf

output "service_name" {
  description = "Name of the ECS service."
  value       = aws_ecs_service.this.name
}

output "service_arn" {
  description = "ARN of the ECS service. Use to configure autoscaling targets."
  value       = aws_ecs_service.this.id
}

output "task_definition_arn" {
  description = "ARN of the current task definition."
  value       = aws_ecs_task_definition.this.arn
}

output "task_role_arn" {
  description = "ARN of the IAM role assigned to the task. Attach additional policies to grant AWS access."
  value       = aws_iam_role.task.arn
}

output "task_role_name" {
  description = "Name of the IAM role assigned to the task. Use with aws_iam_role_policy, which expects a role name rather than an ARN."
  value       = aws_iam_role.task.name
}

output "security_group_id" {
  description = "Security group ID for the ECS tasks. Reference this to allow inbound traffic."
  value       = aws_security_group.ecs_tasks.id
}

output "cloudwatch_log_group" {
  description = "CloudWatch log group name for container logs."
  value       = aws_cloudwatch_log_group.this.name
}
Calling Modules from Environments
# environments/production/main.tf

terraform {
  required_version = ">= 1.9.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }

  backend "s3" {
    bucket         = "viprasol-terraform-state"
    key            = "production/terraform.tfstate"
    region         = "us-east-1"
    dynamodb_table = "terraform-state-lock"
    encrypt        = true
  }
}

provider "aws" {
  region = "us-east-1"

  default_tags {
    tags = {
      Project     = "viprasol-platform"
      Environment = "production"
      ManagedBy   = "terraform"
    }
  }
}

# Call the ECS service module
module "api_server" {
  source = "../../modules/ecs-service"

  name          = "api-server"
  environment   = "production"
  docker_image  = var.api_docker_image # Passed from CI/CD
  cpu           = 512
  memory        = 1024
  desired_count = 3

  vpc_id             = data.aws_vpc.main.id
  private_subnet_ids = data.aws_subnets.private.ids

  environment_variables = {
    NODE_ENV  = "production"
    LOG_LEVEL = "warn"
    API_URL   = "https://api.viprasol.com"
  }

  secrets = {
    DATABASE_URL = data.aws_ssm_parameter.database_url.arn
    REDIS_URL    = data.aws_ssm_parameter.redis_url.arn
    JWT_SECRET   = data.aws_secretsmanager_secret.jwt_secret.arn
  }

  tags = {
    Team       = "platform"
    CostCenter = "engineering"
  }
}

# Grant the API task access to S3
resource "aws_iam_role_policy" "api_s3" {
  name = "api-s3-access"
  # aws_iam_role_policy expects the role name, not the ARN
  role = module.api_server.task_role_name # Using module output

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect   = "Allow"
      Action   = ["s3:GetObject", "s3:PutObject", "s3:DeleteObject"]
      Resource = ["${aws_s3_bucket.uploads.arn}/*"]
    }]
  })
}
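The api_docker_image value comes from CI/CD, so the root module must declare the variable. A minimal sketch (the validation mirrors the module's own docker_image check):

# environments/production/variables.tf (sketch)
variable "api_docker_image" {
  description = "API image URI, passed by CI/CD via -var or TF_VAR_api_docker_image."
  type        = string

  validation {
    condition     = can(regex("^[^\\s]+:[^\\s]+$", var.api_docker_image))
    error_message = "api_docker_image must include a tag."
  }
}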
Workspace vs Directory Strategy
# Workspace strategy: same code, different state
terraform workspace new staging
terraform workspace new production
terraform workspace select production
terraform apply -var-file="production.tfvars"

# Directory strategy: separate directories per environment (recommended for production)
# Pros: explicit separation, different module versions per environment, clearer diffs
# Cons: more files to maintain

# Use workspaces for: dev/staging that mirror each other exactly
# Use directories for: production (different configs, different module versions)
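If you do use workspaces, per-environment values can live in a locals map keyed off terraform.workspace instead of separate tfvars files. A sketch, with illustrative values:

# Sketch: keying configuration off the active workspace
locals {
  env_config = {
    staging = {
      cpu           = 256
      desired_count = 1
    }
    production = {
      cpu           = 512
      desired_count = 3
    }
  }

  # Fails fast if the active workspace has no entry in the map
  config = local.env_config[terraform.workspace]
}

module "api_server" {
  source = "../../modules/ecs-service"
  # ...other arguments as shown earlier...

  cpu           = local.config.cpu
  desired_count = local.config.desired_count
}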
Testing Modules with Terratest
// tests/ecs_service_test.go
package test

import (
	"testing"

	"github.com/gruntwork-io/terratest/modules/terraform"
	"github.com/stretchr/testify/assert"
)

func TestECSServiceModule(t *testing.T) {
	t.Parallel()

	terraformOptions := &terraform.Options{
		TerraformDir: "../modules/ecs-service",
		Vars: map[string]interface{}{
			"name":               "test-service",
			"environment":        "development",
			"docker_image":       "nginx:latest",
			"cpu":                256,
			"memory":             512,
			"vpc_id":             "vpc-test123",
			"private_subnet_ids": []string{"subnet-a", "subnet-b"},
		},
		// Plan only, never apply; InitAndPlanAndShowWithStruct needs a plan file path
		PlanFilePath: "/tmp/plan.out",
	}

	// Validate the plan. Nothing is applied, so there is nothing to destroy.
	planStruct := terraform.InitAndPlanAndShowWithStruct(t, terraformOptions)

	// Assert the ECS service will be created
	ecs := planStruct.ResourceChangesMap["aws_ecs_service.this"]
	assert.True(t, ecs.Change.Actions.Create())

	// Assert task definition CPU is correct. The AWS provider models cpu and
	// memory on aws_ecs_task_definition as strings, so compare against "256".
	taskDef := planStruct.ResourceChangesMap["aws_ecs_task_definition.this"]
	after := taskDef.Change.After.(map[string]interface{})
	assert.Equal(t, "256", after["cpu"])
}
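A typical invocation from the repository root is shown below. Note that even a plan-only test initializes providers and evaluates the module's data sources, so AWS credentials must be available in the environment:

go test -v -timeout 30m ./tests/ -run TestECSServiceModule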