automated terminal push

lenape
2025-08-03 00:10:47 +00:00
parent abdf2e1156
commit 2923f2f15e
28 changed files with 2021 additions and 2613 deletions

Infrastructure/foundation/Jenkinsfile vendored Normal file

@@ -0,0 +1,747 @@
pipeline {
agent any
parameters {
choice(
name: 'ACTION',
choices: ['plan', 'apply', 'destroy', 'cleanup'],
description: 'Action to perform: plan (review), apply (deploy), destroy (remove infra), cleanup (remove bootstrap)'
)
booleanParam(
name: 'AUTO_APPROVE',
defaultValue: false,
description: 'Auto-approve terraform apply (use with caution)'
)
booleanParam(
name: 'SKIP_SONAR',
defaultValue: false,
description: 'Skip SonarQube analysis (not recommended)'
)
booleanParam(
name: 'SKIP_BOOTSTRAP',
defaultValue: false,
description: 'Skip bootstrap phase (S3/DynamoDB already exist)'
)
string(
name: 'PROJECT_NAME',
defaultValue: 'nvhi-atsila-microservice',
description: 'Project name for resource naming'
)
string(
name: 'AWS_CREDENTIALS_ID',
defaultValue: 'aws-ci',
description: 'AWS credentials stored in Jenkins'
)
string(
name: 'AWS_REGION_ID',
defaultValue: 'AWS_REGION',
description: 'AWS region credential stored in Jenkins'
)
choice(
name: 'ENVIRONMENT',
choices: ['dev', 'staging', 'prod'],
description: 'Environment to deploy'
)
}
environment {
// Terraform configuration
TF_VERSION = '1.5.7'
TF_IN_AUTOMATION = 'true'
TF_INPUT = 'false'
TF_CLI_ARGS = '-no-color'
// Working directory
TF_WORKING_DIR = 'Infrastructure/foundation'
// Project configuration (AWS_REGION will be injected from Jenkins credentials)
PROJECT_NAME = "${params.PROJECT_NAME}"
ENVIRONMENT = "${params.ENVIRONMENT}"
// SonarQube configuration
SONAR_PROJECT_KEY = "${params.PROJECT_NAME}-foundation"
SONAR_PROJECT_NAME = "${params.PROJECT_NAME} Foundation Layer"
SONAR_PROJECT_VERSION = "${BUILD_NUMBER}"
}
stages {
stage('🔍 Checkout & Validation') {
steps {
echo "=== Enterprise CI/CD Foundation Layer Pipeline ==="
echo "Action: ${params.ACTION}"
echo "Environment: ${params.ENVIRONMENT}"
echo "Project: ${params.PROJECT_NAME}"
echo "AWS Credentials: ${params.AWS_CREDENTIALS_ID}"
echo "AWS Region Credential: ${params.AWS_REGION_ID}"
echo "Authentication: Jenkins Credential Store (Enterprise Standard)"
echo "Build: #${BUILD_NUMBER}"
echo "Working Directory: ${env.TF_WORKING_DIR}"
// Clean workspace and checkout latest code
deleteDir()
checkout scm
// Verify repository structure
script {
sh '''
echo "Repository structure validation:"
# Check for required directories
if [ ! -d "${TF_WORKING_DIR}" ]; then
echo "❌ Missing foundation directory: ${TF_WORKING_DIR}"
exit 1
fi
# Check for required files
cd "${TF_WORKING_DIR}"
for file in main.tf variables.tf outputs.tf versions.tf bootstrap.sh cleanup.sh; do
if [ ! -f "$file" ]; then
echo "❌ Missing required file: $file"
exit 1
fi
echo "✅ Found: $file"
done
# Make scripts executable
chmod +x bootstrap.sh cleanup.sh
echo "✅ Repository structure validated"
'''
}
}
}
stage('🔧 Setup Tools') {
steps {
script {
// Install Terraform if not available
sh '''
if ! command -v terraform > /dev/null 2>&1; then
echo "Installing Terraform ${TF_VERSION}..."
wget -q https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip
unzip -o terraform_${TF_VERSION}_linux_amd64.zip
chmod +x terraform
sudo mv terraform /usr/local/bin/ || mv terraform /tmp/
export PATH="/tmp:$PATH"
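# Note: this PATH change only lasts for the current sh step; later stages
# assume terraform is already on the agent's default PATH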
fi
echo "Terraform version:"
terraform version
'''
// Verify AWS credentials and permissions via Jenkins credential store
withCredentials([
aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
]) {
sh '''
echo "AWS CLI version:"
aws --version
echo "Verifying Jenkins stored AWS credentials..."
echo "AWS Region: ${AWS_REGION}"
aws sts get-caller-identity
echo "Testing AWS permissions..."
aws ec2 describe-vpcs --max-items 1 --region ${AWS_REGION} > /dev/null && echo "✅ EC2 permissions OK" || echo "⚠️ EC2 permissions limited"
aws s3 ls > /dev/null 2>&1 && echo "✅ S3 permissions OK" || echo "⚠️ S3 permissions limited"
aws dynamodb list-tables --region ${AWS_REGION} > /dev/null 2>&1 && echo "✅ DynamoDB permissions OK" || echo "⚠️ DynamoDB permissions limited"
echo "✅ Jenkins credential store authentication verified"
'''
}
}
}
}
stage('🔍 SonarQube Analysis') {
when {
allOf {
expression { !params.SKIP_SONAR }
expression { params.ACTION != 'cleanup' }
}
}
steps {
dir("${env.TF_WORKING_DIR}") {
script {
// Create comprehensive SonarQube configuration
writeFile file: 'sonar-project.properties', text: """
sonar.projectKey=${env.SONAR_PROJECT_KEY}
sonar.projectName=${env.SONAR_PROJECT_NAME}
sonar.projectVersion=${env.SONAR_PROJECT_VERSION}
sonar.sources=.
sonar.sourceEncoding=UTF-8
# Terraform-specific configuration
sonar.terraform.file.suffixes=.tf
sonar.exclusions=**/*.tfstate,**/*.tfstate.backup,**/.terraform/**,**/*.tfplan
# Include scripts in analysis
sonar.inclusions=**/*.tf,**/*.sh
# Quality gate settings
sonar.qualitygate.wait=true
# Coverage and duplications
sonar.cpd.exclusions=**/*.tf
# Custom properties for enterprise analysis
sonar.tags=terraform,infrastructure,enterprise-cicd
"""
// Run SonarQube analysis
withSonarQubeEnv('SonarQube') {
sh '''
echo "🔍 Running SonarQube analysis on Terraform infrastructure..."
sonar-scanner
'''
}
}
}
}
}
stage('🎯 Quality Gate') {
when {
allOf {
expression { !params.SKIP_SONAR }
expression { params.ACTION != 'cleanup' }
}
}
steps {
script {
timeout(time: 5, unit: 'MINUTES') {
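// waitForQualityGate relies on the SonarQube webhook calling back to Jenkins;
// without a configured webhook this step will simply time out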
def qg = waitForQualityGate()
if (qg.status != 'OK') {
echo "❌ SonarQube Quality Gate failed: ${qg.status}"
echo "Quality gate details: ${qg}"
if (params.ACTION == 'apply' && !params.AUTO_APPROVE) {
def proceed = input(
message: 'SonarQube Quality Gate failed. How do you want to proceed?',
parameters: [
choice(
name: 'DECISION',
choices: ['Abort', 'Proceed anyway'],
description: 'Quality gate failed - your decision'
)
]
)
if (proceed == 'Abort') {
error "Deployment aborted due to quality gate failure"
}
} else if (params.ACTION == 'apply' && params.AUTO_APPROVE) {
echo "⚠️ Quality gate failed but AUTO_APPROVE is enabled, proceeding..."
} else {
error "Quality gate failed and action is ${params.ACTION}"
}
} else {
echo "✅ SonarQube Quality Gate passed"
}
}
}
}
}
stage('🚀 Bootstrap Backend') {
when {
allOf {
expression { params.ACTION == 'apply' }
expression { !params.SKIP_BOOTSTRAP }
}
}
steps {
dir("${env.TF_WORKING_DIR}") {
withCredentials([
aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
]) {
script {
echo "=== Bootstrapping Terraform Backend ==="
sh '''
# Set environment variables for bootstrap script
export PROJECT_NAME="${PROJECT_NAME}"
export ENVIRONMENT="${ENVIRONMENT}"
export AWS_REGION="${AWS_REGION}"
# Run bootstrap script (uses Jenkins credentials)
./bootstrap.sh
# Verify backend configuration was created
if [ ! -f backend.tf ]; then
echo "❌ Bootstrap failed - backend.tf not created"
exit 1
fi
echo "✅ Backend bootstrap completed"
echo "Generated backend.tf:"
cat backend.tf
'''
}
}
}
}
}
stage('🔄 Terraform Init & Validate') {
when {
expression { params.ACTION != 'cleanup' }
}
steps {
dir("${env.TF_WORKING_DIR}") {
withCredentials([
aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
]) {
script {
sh '''
echo "=== Terraform Initialization ==="
# Create terraform.tfvars if not exists
if [ ! -f terraform.tfvars ]; then
echo "Creating terraform.tfvars..."
cat > terraform.tfvars << EOF
# Generated by Jenkins Pipeline Build #${BUILD_NUMBER}
project_name = "${PROJECT_NAME}"
environment = "${ENVIRONMENT}"
aws_region = "${AWS_REGION}"
# Free tier optimized settings
enable_private_subnets = false
enable_vpc_endpoints = false
enable_nat_gateway = false
single_nat_gateway = true
cost_optimization_mode = true
# Jenkins-managed tags
common_tags = {
Terraform = "true"
Project = "${PROJECT_NAME}"
Environment = "${ENVIRONMENT}"
ManagedBy = "jenkins"
Pipeline = "foundation-layer"
BuildNumber = "${BUILD_NUMBER}"
GitCommit = "${GIT_COMMIT}"
}
EOF
fi
echo "Current terraform.tfvars:"
cat terraform.tfvars
# Initialize Terraform (uses Jenkins credentials)
terraform init -upgrade
# Validate configuration
terraform validate
# Format check
terraform fmt -check=true || {
echo "⚠️ Terraform files need formatting"
terraform fmt -diff=true
}
echo "✅ Terraform initialized and validated"
'''
}
}
}
}
}
stage('📊 Terraform Plan') {
when {
expression { params.ACTION in ['plan', 'apply'] }
}
steps {
dir("${env.TF_WORKING_DIR}") {
withCredentials([
aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
]) {
script {
sh '''
echo "=== Terraform Plan ==="
terraform plan \
-var="project_name=${PROJECT_NAME}" \
-var="environment=${ENVIRONMENT}" \
-var="aws_region=${AWS_REGION}" \
-out=tfplan \
-detailed-exitcode || PLAN_EXIT_CODE=$?
# Handle plan exit codes
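# -detailed-exitcode semantics: 0 = no changes, 1 = error, 2 = changes pending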
case ${PLAN_EXIT_CODE:-0} in
0)
echo "✅ No changes needed - infrastructure is up to date"
;;
1)
echo "❌ Terraform plan failed"
exit 1
;;
2)
echo "📝 Changes detected - plan saved to tfplan"
# Show plan summary
echo "=== Plan Summary ==="
terraform show -no-color tfplan | grep -E "(Plan:|No changes|Error:)" || echo "Plan generated successfully"
;;
esac
'''
// Archive the plan for audit
archiveArtifacts artifacts: 'tfplan', allowEmptyArchive: true
}
}
}
}
}
stage('🚦 Deployment Approval') {
when {
allOf {
expression { params.ACTION == 'apply' }
expression { !params.AUTO_APPROVE }
}
}
steps {
script {
def planSummary = ""
dir("${env.TF_WORKING_DIR}") {
planSummary = sh(
script: 'terraform show -no-color tfplan | grep "Plan:" || echo "No plan summary available"',
returnStdout: true
).trim()
}
echo "=== Manual Approval Required ==="
echo "Environment: ${params.ENVIRONMENT}"
echo "Region: ${params.AWS_REGION}"
echo "Plan Summary: ${planSummary}"
def approvalData = input(
id: 'ProceedApply',
message: """
🔍 Review the Terraform plan output above carefully.
Environment: ${params.ENVIRONMENT}
Region credential: ${params.AWS_REGION_ID}
Plan: ${planSummary}
Proceed with deployment?
""",
parameters: [
choice(
name: 'PROCEED',
choices: ['No', 'Yes, deploy infrastructure'],
description: 'Deployment decision'
),
string(
name: 'APPROVER',
defaultValue: env.BUILD_USER ?: 'jenkins-user',
description: 'Your name for audit trail'
)
]
)
if (approvalData.PROCEED != 'Yes, deploy infrastructure') {
error "Deployment cancelled by ${approvalData.APPROVER}"
}
echo "✅ Deployment approved by: ${approvalData.APPROVER}"
env.DEPLOYMENT_APPROVER = approvalData.APPROVER
}
}
}
stage('🚀 Terraform Apply') {
when {
expression { params.ACTION == 'apply' }
}
steps {
dir("${env.TF_WORKING_DIR}") {
withCredentials([
aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
]) {
script {
echo "=== Terraform Apply ==="
if (env.DEPLOYMENT_APPROVER) {
echo "✅ Approved by: ${env.DEPLOYMENT_APPROVER}"
}
sh '''
terraform apply -auto-approve tfplan
echo "=== Deployment Outputs ==="
terraform output
# Save outputs for other stages/jobs
terraform output -json > terraform-outputs.json
terraform output > terraform-outputs.txt
'''
// Archive outputs
archiveArtifacts artifacts: 'terraform-outputs.json,terraform-outputs.txt', allowEmptyArchive: true
}
}
}
}
}
stage('💥 Terraform Destroy') {
when {
expression { params.ACTION == 'destroy' }
}
steps {
dir("${env.TF_WORKING_DIR}") {
withCredentials([
aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
]) {
script {
def destroyApproval = input(
id: 'ProceedDestroy',
message: """
⚠️ DESTRUCTIVE ACTION WARNING ⚠️
This will permanently delete ALL infrastructure in:
• Environment: ${params.ENVIRONMENT}
• Project: ${params.PROJECT_NAME}
This action CANNOT be undone!
Type 'DESTROY' exactly to confirm:
""",
parameters: [
string(
name: 'CONFIRMATION',
defaultValue: '',
description: 'Type DESTROY to confirm deletion'
),
string(
name: 'DESTROYER',
defaultValue: env.BUILD_USER ?: 'jenkins-user',
description: 'Your name for audit trail'
)
]
)
if (destroyApproval.CONFIRMATION != 'DESTROY') {
error "Destroy cancelled - confirmation text did not match 'DESTROY'"
}
echo "💀 DESTROY operation confirmed by: ${destroyApproval.DESTROYER}"
echo "💀 Destroying infrastructure in 10 seconds..."
echo "💀 Last chance to cancel with Ctrl+C..."
sleep(10)
sh '''
terraform destroy -auto-approve \
-var="project_name=${PROJECT_NAME}" \
-var="environment=${ENVIRONMENT}" \
-var="aws_region=${AWS_REGION}"
'''
echo "💀 Infrastructure destroyed by: ${destroyApproval.DESTROYER}"
echo "💀 Next step: Run with ACTION=cleanup to remove bootstrap resources"
}
}
}
}
}
stage('🧹 Cleanup Bootstrap') {
when {
expression { params.ACTION == 'cleanup' }
}
steps {
dir("${env.TF_WORKING_DIR}") {
withCredentials([
aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
]) {
script {
echo "=== Cleanup Bootstrap Resources ==="
sh '''
# Set environment variables for cleanup script
export PROJECT_NAME="${PROJECT_NAME}"
export ENVIRONMENT="${ENVIRONMENT}"
export AWS_REGION="${AWS_REGION}"
# Run cleanup script (uses Jenkins credentials)
./cleanup.sh
echo "✅ Bootstrap cleanup completed"
'''
}
}
}
}
}
stage('📈 Post-Deployment Validation') {
when {
expression { params.ACTION == 'apply' }
}
steps {
dir("${env.TF_WORKING_DIR}") {
withCredentials([
aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
]) {
script {
sh '''
echo "=== Post-Deployment Validation ==="
# Validate VPC
VPC_ID=$(terraform output -raw vpc_id 2>/dev/null || true)
if [ -n "$VPC_ID" ] && [ "$VPC_ID" != "null" ]; then
echo "✅ VPC created successfully: $VPC_ID"
# Get VPC details
aws ec2 describe-vpcs --vpc-ids $VPC_ID --region ${AWS_REGION} \
--query 'Vpcs[0].{VpcId:VpcId,State:State,CidrBlock:CidrBlock}' \
--output table
# Count resources
SUBNET_COUNT=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=$VPC_ID" \
--query 'length(Subnets)' --output text --region ${AWS_REGION})
echo "✅ Subnets created: $SUBNET_COUNT"
SG_COUNT=$(aws ec2 describe-security-groups --filters "Name=vpc-id,Values=$VPC_ID" \
--query 'length(SecurityGroups)' --output text --region ${AWS_REGION})
echo "✅ Security groups: $SG_COUNT"
else
echo "❌ VPC validation failed"
exit 1
fi
# Validate backend resources
BUCKET_NAME=$(terraform output -raw terraform_state_bucket_name 2>/dev/null || true)
TABLE_NAME=$(terraform output -raw terraform_locks_table_name 2>/dev/null || true)
if [ -n "$BUCKET_NAME" ] && [ "$BUCKET_NAME" != "null" ]; then
echo "✅ S3 backend bucket: $BUCKET_NAME"
aws s3 ls s3://$BUCKET_NAME --region ${AWS_REGION}
fi
if [ -n "$TABLE_NAME" ] && [ "$TABLE_NAME" != "null" ]; then
echo "✅ DynamoDB locks table: $TABLE_NAME"
aws dynamodb describe-table --table-name $TABLE_NAME --region ${AWS_REGION} \
--query 'Table.{TableName:TableName,Status:TableStatus}' --output table
fi
# Cost analysis
echo "=== Cost Analysis ==="
echo "✅ Current configuration: ~$0/month (free tier optimized)"
echo "✅ No NAT Gateways (saves ~$32/month)"
echo "✅ No VPC Endpoints (saves ~$14/month)"
echo "✅ Using public subnets only for cost optimization"
echo "✅ Using Jenkins credential store (enterprise standard)"
'''
}
}
}
}
}
}
post {
always {
script {
echo "=== Pipeline Execution Summary ==="
echo "🔹 Build: #${BUILD_NUMBER}"
echo "🔹 Action: ${params.ACTION}"
echo "🔹 Environment: ${params.ENVIRONMENT}"
echo "🔹 Duration: ${currentBuild.durationString}"
echo "🔹 Result: ${currentBuild.result ?: 'SUCCESS'}"
// Archive all important artifacts
dir("${env.TF_WORKING_DIR}") {
archiveArtifacts artifacts: '*.tf,terraform.tfvars,*.tfplan,terraform-outputs.*,sonar-project.properties,.backend-config', allowEmptyArchive: true
}
}
}
success {
script {
echo "✅ Foundation Layer pipeline completed successfully!"
if (params.ACTION == 'apply') {
def message = """
🎉 Foundation Layer Deployment Complete!
📊 Deployment Details:
• Environment: ${params.ENVIRONMENT}
• Region credential: ${params.AWS_REGION_ID}
• Project: ${params.PROJECT_NAME}
• Build: #${BUILD_NUMBER}
• Duration: ${currentBuild.durationString}
${env.DEPLOYMENT_APPROVER ? "• Approved by: ${env.DEPLOYMENT_APPROVER}" : ""}
🏗️ Infrastructure Created:
• VPC with multi-AZ public subnets
• Security groups for ALB and ECS
• S3 bucket for Terraform state
• DynamoDB table for state locking
• Internet Gateway and routing
💰 Cost: ~\$0/month (free tier optimized)
🚀 Next Steps:
• Phase 2: Deploy Shared Services (ECR, ALB, IAM)
• Phase 3: Deploy Application Layer (ECS Fargate)
• Phase 4: Setup application CI/CD pipeline
📋 Outputs: Check archived artifacts for resource details
"""
echo message
}
}
}
failure {
script {
echo "❌ Foundation Layer pipeline failed!"
// Archive debug information
dir("${env.TF_WORKING_DIR}") {
sh '''
echo "=== Debug Information ===" > debug-info.txt
echo "Build: ${BUILD_NUMBER}" >> debug-info.txt
echo "Action: ${ACTION}" >> debug-info.txt
echo "Environment: ${ENVIRONMENT}" >> debug-info.txt
echo "Region: ${AWS_REGION}" >> debug-info.txt
echo "" >> debug-info.txt
echo "Terraform version:" >> debug-info.txt
terraform version >> debug-info.txt 2>&1 || echo "Terraform not available" >> debug-info.txt
echo "" >> debug-info.txt
echo "AWS CLI version:" >> debug-info.txt
aws --version >> debug-info.txt 2>&1 || echo "AWS CLI not available" >> debug-info.txt
echo "" >> debug-info.txt
echo "Working directory:" >> debug-info.txt
pwd >> debug-info.txt
ls -la >> debug-info.txt 2>&1
echo "" >> debug-info.txt
echo "Terraform state:" >> debug-info.txt
terraform state list >> debug-info.txt 2>&1 || echo "No state available" >> debug-info.txt
'''
archiveArtifacts artifacts: 'debug-info.txt', allowEmptyArchive: true
}
}
}
cleanup {
// Clean sensitive data but preserve artifacts
dir("${env.TF_WORKING_DIR}") {
sh '''
rm -f .terraform.lock.hcl 2>/dev/null || true
rm -rf .terraform/ 2>/dev/null || true
'''
}
}
}
}

Infrastructure/foundation/backend.tf Normal file

@@ -0,0 +1,25 @@
# Terraform Backend Configuration
# This file is auto-generated by bootstrap.sh to avoid the chicken-and-egg
# problem of Terraform managing the state store it depends on.
#
# During pipeline execution:
# 1. bootstrap.sh creates S3 bucket and DynamoDB table
# 2. bootstrap.sh generates this backend configuration
# 3. terraform init uses the remote backend from the start
#
# This approach eliminates the need for state migration and
# follows enterprise best practices.
# Backend configuration will be inserted here by bootstrap.sh
# Example structure:
# terraform {
# backend "s3" {
# bucket = "project-terraform-state-xxxxxxxx"
# key = "foundation/terraform.tfstate"
# region = "us-east-1"
# dynamodb_table = "project-terraform-locks"
# encrypt = true
# }
# }
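#
# Typical sequence (a sketch; the generated bucket name varies per run):
#   ./bootstrap.sh    # creates the S3 bucket and DynamoDB table, writes this file
#   terraform init    # initializes against the generated S3 backend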

Infrastructure/foundation/bootstrap.sh Normal file

@@ -0,0 +1,144 @@
#!/bin/bash
# Enterprise CI/CD Foundation Bootstrap Script
# This script creates the S3 bucket and DynamoDB table for Terraform backend
# before running the main Terraform deployment
set -e
# Configuration
PROJECT_NAME="${PROJECT_NAME:-enterprise-cicd}"
ENVIRONMENT="${ENVIRONMENT:-dev}"
AWS_REGION="${AWS_REGION:-us-east-1}"
# Generate unique suffix for global resources
RANDOM_SUFFIX=$(openssl rand -hex 4)
BUCKET_NAME="${PROJECT_NAME}-terraform-state-${RANDOM_SUFFIX}"
TABLE_NAME="${PROJECT_NAME}-terraform-locks"
echo "🚀 Bootstrapping Terraform Backend Infrastructure"
echo "Project: ${PROJECT_NAME}"
echo "Environment: ${ENVIRONMENT}"
echo "Region: ${AWS_REGION}"
echo "Bucket: ${BUCKET_NAME}"
echo "Table: ${TABLE_NAME}"
# Verify AWS credentials
echo "🔐 Verifying AWS credentials..."
aws sts get-caller-identity || {
echo "❌ AWS credentials not configured or invalid"
exit 1
}
# Create S3 bucket for Terraform state
echo "📦 Creating S3 bucket for Terraform state..."
if aws s3api head-bucket --bucket "${BUCKET_NAME}" 2>/dev/null; then
echo "✅ Bucket ${BUCKET_NAME} already exists"
else
# Create bucket with appropriate settings based on region
if [ "${AWS_REGION}" = "us-east-1" ]; then
aws s3api create-bucket \
--bucket "${BUCKET_NAME}" \
--region "${AWS_REGION}"
else
aws s3api create-bucket \
--bucket "${BUCKET_NAME}" \
--region "${AWS_REGION}" \
--create-bucket-configuration LocationConstraint="${AWS_REGION}"
fi
# Enable versioning
aws s3api put-bucket-versioning \
--bucket "${BUCKET_NAME}" \
--versioning-configuration Status=Enabled
# Enable encryption
aws s3api put-bucket-encryption \
--bucket "${BUCKET_NAME}" \
--server-side-encryption-configuration '{
"Rules": [{
"ApplyServerSideEncryptionByDefault": {
"SSEAlgorithm": "AES256"
}
}]
}'
# Block public access
aws s3api put-public-access-block \
--bucket "${BUCKET_NAME}" \
--public-access-block-configuration \
BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true
echo "✅ S3 bucket ${BUCKET_NAME} created successfully"
fi
# Create DynamoDB table for state locking
echo "🔒 Creating DynamoDB table for state locking..."
if aws dynamodb describe-table --table-name "${TABLE_NAME}" --region "${AWS_REGION}" >/dev/null 2>&1; then
echo "✅ DynamoDB table ${TABLE_NAME} already exists"
else
aws dynamodb create-table \
--table-name "${TABLE_NAME}" \
--attribute-definitions AttributeName=LockID,AttributeType=S \
--key-schema AttributeName=LockID,KeyType=HASH \
--billing-mode PAY_PER_REQUEST \
--region "${AWS_REGION}" \
--tags Key=Name,Value="${TABLE_NAME}" \
Key=Project,Value="${PROJECT_NAME}" \
Key=Environment,Value="${ENVIRONMENT}" \
Key=ManagedBy,Value=terraform
# Wait for table to be active
echo "⏳ Waiting for DynamoDB table to be active..."
aws dynamodb wait table-exists --table-name "${TABLE_NAME}" --region "${AWS_REGION}"
echo "✅ DynamoDB table ${TABLE_NAME} created successfully"
fi
# Generate backend configuration
echo "📝 Generating backend configuration..."
cat > backend.tf << EOF
# Terraform Backend Configuration
# Auto-generated by bootstrap script
terraform {
backend "s3" {
bucket = "${BUCKET_NAME}"
key = "foundation/terraform.tfstate"
region = "${AWS_REGION}"
dynamodb_table = "${TABLE_NAME}"
encrypt = true
}
}
EOF
echo "✅ Backend configuration written to backend.tf"
# Save configuration for later use
cat > .backend-config << EOF
BUCKET_NAME=${BUCKET_NAME}
TABLE_NAME=${TABLE_NAME}
AWS_REGION=${AWS_REGION}
PROJECT_NAME=${PROJECT_NAME}
ENVIRONMENT=${ENVIRONMENT}
EOF
echo ""
echo "🎉 Bootstrap completed successfully!"
echo ""
echo "📋 Resources Created:"
echo " S3 Bucket: ${BUCKET_NAME}"
echo " DynamoDB Table: ${TABLE_NAME}"
echo " Region: ${AWS_REGION}"
echo ""
echo "📁 Files Generated:"
echo " backend.tf - Terraform backend configuration"
echo " .backend-config - Resource details for cleanup"
echo ""
echo "🚀 Ready to run Terraform:"
echo " terraform init"
echo " terraform plan"
echo " terraform apply"
echo ""
echo "💡 To destroy everything later:"
echo " terraform destroy"
echo " ./cleanup.sh (to remove bootstrap resources)"

Infrastructure/foundation/cleanup.sh Normal file

@@ -0,0 +1,131 @@
#!/bin/bash
# Enterprise CI/CD Foundation Cleanup Script
# This script removes the bootstrap S3 bucket and DynamoDB table
# Run this AFTER terraform destroy to completely clean up
set -e
echo "🧹 Foundation Layer Cleanup Script"
# Load configuration if available
if [ -f .backend-config ]; then
echo "📋 Loading configuration from .backend-config..."
source .backend-config
else
echo "⚠️ No .backend-config found. Using environment variables..."
BUCKET_NAME="${BUCKET_NAME:-}"
TABLE_NAME="${TABLE_NAME:-}"
AWS_REGION="${AWS_REGION:-us-east-1}"
PROJECT_NAME="${PROJECT_NAME:-enterprise-cicd}"
ENVIRONMENT="${ENVIRONMENT:-dev}"
fi
# Verify AWS credentials
echo "🔐 Verifying AWS credentials..."
aws sts get-caller-identity || {
echo "❌ AWS credentials not configured or invalid"
exit 1
}
# Interactive confirmation
echo ""
echo "⚠️ WARNING: This will permanently delete bootstrap resources!"
echo ""
echo "Resources to delete:"
echo " S3 Bucket: ${BUCKET_NAME}"
echo " DynamoDB Table: ${TABLE_NAME}"
echo " Region: ${AWS_REGION}"
echo ""
read -p "Are you sure you want to proceed? (type 'DELETE' to confirm): " confirmation
if [ "$confirmation" != "DELETE" ]; then
echo "❌ Cleanup cancelled"
exit 1
fi
echo ""
echo "💀 Starting cleanup process..."
# Check if Terraform state still exists
if [ -f terraform.tfstate ] || [ -f .terraform/terraform.tfstate ]; then
echo "❌ Error: Terraform state files still exist!"
echo "Please run 'terraform destroy' first to destroy all infrastructure"
echo "Then run this cleanup script to remove bootstrap resources"
exit 1
fi
# Check if S3 bucket contains state files
if [ -n "${BUCKET_NAME}" ] && aws s3api head-bucket --bucket "${BUCKET_NAME}" 2>/dev/null; then
STATE_FILES=$(aws s3 ls "s3://${BUCKET_NAME}/foundation/" --recursive 2>/dev/null || echo "")
if [ -n "${STATE_FILES}" ]; then
echo "❌ Error: S3 bucket contains Terraform state files!"
echo "Found state files:"
echo "${STATE_FILES}"
echo ""
echo "Please run 'terraform destroy' first to clean up all infrastructure"
echo "This will remove the state files from S3"
exit 1
fi
fi
# Remove S3 bucket
if [ -n "${BUCKET_NAME}" ] && aws s3api head-bucket --bucket "${BUCKET_NAME}" 2>/dev/null; then
echo "🗑️ Removing S3 bucket: ${BUCKET_NAME}"
# Remove all objects and versions
echo " Removing all objects and versions..."
aws s3api list-object-versions --bucket "${BUCKET_NAME}" \
--query 'Versions[].[Key,VersionId]' --output text | \
while read key version; do
if [ -n "$key" ] && [ -n "$version" ]; then
aws s3api delete-object --bucket "${BUCKET_NAME}" --key "$key" --version-id "$version"
fi
done
# Remove delete markers
aws s3api list-object-versions --bucket "${BUCKET_NAME}" \
--query 'DeleteMarkers[].[Key,VersionId]' --output text | \
while read key version; do
if [ -n "$key" ] && [ -n "$version" ]; then
aws s3api delete-object --bucket "${BUCKET_NAME}" --key "$key" --version-id "$version"
fi
done
# Delete the bucket
aws s3api delete-bucket --bucket "${BUCKET_NAME}" --region "${AWS_REGION}"
echo "✅ S3 bucket ${BUCKET_NAME} deleted"
else
echo " S3 bucket ${BUCKET_NAME} not found or already deleted"
fi
# Remove DynamoDB table
if [ -n "${TABLE_NAME}" ] && aws dynamodb describe-table --table-name "${TABLE_NAME}" --region "${AWS_REGION}" >/dev/null 2>&1; then
echo "🗑️ Removing DynamoDB table: ${TABLE_NAME}"
aws dynamodb delete-table --table-name "${TABLE_NAME}" --region "${AWS_REGION}"
# Wait for deletion to complete
echo "⏳ Waiting for table deletion to complete..."
aws dynamodb wait table-not-exists --table-name "${TABLE_NAME}" --region "${AWS_REGION}"
echo "✅ DynamoDB table ${TABLE_NAME} deleted"
else
echo " DynamoDB table ${TABLE_NAME} not found or already deleted"
fi
# Clean up local files
echo "🧹 Cleaning up local files..."
rm -f backend.tf
rm -f .backend-config
rm -f terraform.tfstate.backup
rm -f .terraform.lock.hcl
rm -rf .terraform/
echo ""
echo "🎉 Cleanup completed successfully!"
echo ""
echo "📋 What was removed:"
echo " ✅ S3 bucket: ${BUCKET_NAME}"
echo " ✅ DynamoDB table: ${TABLE_NAME}"
echo " ✅ Local backend configuration files"
echo ""
echo "💡 You can now run the bootstrap script again to create new resources"

Infrastructure/foundation/main.tf Normal file

@@ -0,0 +1,331 @@
# Foundation Layer - VPC and Core Infrastructure (Free Tier Optimized)
# Creates base networking infrastructure with minimal cost for learning/development
# Data source for availability zones
data "aws_availability_zones" "available" {
state = "available"
}
# VPC
resource "aws_vpc" "main" {
cidr_block = var.vpc_cidr
enable_dns_hostnames = true
enable_dns_support = true
tags = {
Name = "${var.project_name}-vpc"
Environment = var.environment
Project = var.project_name
}
}
# Internet Gateway
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
tags = {
Name = "${var.project_name}-igw"
Environment = var.environment
Project = var.project_name
}
}
# Public Subnets (using 2 AZs for cost optimization)
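# With the default vpc_cidr of 10.0.0.0/16, cidrsubnet(var.vpc_cidr, 8, count.index)
# yields 10.0.0.0/24 and 10.0.1.0/24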
resource "aws_subnet" "public" {
count = 2
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(var.vpc_cidr, 8, count.index)
availability_zone = data.aws_availability_zones.available.names[count.index]
map_public_ip_on_launch = true
tags = {
Name = "${var.project_name}-public-subnet-${count.index + 1}"
Environment = var.environment
Project = var.project_name
Type = "public"
}
}
# Private Subnets (disabled by default; workloads run in public subnets to avoid NAT Gateway costs)
# Enable these later to upgrade to a production-ready setup
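# With the default CIDR, the +10 offset below yields 10.0.10.0/24 and 10.0.11.0/24,
# keeping private ranges clear of the public /24s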
resource "aws_subnet" "private" {
count = var.enable_private_subnets ? 2 : 0
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(var.vpc_cidr, 8, count.index + 10)
availability_zone = data.aws_availability_zones.available.names[count.index]
tags = {
Name = "${var.project_name}-private-subnet-${count.index + 1}"
Environment = var.environment
Project = var.project_name
Type = "private"
}
}
# Conditional NAT Gateway resources (only if private subnets are enabled)
resource "aws_eip" "nat" {
count = var.enable_private_subnets && var.enable_nat_gateway ? (var.single_nat_gateway ? 1 : 2) : 0
domain = "vpc"
depends_on = [aws_internet_gateway.main]
tags = {
Name = "${var.project_name}-nat-eip-${count.index + 1}"
Environment = var.environment
Project = var.project_name
}
}
resource "aws_nat_gateway" "main" {
count = var.enable_private_subnets && var.enable_nat_gateway ? (var.single_nat_gateway ? 1 : 2) : 0
allocation_id = aws_eip.nat[count.index].id
subnet_id = aws_subnet.public[count.index].id
depends_on = [aws_internet_gateway.main]
tags = {
Name = "${var.project_name}-nat-gw-${count.index + 1}"
Environment = var.environment
Project = var.project_name
}
}
# Route Table for Public Subnets
resource "aws_route_table" "public" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
tags = {
Name = "${var.project_name}-public-rt"
Environment = var.environment
Project = var.project_name
}
}
# Route Tables for Private Subnets (only if enabled)
resource "aws_route_table" "private" {
count = var.enable_private_subnets ? 2 : 0
vpc_id = aws_vpc.main.id
# Only add route to NAT Gateway if NAT Gateway is enabled
dynamic "route" {
for_each = var.enable_nat_gateway ? [1] : []
content {
cidr_block = "0.0.0.0/0"
# If single NAT gateway, all route tables use index 0, otherwise use the route table's index
nat_gateway_id = aws_nat_gateway.main[var.single_nat_gateway ? 0 : count.index].id
}
}
tags = {
Name = "${var.project_name}-private-rt-${count.index + 1}"
Environment = var.environment
Project = var.project_name
}
}
# Associate Public Subnets with Public Route Table
resource "aws_route_table_association" "public" {
count = 2
subnet_id = aws_subnet.public[count.index].id
route_table_id = aws_route_table.public.id
}
# Associate Private Subnets with Private Route Tables (only if enabled)
resource "aws_route_table_association" "private" {
count = var.enable_private_subnets ? 2 : 0
subnet_id = aws_subnet.private[count.index].id
route_table_id = aws_route_table.private[count.index].id
}
# Default Security Group
resource "aws_security_group" "default" {
name = "${var.project_name}-default-sg"
description = "Default security group for ${var.project_name}"
vpc_id = aws_vpc.main.id
ingress {
from_port = 0
to_port = 0
protocol = "-1"
self = true
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "${var.project_name}-default-sg"
Environment = var.environment
Project = var.project_name
}
}
# Security Group for ALB
resource "aws_security_group" "alb" {
name = "${var.project_name}-alb-sg"
description = "Security group for Application Load Balancer"
vpc_id = aws_vpc.main.id
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "${var.project_name}-alb-sg"
Environment = var.environment
Project = var.project_name
}
}
# Security Group for ECS Tasks
resource "aws_security_group" "ecs_tasks" {
name = "${var.project_name}-ecs-tasks-sg"
description = "Security group for ECS tasks"
vpc_id = aws_vpc.main.id
# Allow traffic from ALB
ingress {
from_port = 0
to_port = 65535
protocol = "tcp"
security_groups = [aws_security_group.alb.id]
}
# For development: allow direct access (remove in production)
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 8080
to_port = 8080
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "${var.project_name}-ecs-tasks-sg"
Environment = var.environment
Project = var.project_name
}
}
# Conditional VPC Endpoints (created only when explicitly enabled; off by default for cost)
resource "aws_vpc_endpoint" "s3" {
count = var.enable_vpc_endpoints ? 1 : 0
vpc_id = aws_vpc.main.id
service_name = "com.amazonaws.${var.aws_region}.s3"
tags = {
Name = "${var.project_name}-s3-endpoint"
Environment = var.environment
Project = var.project_name
}
}
# S3 Bucket for Terraform State
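# Note: this bucket is created by Terraform with its own random suffix and is
# separate from the bootstrap-created bucket that actually stores this
# configuration's state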
resource "aws_s3_bucket" "terraform_state" {
bucket = "${var.project_name}-terraform-state-${random_string.bucket_suffix.result}"
tags = {
Name = "${var.project_name}-terraform-state"
Environment = var.environment
Project = var.project_name
}
}
# Random string for bucket uniqueness
resource "random_string" "bucket_suffix" {
length = 8
special = false
upper = false
}
# S3 Bucket Versioning
resource "aws_s3_bucket_versioning" "terraform_state" {
bucket = aws_s3_bucket.terraform_state.id
versioning_configuration {
status = "Enabled"
}
}
# S3 Bucket Server Side Encryption
resource "aws_s3_bucket_server_side_encryption_configuration" "terraform_state" {
bucket = aws_s3_bucket.terraform_state.id
rule {
apply_server_side_encryption_by_default {
sse_algorithm = "AES256"
}
}
}
# S3 Bucket Public Access Block
resource "aws_s3_bucket_public_access_block" "terraform_state" {
bucket = aws_s3_bucket.terraform_state.id
block_public_acls = true
block_public_policy = true
ignore_public_acls = true
restrict_public_buckets = true
}
# DynamoDB Table for Terraform State Locking
resource "aws_dynamodb_table" "terraform_locks" {
name = "${var.project_name}-terraform-locks"
billing_mode = "PAY_PER_REQUEST"
hash_key = "LockID"
attribute {
name = "LockID"
type = "S"
}
tags = {
Name = "${var.project_name}-terraform-locks"
Environment = var.environment
Project = var.project_name
}
}

Infrastructure/foundation/outputs.tf Normal file

@@ -0,0 +1,157 @@
# Foundation Layer Outputs
# These outputs will be used by subsequent layers (Shared Services and Application)
# VPC Information
output "vpc_id" {
description = "ID of the VPC"
value = aws_vpc.main.id
}
output "vpc_cidr_block" {
description = "CIDR block of the VPC"
value = aws_vpc.main.cidr_block
}
output "vpc_arn" {
description = "ARN of the VPC"
value = aws_vpc.main.arn
}
# Subnet Information
output "public_subnet_ids" {
description = "List of IDs of the public subnets"
value = aws_subnet.public[*].id
}
output "private_subnet_ids" {
description = "List of IDs of the private subnets"
value = var.enable_private_subnets ? aws_subnet.private[*].id : []
}
output "public_subnet_cidrs" {
description = "List of CIDR blocks of the public subnets"
value = aws_subnet.public[*].cidr_block
}
output "private_subnet_cidrs" {
description = "List of CIDR blocks of the private subnets"
value = var.enable_private_subnets ? aws_subnet.private[*].cidr_block : []
}
# For free tier: use public subnets as "app subnets" when private subnets are disabled
output "app_subnet_ids" {
description = "List of subnet IDs to use for application deployment (private if available, public if cost-optimized)"
value = var.enable_private_subnets ? aws_subnet.private[*].id : aws_subnet.public[*].id
}
# Availability Zones
output "availability_zones" {
description = "List of availability zones"
value = data.aws_availability_zones.available.names
}
# Gateway Information
output "internet_gateway_id" {
description = "ID of the Internet Gateway"
value = aws_internet_gateway.main.id
}
output "nat_gateway_ids" {
description = "List of IDs of the NAT Gateways"
value = var.enable_private_subnets && var.enable_nat_gateway ? aws_nat_gateway.main[*].id : []
}
output "nat_gateway_public_ips" {
description = "List of public Elastic IPs of NAT Gateways"
value = var.enable_private_subnets && var.enable_nat_gateway ? aws_eip.nat[*].public_ip : []
}
# Security Group Information
output "default_security_group_id" {
description = "ID of the default security group"
value = aws_security_group.default.id
}
output "alb_security_group_id" {
description = "ID of the ALB security group"
value = aws_security_group.alb.id
}
output "ecs_tasks_security_group_id" {
description = "ID of the ECS tasks security group"
value = aws_security_group.ecs_tasks.id
}
output "vpc_endpoints_security_group_id" {
description = "ID of the VPC endpoints security group"
value = null # Not created in free tier version to avoid costs
}
# Route Table Information
output "public_route_table_id" {
description = "ID of the public route table"
value = aws_route_table.public.id
}
output "private_route_table_ids" {
description = "List of IDs of the private route tables"
value = var.enable_private_subnets ? aws_route_table.private[*].id : []
}
# VPC Endpoint Information
output "s3_vpc_endpoint_id" {
description = "ID of the S3 VPC endpoint"
value = var.enable_vpc_endpoints ? aws_vpc_endpoint.s3[0].id : null
}
output "ecr_dkr_vpc_endpoint_id" {
description = "ID of the ECR Docker VPC endpoint"
value = null # Disabled in free tier version
}
output "ecr_api_vpc_endpoint_id" {
description = "ID of the ECR API VPC endpoint"
value = null # Disabled in free tier version
}
output "logs_vpc_endpoint_id" {
description = "ID of the CloudWatch Logs VPC endpoint"
value = null # Disabled in free tier version
}
# Terraform Backend Information
output "terraform_state_bucket_name" {
description = "Name of the S3 bucket for Terraform state"
value = aws_s3_bucket.terraform_state.bucket
}
output "terraform_state_bucket_arn" {
description = "ARN of the S3 bucket for Terraform state"
value = aws_s3_bucket.terraform_state.arn
}
output "terraform_locks_table_name" {
description = "Name of the DynamoDB table for Terraform locks"
value = aws_dynamodb_table.terraform_locks.name
}
output "terraform_locks_table_arn" {
description = "ARN of the DynamoDB table for Terraform locks"
value = aws_dynamodb_table.terraform_locks.arn
}
# Project Information
output "project_name" {
description = "Name of the project"
value = var.project_name
}
output "environment" {
description = "Environment name"
value = var.environment
}
output "aws_region" {
description = "AWS region"
value = var.aws_region
}

Infrastructure/foundation/terraform.tfvars.example Normal file

@@ -0,0 +1,36 @@
# Example Terraform Variables File - FREE TIER OPTIMIZED
# Copy this file to terraform.tfvars and customize as needed
# terraform.tfvars is gitignored for security
# Project Configuration
project_name = "nvhi-atsila-microservice"
environment = "dev"
aws_region = "us-east-1"
# Network Configuration
vpc_cidr = "10.0.0.0/16"
# Feature Toggles - FREE TIER SETTINGS
enable_dns_hostnames = true
enable_dns_support = true
enable_private_subnets = false # Set to false to avoid NAT Gateway costs (~$32/month)
enable_nat_gateway = false # Only relevant if private subnets are enabled
single_nat_gateway = true # If you enable NAT later, use single gateway for cost savings
enable_vpc_endpoints = false # Set to false to avoid interface endpoint costs (~$14/month)
cost_optimization_mode = true # Enables free tier optimizations
# Cost Estimates:
# enable_private_subnets = false, enable_vpc_endpoints = false: ~$0/month
# enable_private_subnets = true, single_nat_gateway = true: ~$32/month
# enable_private_subnets = true, single_nat_gateway = false: ~$64/month
# enable_vpc_endpoints = true: +$14/month
# Additional Tags
common_tags = {
Terraform = "true"
Project = "enterprise-cicd"
Owner = "devops-team"
CostCenter = "engineering"
Department = "technology"
Tier = "free"
}

Infrastructure/foundation/variables.tf Normal file

@@ -0,0 +1,79 @@
# Foundation Layer Variables - FREE TIER OPTIMIZED
# Configuration variables for the enterprise CI/CD pipeline infrastructure
variable "project_name" {
description = "Name of the project - used for resource naming"
type = string
default = "nvhi-atsila-microservice"
}
variable "environment" {
description = "Environment name (dev, staging, prod)"
type = string
default = "dev"
}
variable "aws_region" {
description = "AWS region for infrastructure deployment"
type = string
default = "us-east-1"
}
variable "vpc_cidr" {
description = "CIDR block for the VPC"
type = string
default = "10.0.0.0/16"
}
variable "enable_dns_hostnames" {
description = "Enable DNS hostnames in the VPC"
type = bool
default = true
}
variable "enable_dns_support" {
description = "Enable DNS support in the VPC"
type = bool
default = true
}
variable "enable_nat_gateway" {
description = "Enable NAT gateways for private subnets (only relevant if private subnets enabled)"
type = bool
default = false # Default false for free tier
}
variable "single_nat_gateway" {
description = "Use a single NAT gateway instead of one per AZ (cost optimization)"
type = bool
default = true # Default true for cost optimization when NAT is enabled
}
variable "enable_private_subnets" {
description = "Enable private subnets (requires NAT Gateway for internet access)"
type = bool
default = false # Set to false for free tier to avoid NAT Gateway costs
}
variable "enable_vpc_endpoints" {
description = "Enable VPC endpoints for AWS services (costs extra)"
type = bool
default = false # Set to false for free tier to avoid interface endpoint costs
}
variable "cost_optimization_mode" {
description = "Enable cost optimization features for free tier usage"
type = bool
default = true
}
variable "common_tags" {
description = "Common tags to apply to all resources"
type = map(string)
default = {
Terraform = "true"
Project = "enterprise-cicd"
Owner = "devops-team"
CostCenter = "engineering"
}
}

Infrastructure/foundation/versions.tf Normal file

@@ -0,0 +1,33 @@
# Terraform and Provider Versions
# Defines the minimum required versions for consistency and reliability
terraform {
required_version = ">= 1.5"
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
}
random = {
source = "hashicorp/random"
version = "~> 3.1"
}
}
}
# AWS Provider Configuration
provider "aws" {
region = var.aws_region
default_tags {
tags = merge(
var.common_tags,
{
Environment = var.environment
Project = var.project_name
ManagedBy = "terraform"
}
)
}
}