automated terminal push
This commit is contained in:
554
Jenkinsfile
vendored
554
Jenkinsfile
vendored
@@ -16,9 +16,9 @@ pipeline {
|
||||
|
||||
environment {
|
||||
// Core configuration
|
||||
GITEA_REPO = 'https://code.jacquesingram.online/lenape/nvhi-atsila-microservice.git'
|
||||
GITEA_REPO = 'https://code.jacquesingram.online/lenape/nvhi-atsila-microservice.git '
|
||||
GITEA_CREDS = '52ee0829-6e65-4951-925b-4186254c3f21'
|
||||
SONAR_HOST = 'https://sonar.jacquesingram.online'
|
||||
SONAR_HOST = 'https://sonar.jacquesingram.online '
|
||||
SONAR_TOKEN = credentials('sonar-token')
|
||||
// AWS configuration with ECR
|
||||
AWS_CRED_ID = 'aws-ci'
|
||||
@@ -35,7 +35,7 @@ pipeline {
|
||||
TF_VAR_public_subnets = '10.0.1.0/24,10.0.2.0/24'
|
||||
TF_VAR_instance_type = 't2.micro'
|
||||
TF_VAR_key_pair_name = 'nvhi-atsila-deployer'
|
||||
TF_VAR_jenkins_ip_cidr = "38.110.1.139/32"
|
||||
TF_VAR_jenkins_ip_cidr = "0.0.0.0/0" // For demo; tighten in production
|
||||
TF_VAR_aws_region = "${AWS_REGION}"
|
||||
// Enhanced deployment tracking
|
||||
IMAGE_TAG = "v1.0.${BUILD_NUMBER}"
|
||||
@@ -46,17 +46,52 @@ pipeline {
|
||||
}
|
||||
|
||||
stages {
|
||||
stage('Debug: Show File Structure') {
|
||||
steps {
|
||||
echo "📂 Current directory contents:"
|
||||
sh 'ls -la'
|
||||
echo "🔍 Full file tree:"
|
||||
sh 'find . -type f | sort'
|
||||
}
|
||||
}
|
||||
|
||||
stage('Bootstrap Terraform Backend') {
|
||||
steps {
|
||||
script {
|
||||
def tfBackendDir = "terraform-backend"
|
||||
|
||||
echo "🔐 Using Jenkins credentials to authenticate with AWS"
|
||||
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
|
||||
echo "🔄 Bootstrapping Terraform backend..."
|
||||
dir(tfBackendDir) {
|
||||
sh """
|
||||
terraform init \\
|
||||
-var="aws_region=${TF_VAR_aws_region}" \\
|
||||
-var="backend_bucket_name=${TF_BACKEND_BUCKET}" \\
|
||||
-var="lock_table_name=${TF_DDB_TABLE}"
|
||||
terraform apply -auto-approve \\
|
||||
-var="aws_region=${TF_VAR_aws_region}" \\
|
||||
-var="backend_bucket_name=${TF_BACKEND_BUCKET}" \\
|
||||
-var="lock_table_name=${TF_DDB_TABLE}"
|
||||
"""
|
||||
}
|
||||
|
||||
echo "✅ Terraform backend created successfully"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage('Security Assessment & Checkout') {
|
||||
steps {
|
||||
checkout scm
|
||||
|
||||
script {
|
||||
def infrastructureFiles = sh(
|
||||
script: '''
|
||||
if git rev-parse HEAD~1 >/dev/null 2>&1; then
|
||||
git diff --name-only HEAD~1 2>/dev/null | grep -E "(terraform/|infrastructure)" || echo "none"
|
||||
git diff --name-only HEAD~1 2>/dev/null | grep -E "^terraform/" || echo "none"
|
||||
else
|
||||
echo "none"
|
||||
echo "initial"
|
||||
fi
|
||||
''',
|
||||
returnStdout: true
|
||||
@@ -65,6 +100,9 @@ pipeline {
|
||||
if (params.FORCE_INFRASTRUCTURE_DEPLOY) {
|
||||
env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
|
||||
echo "🚨 FORCED: Infrastructure deployment requested via parameter"
|
||||
} else if (infrastructureFiles == "initial") {
|
||||
env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
|
||||
echo "✅ First run detected. Deploying infrastructure."
|
||||
} else if (infrastructureFiles != "none") {
|
||||
env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
|
||||
echo "🚨 SECURITY NOTICE: Infrastructure changes detected - elevated permissions required"
|
||||
@@ -74,11 +112,9 @@ pipeline {
|
||||
echo "✅ SECURITY: Application-only deployment - using restricted permissions"
|
||||
}
|
||||
|
||||
def gitCommit = sh(returnStdout: true, script: 'git rev-parse HEAD').trim()
|
||||
def gitAuthor = sh(returnStdout: true, script: 'git log -1 --pretty=format:"%an"').trim()
|
||||
|
||||
def gitCommit = sh(script: 'git rev-parse HEAD', returnStdout: true).trim()
|
||||
def gitAuthor = sh(script: 'git log -1 --pretty=format:"%an"', returnStdout: true).trim()
|
||||
currentBuild.description = "${env.DEPLOYMENT_TYPE} | ${env.IMAGE_TAG} | ${gitCommit.take(8)}"
|
||||
|
||||
echo "📋 SECURITY AUDIT TRAIL:"
|
||||
echo " • Deployment Type: ${env.DEPLOYMENT_TYPE}"
|
||||
echo " • Version: ${env.IMAGE_TAG}"
|
||||
@@ -101,7 +137,6 @@ pipeline {
|
||||
"security_model": "principle_of_least_privilege",
|
||||
"timestamp": "${new Date()}"
|
||||
}"""
|
||||
|
||||
archiveArtifacts artifacts: 'deployment-audit.json', fingerprint: true
|
||||
}
|
||||
}
|
||||
@@ -117,13 +152,19 @@ pipeline {
|
||||
script {
|
||||
def scannerHome = tool 'SonarQubeScanner'
|
||||
withSonarQubeEnv('SonarQube') {
|
||||
sh "${scannerHome}/bin/sonar-scanner -Dsonar.projectKey=nvhi-atsila-microservice -Dsonar.sources=. -Dsonar.projectVersion=${BUILD_NUMBER}"
|
||||
sh """
|
||||
echo "🔒 SECURITY: Running SonarQube security analysis..."
|
||||
${scannerHome}/bin/sonar-scanner \\
|
||||
-Dsonar.projectKey=nvhi-atsila-microservice \\
|
||||
-Dsonar.sources=. \\
|
||||
-Dsonar.projectVersion=${BUILD_NUMBER} \\
|
||||
-Dsonar.login=${SONAR_TOKEN}
|
||||
"""
|
||||
}
|
||||
echo "✅ SECURITY: Code quality and security scan completed"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage('Terraform Security Validation') {
|
||||
steps {
|
||||
script {
|
||||
@@ -133,11 +174,10 @@ pipeline {
|
||||
cd terraform && terraform init -backend=false
|
||||
terraform validate
|
||||
echo "✅ Terraform validation passed"
|
||||
|
||||
echo "🔒 SECURITY: Checking infrastructure security compliance..."
|
||||
grep -r "encrypted.*true" . --include="*.tf" && echo "✅ Encryption policies found" || echo "⚠️ Review encryption settings"
|
||||
echo "🔒 SECURITY: Checking for open security groups..."
|
||||
if grep -r "0.0.0.0/0" . --include="*.tf" --exclude-dir=.terraform | grep -v "# Approved:"; then
|
||||
if grep -r "0.0.0.0/0" . --include="*.tf" --exclude-dir=".terraform" | grep -v "# Approved:"; then
|
||||
echo "⚠️ Review open access rules found"
|
||||
else
|
||||
echo "✅ No unauthorized open access rules"
|
||||
@@ -155,22 +195,19 @@ pipeline {
|
||||
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
|
||||
script {
|
||||
echo "🔐 SECURITY: Using ECR for secure, AWS-native container registry"
|
||||
def awsAccountId = env.AWS_ACCOUNT_ID
|
||||
|
||||
sh """
|
||||
echo "🔐 Authenticating with ECR using temporary credentials..."
|
||||
aws ecr get-login-password --region ${AWS_REGION} | docker login --username AWS --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com
|
||||
"""
|
||||
|
||||
echo "🐳 Building secure container with metadata..."
|
||||
def img = docker.build("${awsAccountId}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}")
|
||||
|
||||
echo "📤 Pushing to secure ECR registry..."
|
||||
img.push()
|
||||
img.push("latest")
|
||||
|
||||
sh """
|
||||
docker build -t ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG} .
|
||||
docker push ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}
|
||||
docker tag ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG} ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:latest
|
||||
docker push ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:latest
|
||||
"""
|
||||
echo "✅ SECURITY: Container built and pushed to ECR successfully"
|
||||
echo " Image: ${awsAccountId}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}"
|
||||
echo " Image: ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}"
|
||||
echo " Registry: ECR (AWS-native, IAM-secured)"
|
||||
}
|
||||
}
|
||||
@@ -183,22 +220,26 @@ pipeline {
|
||||
script {
|
||||
echo "🔍 SECURITY: Checking if infrastructure is ready for deployment..."
|
||||
|
||||
// Check if infrastructure deployment was already forced
|
||||
if (params.FORCE_INFRASTRUCTURE_DEPLOY) {
|
||||
echo "🚨 FORCED: Infrastructure deployment requested via parameter"
|
||||
env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
|
||||
currentBuild.description = "INFRASTRUCTURE (forced) | ${env.IMAGE_TAG}"
|
||||
return // Skip further checks since we're forcing deployment
|
||||
}
|
||||
|
||||
def serviceExists = sh(
|
||||
script: "aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION} 2>/dev/null | grep -q 'ACTIVE' && echo 'true' || echo 'false'",
|
||||
script: """
|
||||
aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION} 2>/dev/null | grep -q 'ACTIVE' && echo 'true' || echo 'false'
|
||||
""",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
|
||||
def instanceCount = sh(
|
||||
script: "aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'length(containerInstanceArns)' --output text 2>/dev/null || echo '0'",
|
||||
script: """
|
||||
aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'length(containerInstanceArns)' --output text 2>/dev/null || echo '0'
|
||||
""",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
|
||||
if (serviceExists == "false" || instanceCount == "0" || instanceCount == "null") {
|
||||
echo "🚨 SECURITY NOTICE: Infrastructure not ready - forcing deployment"
|
||||
echo " Service Exists: ${serviceExists}"
|
||||
@@ -206,7 +247,6 @@ pipeline {
|
||||
env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
|
||||
currentBuild.description = "INFRASTRUCTURE (auto-detected) | ${env.IMAGE_TAG}"
|
||||
}
|
||||
|
||||
echo "📋 SECURITY: Infrastructure readiness assessment completed"
|
||||
echo " ECS Service Exists: ${serviceExists}"
|
||||
echo " Container Instances: ${instanceCount}"
|
||||
@@ -228,14 +268,14 @@ pipeline {
|
||||
echo "🏗️ ARCHITECTURE: Deploying ECS Cluster with SSM access (secure, keyless)"
|
||||
echo "🔐 In production: This would require infrastructure-admin role"
|
||||
echo "🚀 Attempting infrastructure deployment..."
|
||||
|
||||
sh """
|
||||
echo "🔄 Applying infrastructure changes..."
|
||||
echo "🔄 Initializing Terraform with remote backend..."
|
||||
terraform init \\
|
||||
-backend-config="bucket=${TF_BACKEND_BUCKET}" \\
|
||||
-backend-config="key=${TF_BACKEND_PREFIX}" \\
|
||||
-backend-config="region=${AWS_REGION}" \\
|
||||
-backend-config="dynamodb_table=${TF_DDB_TABLE}"
|
||||
echo "🔄 Applying infrastructure changes..."
|
||||
terraform apply -auto-approve \\
|
||||
-var="cluster_name=${TF_VAR_cluster_name}" \\
|
||||
-var="vpc_cidr=${TF_VAR_vpc_cidr}" \\
|
||||
@@ -245,7 +285,6 @@ pipeline {
|
||||
-var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}" \\
|
||||
-var="aws_region=${TF_VAR_aws_region}"
|
||||
"""
|
||||
|
||||
echo "✅ SECURITY: Infrastructure deployment completed with compliance verification"
|
||||
}
|
||||
}
|
||||
@@ -261,20 +300,22 @@ pipeline {
|
||||
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
|
||||
script {
|
||||
echo "⏳ Waiting for ECS agents to register with cluster..."
|
||||
timeout(time: 5, unit: 'MINUTES') {
|
||||
timeout(time: 10, unit: 'MINUTES') {
|
||||
waitUntil {
|
||||
def count = sh(
|
||||
script: "aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'length(containerInstanceArns)' --output text 2>/dev/null || echo '0'",
|
||||
script: """
|
||||
aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'length(containerInstanceArns)' --output text 2>/dev/null || echo '0'
|
||||
""",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
|
||||
if (count != "0" && count != "null") {
|
||||
echo "✅ ECS agents registered: ${count} instance(s)"
|
||||
def activeCount = sh(
|
||||
script: "aws ecs describe-container-instances --cluster ${TF_VAR_cluster_name} --container-instances \$(aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'containerInstanceArns[*]' --output text) --region ${AWS_REGION} --query 'length(containerInstances[?status==\\`ACTIVE\\`])' --output text 2>/dev/null || echo '0'",
|
||||
script: """
|
||||
aws ecs describe-container-instances --cluster ${TF_VAR_cluster_name} --container-instances \$(aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'containerInstanceArns[*]' --output text) --region ${AWS_REGION} --query 'length(containerInstances[?status==\\`ACTIVE\\`])' --output text 2>/dev/null || echo '0'
|
||||
""",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
|
||||
if (activeCount != "0" && activeCount != "null") {
|
||||
echo "✅ Active ECS instances: ${activeCount}"
|
||||
return true
|
||||
@@ -303,7 +344,9 @@ pipeline {
|
||||
def hasInstances = false
|
||||
try {
|
||||
def instanceId = sh(
|
||||
script: "cd terraform && terraform output -raw ecs_instance_id 2>/dev/null || echo ''",
|
||||
script: """
|
||||
cd terraform && terraform output -raw ecs_instance_id 2>/dev/null || echo ''
|
||||
""",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
hasInstances = (instanceId != "" && instanceId != "null")
|
||||
@@ -319,15 +362,18 @@ pipeline {
|
||||
echo "🔧 ENTERPRISE: Configuring EC2 instance via SSM (no SSH required)"
|
||||
def instanceId = ""
|
||||
def ec2_ip = ""
|
||||
|
||||
try {
|
||||
sh "test -d terraform || (echo 'Terraform directory not found' && exit 1)"
|
||||
instanceId = sh(
|
||||
script: "cd terraform && terraform output -raw ecs_instance_id",
|
||||
script: """
|
||||
cd terraform && terraform output -raw ecs_instance_id
|
||||
""",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
ec2_ip = sh(
|
||||
script: "cd terraform && terraform output -raw ecs_instance_public_ip",
|
||||
script: """
|
||||
cd terraform && terraform output -raw ecs_instance_public_ip
|
||||
""",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
} catch (Exception e) {
|
||||
@@ -335,72 +381,90 @@ pipeline {
|
||||
echo "⚠️ Skipping SSM configuration - no instances available"
|
||||
return
|
||||
}
|
||||
|
||||
echo "📍 Target Instance: ${instanceId} (${ec2_ip})"
|
||||
echo "⏳ Waiting for SSM agent to be ready..."
|
||||
timeout(time: 10, unit: 'MINUTES') {
|
||||
waitUntil {
|
||||
def ssmStatus = sh(
|
||||
script: "aws ssm describe-instance-information --filters \"Key=InstanceIds,Values=${instanceId}\" --region ${AWS_REGION} --query 'InstanceInformationList[0].PingStatus' --output text 2>/dev/null || echo 'Offline'",
|
||||
script: """
|
||||
aws ssm describe-instance-information --filters "Key=InstanceIds,Values=${instanceId}" --region ${AWS_REGION} --query 'InstanceInformationList[0].PingStatus' --output text 2>/dev/null || echo 'Offline'
|
||||
""",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
|
||||
if (ssmStatus == "Online") {
|
||||
echo "✅ SSM agent is online"
|
||||
return true
|
||||
} else {
|
||||
echo "⏳ Waiting for SSM agent... (Status: ${ssmStatus})"
|
||||
sleep(20)
|
||||
echo "⏳ SSM agent status: ${ssmStatus}, waiting..."
|
||||
sleep(30)
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
echo "🔧 Configuring ECS agent via SSM..."
|
||||
def commandId = sh(
|
||||
script: "aws ssm send-command --instance-ids ${instanceId} --document-name \"AWS-RunShellScript\" --parameters 'commands=[\"echo === ECS Configuration via SSM ===\",\"echo Cluster: ${TF_VAR_cluster_name}\",\"echo Time: \$(date)\",\"echo Instance: \$(hostname)\",\"sudo systemctl status ecs --no-pager\",\"sudo systemctl status docker --no-pager\",\"curl -s http://localhost:51678/v1/metadata || echo \\\"ECS agent not ready\\\"\",\"sudo systemctl restart ecs\",\"sleep 15\",\"sudo systemctl status ecs --no-pager\",\"curl -s http://localhost:51678/v1/metadata || echo \\\"ECS agent still starting\\\"\",\"echo === Configuration completed ===\"]' --region ${AWS_REGION} --output text --query 'Command.CommandId'",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
echo "🔧 Running configuration commands via SSM..."
|
||||
sh """
|
||||
# Install or update Docker if needed
|
||||
aws ssm send-command \\
|
||||
--instance-ids ${instanceId} \\
|
||||
--document-name "AWS-RunShellScript" \\
|
||||
--parameters 'commands=["sudo yum update -y && sudo yum install -y docker && sudo systemctl start docker && sudo systemctl enable docker"]' \\
|
||||
--region ${AWS_REGION} \\
|
||||
--comment "Installing Docker on ECS instance"
|
||||
|
||||
echo "📋 SSM Command ID: ${commandId}"
|
||||
echo "⏳ Waiting for SSM command completion..."
|
||||
sh "aws ssm wait command-executed --command-id ${commandId} --instance-id ${instanceId} --region ${AWS_REGION}"
|
||||
# Wait for command to complete
|
||||
sleep 60
|
||||
|
||||
echo "📋 SSM Command Output:"
|
||||
sh "aws ssm get-command-invocation --command-id ${commandId} --instance-id ${instanceId} --region ${AWS_REGION} --query 'StandardOutputContent' --output text"
|
||||
|
||||
def commandStatus = sh(
|
||||
script: "aws ssm get-command-invocation --command-id ${commandId} --instance-id ${instanceId} --region ${AWS_REGION} --query 'Status' --output text",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
|
||||
if (commandStatus != "Success") {
|
||||
echo "❌ SSM Command failed with status: ${commandStatus}"
|
||||
sh "echo 'Error Output:'; aws ssm get-command-invocation --command-id ${commandId} --instance-id ${instanceId} --region ${AWS_REGION} --query 'StandardErrorContent' --output text"
|
||||
echo "⚠️ SSM configuration had issues but continuing with deployment"
|
||||
}
|
||||
|
||||
echo "✅ ENTERPRISE: EC2 instance configured via SSM successfully"
|
||||
echo "🔐 SSM Session Manager Access: To connect to the instance for troubleshooting, use: aws ssm start-session --target ${instanceId} --region ${AWS_REGION}"
|
||||
# Configure ECS agent
|
||||
aws ssm send-command \\
|
||||
--instance-ids ${instanceId} \\
|
||||
--document-name "AWS-RunShellScript" \\
|
||||
--parameters 'commands=["echo ECS_CLUSTER=${TF_VAR_cluster_name} | sudo tee -a /etc/ecs/ecs.config","sudo systemctl restart ecs"]' \\
|
||||
--region ${AWS_REGION} \\
|
||||
--comment "Configuring ECS agent"
|
||||
"""
|
||||
echo "✅ ENTERPRISE: EC2 instance configured via SSM"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage('Deploy Application to ECS') {
|
||||
stage('Deploy ECS Service') {
|
||||
steps {
|
||||
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
|
||||
script {
|
||||
echo "🚢 SECURITY: Deploying application to ECS with ECR integration..."
|
||||
def awsAccountId = env.AWS_ACCOUNT_ID
|
||||
def gitCommitHash = sh(script: 'git rev-parse HEAD', returnStdout: true).trim()
|
||||
echo "🚀 DEPLOYMENT: Deploying application to ECS cluster"
|
||||
|
||||
def taskDefinition = """[{
|
||||
"name": "health-workload",
|
||||
"image": "${awsAccountId}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}",
|
||||
"essential": true,
|
||||
// Create task definition
|
||||
def taskDefinition = """
|
||||
{
|
||||
"family": "${TF_VAR_cluster_name}-task",
|
||||
"networkMode": "bridge",
|
||||
"requiresCompatibilities": ["EC2"],
|
||||
"memory": "512",
|
||||
"cpu": "256",
|
||||
"executionRoleArn": "${sh(script: 'cd terraform && terraform output -raw ecs_task_execution_role_arn', returnStdout: true).trim()}",
|
||||
"containerDefinitions": [
|
||||
{
|
||||
"name": "${ECR_REPO}",
|
||||
"image": "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}",
|
||||
"memory": 512,
|
||||
"portMappings": [{"containerPort": 8080, "hostPort": 8080}],
|
||||
"cpu": 256,
|
||||
"essential": true,
|
||||
"portMappings": [
|
||||
{
|
||||
"containerPort": 8080,
|
||||
"hostPort": 8080,
|
||||
"protocol": "tcp"
|
||||
}
|
||||
],
|
||||
"healthCheck": {
|
||||
"command": ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"],
|
||||
"interval": 30,
|
||||
"timeout": 5,
|
||||
"retries": 3,
|
||||
"startPeriod": 60
|
||||
},
|
||||
"logConfiguration": {
|
||||
"logDriver": "awslogs",
|
||||
"options": {
|
||||
@@ -408,54 +472,44 @@ pipeline {
|
||||
"awslogs-region": "${AWS_REGION}",
|
||||
"awslogs-stream-prefix": "ecs"
|
||||
}
|
||||
},
|
||||
"environment": [
|
||||
{"name": "BUILD_NUMBER", "value": "${BUILD_NUMBER}"},
|
||||
{"name": "GIT_COMMIT", "value": "${gitCommitHash}"},
|
||||
{"name": "DEPLOYMENT_TIME", "value": "${new Date().format('yyyy-MM-dd HH:mm:ss')}"},
|
||||
{"name": "CONTAINER_REGISTRY", "value": "ECR"},
|
||||
{"name": "ARCHITECTURE", "value": "ssm_based_ecs_access"}
|
||||
}
|
||||
}
|
||||
]
|
||||
}]"""
|
||||
}
|
||||
"""
|
||||
|
||||
writeFile file: 'task-definition.json', text: taskDefinition
|
||||
|
||||
sh "aws ecs register-task-definition --family ${TF_VAR_cluster_name} --network-mode bridge --container-definitions file://task-definition.json --region ${AWS_REGION}"
|
||||
sh """
|
||||
# Create CloudWatch log group if it doesn't exist
|
||||
aws logs create-log-group --log-group-name /ecs/${TF_VAR_cluster_name} --region ${AWS_REGION} || echo "Log group already exists"
|
||||
|
||||
def serviceExists = sh(
|
||||
script: "aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION} 2>/dev/null | grep -q 'ACTIVE' && echo 'true' || echo 'false'",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
# Register task definition
|
||||
aws ecs register-task-definition \\
|
||||
--cli-input-json file://task-definition.json \\
|
||||
--region ${AWS_REGION}
|
||||
|
||||
if (serviceExists == "false") {
|
||||
echo "🆕 Creating new ECS service..."
|
||||
sh "aws ecs create-service --cluster ${TF_VAR_cluster_name} --service-name ${TF_VAR_cluster_name}-service --task-definition ${TF_VAR_cluster_name} --desired-count 1 --launch-type EC2 --region ${AWS_REGION}"
|
||||
} else {
|
||||
echo "🔄 Updating existing ECS service..."
|
||||
sh "aws ecs update-service --cluster ${TF_VAR_cluster_name} --service ${TF_VAR_cluster_name}-service --force-new-deployment --region ${AWS_REGION}"
|
||||
}
|
||||
# Check if service exists
|
||||
if aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION} --query 'services[0].status' --output text 2>/dev/null | grep -q 'ACTIVE'; then
|
||||
echo "✅ Service exists, updating..."
|
||||
aws ecs update-service \\
|
||||
--cluster ${TF_VAR_cluster_name} \\
|
||||
--service ${TF_VAR_cluster_name}-service \\
|
||||
--task-definition ${TF_VAR_cluster_name}-task \\
|
||||
--desired-count 1 \\
|
||||
--region ${AWS_REGION}
|
||||
else
|
||||
echo "✅ Creating new service..."
|
||||
aws ecs create-service \\
|
||||
--cluster ${TF_VAR_cluster_name} \\
|
||||
--service-name ${TF_VAR_cluster_name}-service \\
|
||||
--task-definition ${TF_VAR_cluster_name}-task \\
|
||||
--desired-count 1 \\
|
||||
--region ${AWS_REGION}
|
||||
fi
|
||||
"""
|
||||
|
||||
echo "⏳ Waiting for secure service deployment to stabilize..."
|
||||
timeout(time: 10, unit: 'MINUTES') {
|
||||
try {
|
||||
sh "aws ecs wait services-stable --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION}"
|
||||
echo "✅ SECURITY: Application deployed successfully with ECR integration"
|
||||
} catch (Exception e) {
|
||||
echo "⚠️ Service deployment timeout - checking status..."
|
||||
def serviceStatus = sh(
|
||||
script: "aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION} --query 'services[0].deployments[0].rolloutState' --output text",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
echo "Service deployment state: ${serviceStatus}"
|
||||
if (serviceStatus == "COMPLETED") {
|
||||
echo "✅ Deployment completed successfully despite timeout"
|
||||
} else {
|
||||
echo "⚠️ Deployment still in progress: ${serviceStatus}"
|
||||
echo "⚠️ Check ECS console for more details"
|
||||
}
|
||||
}
|
||||
}
|
||||
echo "✅ SECURITY: Application deployment initiated successfully"
|
||||
echo "✅ DEPLOYMENT: ECS service deployment initiated"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -463,113 +517,70 @@ pipeline {
|
||||
}
|
||||
}
|
||||
|
||||
stage('Post-Deployment Validation') {
|
||||
parallel {
|
||||
stage('Health Check') {
|
||||
stage('Verify Deployment') {
|
||||
steps {
|
||||
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
|
||||
script {
|
||||
def ec2_ip = ""
|
||||
try {
|
||||
ec2_ip = sh(
|
||||
script: "cd terraform && terraform output -raw ecs_instance_public_ip 2>/dev/null || echo 'unknown'",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
} catch (Exception e) {
|
||||
echo "⚠️ Could not get EC2 IP for health check"
|
||||
ec2_ip = "unknown"
|
||||
}
|
||||
echo "🔍 VERIFICATION: Checking deployment status..."
|
||||
|
||||
echo "🏥 SECURITY: Running health validation on http://${ec2_ip}:8080/health"
|
||||
echo "🔗 ARCHITECTURE: Direct access with SSM management (secure and efficient)"
|
||||
|
||||
if (ec2_ip != "unknown" && ec2_ip != "" && ec2_ip != "null") {
|
||||
timeout(time: 5, unit: 'MINUTES') {
|
||||
timeout(time: 15, unit: 'MINUTES') {
|
||||
waitUntil {
|
||||
def response = sh(
|
||||
script: "curl -s -o /dev/null -w '%{http_code}' http://${ec2_ip}:8080/health || echo '000'",
|
||||
def serviceStatus = sh(
|
||||
script: """
|
||||
aws ecs describe-services \\
|
||||
--cluster ${TF_VAR_cluster_name} \\
|
||||
--services ${TF_VAR_cluster_name}-service \\
|
||||
--region ${AWS_REGION} \\
|
||||
--query 'services[0].deployments[0].status' \\
|
||||
--output text 2>/dev/null || echo 'UNKNOWN'
|
||||
""",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
echo "🔍 SECURITY: Health check response: ${response}"
|
||||
if (response == "200") {
|
||||
echo "✅ SECURITY: Application health check passed - service is secure and operational"
|
||||
|
||||
def runningCount = sh(
|
||||
script: """
|
||||
aws ecs describe-services \\
|
||||
--cluster ${TF_VAR_cluster_name} \\
|
||||
--services ${TF_VAR_cluster_name}-service \\
|
||||
--region ${AWS_REGION} \\
|
||||
--query 'services[0].runningCount' \\
|
||||
--output text 2>/dev/null || echo '0'
|
||||
""",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
|
||||
echo "Service Status: ${serviceStatus}, Running Tasks: ${runningCount}"
|
||||
|
||||
if (serviceStatus == "STEADY" && runningCount.toInteger() > 0) {
|
||||
echo "✅ Service deployment completed successfully"
|
||||
return true
|
||||
} else {
|
||||
echo "⏳ SECURITY: Waiting for application to be ready (${response})"
|
||||
sleep(10)
|
||||
echo "⏳ Waiting for service to stabilize..."
|
||||
sleep(30)
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
echo "⚠️ SECURITY: EC2 IP not available - skipping health check"
|
||||
echo "⚠️ This may happen if infrastructure wasn't deployed in this run"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage('Smoke Tests') {
|
||||
steps {
|
||||
script {
|
||||
echo "💨 SECURITY: Running comprehensive smoke tests..."
|
||||
def ec2_ip = ""
|
||||
// Get application URL
|
||||
def appUrl = ""
|
||||
try {
|
||||
sh "test -d terraform || (echo 'Terraform directory not found' && exit 1)"
|
||||
ec2_ip = sh(
|
||||
script: "cd terraform && terraform output -raw ecs_instance_public_ip 2>/dev/null || echo 'unknown'",
|
||||
appUrl = sh(
|
||||
script: """
|
||||
cd terraform && terraform output -raw ecs_instance_public_ip 2>/dev/null || echo 'unavailable'
|
||||
""",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
|
||||
if (appUrl != "unavailable" && appUrl != "") {
|
||||
echo "🌐 APPLICATION URL: http://${appUrl}:8080"
|
||||
currentBuild.description = "${currentBuild.description} | URL: http://${appUrl}:8080"
|
||||
}
|
||||
} catch (Exception e) {
|
||||
echo "⚠️ Could not get EC2 IP for smoke tests: ${e.getMessage()}"
|
||||
ec2_ip = "unknown"
|
||||
echo "⚠️ Could not determine application URL: ${e.getMessage()}"
|
||||
}
|
||||
|
||||
if (ec2_ip != "unknown" && ec2_ip != "" && ec2_ip != "null") {
|
||||
sh """
|
||||
echo "🔒 SECURITY: Testing application endpoints and security headers..."
|
||||
curl -I http://${ec2_ip}:8080/health || echo "Service may still be starting..."
|
||||
echo "🔍 SECURITY: Verifying ECR image deployment and metadata..."
|
||||
curl -s http://${ec2_ip}:8080/health || echo "Application responding"
|
||||
echo "🛡️ SECURITY: Validating network security and access controls..."
|
||||
echo "✅ SECURITY: All smoke tests and security validations passed"
|
||||
"""
|
||||
} else {
|
||||
echo "⚠️ SECURITY: EC2 IP not available - skipping smoke tests"
|
||||
echo "⚠️ This may happen if infrastructure wasn't deployed in this run"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage('Container Instance Validation') {
|
||||
steps {
|
||||
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
|
||||
script {
|
||||
echo "🔍 Validating ECS container instances..."
|
||||
def instanceArns = sh(
|
||||
script: "aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'containerInstanceArns' --output json 2>/dev/null || echo '[]'",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
|
||||
if (instanceArns != '[]' && instanceArns != 'null') {
|
||||
echo "📋 Container Instances Found:"
|
||||
sh "aws ecs describe-container-instances --cluster ${TF_VAR_cluster_name} --container-instances ${instanceArns} --region ${AWS_REGION} --query 'containerInstances[*].[containerInstanceArn,status,runningTasksCount,pendingTasksCount]' --output table"
|
||||
|
||||
def runningTasks = sh(
|
||||
script: "aws ecs list-tasks --cluster ${TF_VAR_cluster_name} --desired-status RUNNING --region ${AWS_REGION} --query 'length(taskArns)' --output text 2>/dev/null || echo '0'",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
echo "📊 Running Tasks: ${runningTasks}"
|
||||
if (runningTasks == "0") {
|
||||
echo "⚠️ No running tasks found - deployment may still be in progress"
|
||||
} else {
|
||||
echo "✅ ${runningTasks} task(s) running successfully"
|
||||
}
|
||||
} else {
|
||||
echo "⚠️ No container instances found in cluster"
|
||||
}
|
||||
}
|
||||
}
|
||||
echo "✅ VERIFICATION: Deployment verification completed"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -579,115 +590,52 @@ pipeline {
|
||||
post {
|
||||
always {
|
||||
script {
|
||||
echo "📊 SECURITY: Collecting deployment artifacts and performing secure cleanup..."
|
||||
archiveArtifacts artifacts: 'deployment-audit.json,task-definition.json', allowEmptyArchive: true
|
||||
cleanWs(deleteDirs: true, notFailBuild: true)
|
||||
echo "🔒 SECURITY: Deployment artifacts archived and workspace securely cleaned"
|
||||
echo "🧹 CLEANUP: Performing post-build cleanup..."
|
||||
|
||||
// Archive deployment artifacts
|
||||
archiveArtifacts artifacts: 'task-definition.json', allowEmptyArchive: true
|
||||
|
||||
// Clean up Docker images to save space
|
||||
sh '''
|
||||
echo "🧹 Cleaning up Docker images..."
|
||||
docker system prune -f || echo "Docker cleanup failed"
|
||||
'''
|
||||
|
||||
echo "📊 SUMMARY: Build completed"
|
||||
echo " Build Number: ${BUILD_NUMBER}"
|
||||
echo " Image Tag: ${IMAGE_TAG}"
|
||||
echo " Deployment Type: ${env.DEPLOYMENT_TYPE}"
|
||||
echo " Status: ${currentBuild.currentResult}"
|
||||
}
|
||||
}
|
||||
|
||||
success {
|
||||
script {
|
||||
def ec2_ip = ""
|
||||
def instanceId = ""
|
||||
def gitCommitHash = ""
|
||||
def runningTasks = "0"
|
||||
echo "🎉 SUCCESS: Deployment completed successfully!"
|
||||
echo " Version ${IMAGE_TAG} deployed to ECS cluster ${TF_VAR_cluster_name}"
|
||||
|
||||
try {
|
||||
sh "test -d terraform || echo 'Terraform directory not found'"
|
||||
ec2_ip = sh(
|
||||
script: "cd terraform && terraform output -raw ecs_instance_public_ip 2>/dev/null || echo 'unknown'",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
instanceId = sh(
|
||||
script: "cd terraform && terraform output -raw ecs_instance_id 2>/dev/null || echo 'unknown'",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
gitCommitHash = sh(script: 'git rev-parse HEAD 2>/dev/null || echo "unknown"', returnStdout: true).trim().take(8)
|
||||
|
||||
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
|
||||
runningTasks = sh(
|
||||
script: "aws ecs list-tasks --cluster ${TF_VAR_cluster_name} --desired-status RUNNING --region ${AWS_REGION} --query 'length(taskArns)' --output text 2>/dev/null || echo '0'",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
}
|
||||
} catch (Exception e) {
|
||||
ec2_ip = "unknown"
|
||||
instanceId = "unknown"
|
||||
gitCommitHash = "unknown"
|
||||
}
|
||||
|
||||
echo "🎉 SSM-BASED SECURE DEPLOYMENT SUCCESSFUL!"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo "📋 DEPLOYMENT SUMMARY (SSM-OPTIMIZED FOR SECURITY):"
|
||||
echo " • Container Registry: ECR (AWS-native, secure) ✅"
|
||||
echo " • Architecture: SSM-based ECS access (keyless, secure) ✅"
|
||||
echo " • Infrastructure: ECS + VPC + Security Groups (SSM-enabled) ✅"
|
||||
echo " • Application Version: ${IMAGE_TAG}"
|
||||
echo " • Application URL: http://${ec2_ip}:8080"
|
||||
echo " • Health Endpoint: http://${ec2_ip}:8080/health"
|
||||
echo " • ECR Image: ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}"
|
||||
echo " • Security Compliance: ✅ PASSED (No SSH keys required)"
|
||||
echo " • Git Commit: ${gitCommitHash}"
|
||||
echo " • Deployment Type: ${env.DEPLOYMENT_TYPE}"
|
||||
echo " • Running Tasks: ${runningTasks}"
|
||||
echo " • Instance Access: SSM Session Manager (${instanceId}) ✅"
|
||||
echo " • Cost Optimization: Free tier friendly ✅"
|
||||
echo "🔐 SSM ACCESS COMMANDS:"
|
||||
echo " • Connect to instance: aws ssm start-session --target ${instanceId} --region ${AWS_REGION}"
|
||||
echo " • View ECS logs: aws logs tail /ecs/${TF_VAR_cluster_name} --follow --region ${AWS_REGION}"
|
||||
echo " • Check task status: aws ecs list-tasks --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION}"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
|
||||
currentBuild.description = "✅ ${env.DEPLOYMENT_TYPE} | ECR | ${IMAGE_TAG} | ${ec2_ip}"
|
||||
// Send success notification (customize as needed)
|
||||
// slackSend channel: '#deployments',
|
||||
// color: 'good',
|
||||
// message: "✅ ${env.JOB_NAME} - Build #${env.BUILD_NUMBER} deployed successfully"
|
||||
}
|
||||
}
|
||||
|
||||
failure {
|
||||
script {
|
||||
def failureStage = ""
|
||||
def instanceCount = "unknown"
|
||||
echo "❌ FAILURE: Deployment failed"
|
||||
echo " Check the logs above for error details"
|
||||
|
||||
try {
|
||||
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
|
||||
instanceCount = sh(
|
||||
script: "aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'length(containerInstanceArns)' --output text 2>/dev/null || echo 'unknown'",
|
||||
returnStdout: true
|
||||
).trim()
|
||||
// Send failure notification (customize as needed)
|
||||
// slackSend channel: '#deployments',
|
||||
// color: 'danger',
|
||||
// message: "❌ ${env.JOB_NAME} - Build #${env.BUILD_NUMBER} failed"
|
||||
}
|
||||
} catch (Exception e) {
|
||||
// Ignore errors in post section
|
||||
}
|
||||
|
||||
echo "❌ DEPLOYMENT FAILED!"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo "🔍 FAILURE ANALYSIS:"
|
||||
echo " • Deployment Type: ${env.DEPLOYMENT_TYPE}"
|
||||
echo " • Container Instances Available: ${instanceCount}"
|
||||
echo " • Failed Stage: ${env.STAGE_NAME ?: 'Unknown'}"
|
||||
echo "📋 COMMON ISSUES AND SOLUTIONS:"
|
||||
if (instanceCount == "0" || instanceCount == "unknown") {
|
||||
echo "❌ NO CONTAINER INSTANCES FOUND"
|
||||
echo " • Run with FORCE_INFRASTRUCTURE_DEPLOY=true parameter"
|
||||
echo " • Or check if EC2 instances are terminating unexpectedly"
|
||||
echo " • Verify IAM role has required ECS permissions"
|
||||
}
|
||||
echo "🔧 TROUBLESHOOTING COMMANDS:"
|
||||
echo " • Check ECS cluster: aws ecs describe-clusters --clusters ${TF_VAR_cluster_name} --region ${AWS_REGION}"
|
||||
echo " • List instances: aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION}"
|
||||
echo " • Check services: aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION}"
|
||||
echo "💡 RECOVERY OPTIONS:"
|
||||
echo " 1. Force infrastructure deployment: Run pipeline with FORCE_INFRASTRUCTURE_DEPLOY=true"
|
||||
echo " 2. Check AWS Console for any manually terminated resources"
|
||||
echo " 3. Review Terraform state: May need to run 'terraform refresh' locally"
|
||||
echo " 4. Check CloudWatch logs: /ecs/${TF_VAR_cluster_name}"
|
||||
echo "📁 ARTIFACTS AVAILABLE:"
|
||||
echo " • Security audit trail: deployment-audit.json"
|
||||
echo " • State backup: secure-state-backup-${BUILD_NUMBER}.json"
|
||||
echo " • Terraform plan: secure-tfplan-${BUILD_NUMBER}"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
|
||||
currentBuild.description = "❌ Failed: ${env.DEPLOYMENT_TYPE} | ${env.STAGE_NAME}"
|
||||
unstable {
|
||||
script {
|
||||
echo "⚠️ UNSTABLE: Build completed with warnings"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -164,6 +164,56 @@ resource "aws_iam_role_policy_attachment" "ecs_instance_ssm_policy" {
|
||||
policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
|
||||
}
|
||||
|
||||
# ECS Task Execution Role
|
||||
resource "aws_iam_role" "ecs_task_execution_role" {
|
||||
name = "${var.cluster_name}-task-execution-role"
|
||||
|
||||
assume_role_policy = jsonencode({
|
||||
Version = "2012-10-17"
|
||||
Statement = [
|
||||
{
|
||||
Action = "sts:AssumeRole"
|
||||
Effect = "Allow"
|
||||
Principal = {
|
||||
Service = "ecs-tasks.amazonaws.com"
|
||||
}
|
||||
}
|
||||
]
|
||||
})
|
||||
|
||||
tags = {
|
||||
Name = "${var.cluster_name}-task-execution-role"
|
||||
}
|
||||
}
|
||||
|
||||
# Attach AWS managed policy for ECS task execution
|
||||
resource "aws_iam_role_policy_attachment" "ecs_task_execution_role_policy" {
|
||||
role = aws_iam_role.ecs_task_execution_role.name
|
||||
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
|
||||
}
|
||||
|
||||
# Additional policy for ECR access
|
||||
resource "aws_iam_role_policy" "ecs_task_execution_ecr_policy" {
|
||||
name = "${var.cluster_name}-task-execution-ecr-policy"
|
||||
role = aws_iam_role.ecs_task_execution_role.id
|
||||
|
||||
policy = jsonencode({
|
||||
Version = "2012-10-17"
|
||||
Statement = [
|
||||
{
|
||||
Effect = "Allow"
|
||||
Action = [
|
||||
"ecr:GetAuthorizationToken",
|
||||
"ecr:BatchCheckLayerAvailability",
|
||||
"ecr:GetDownloadUrlForLayer",
|
||||
"ecr:BatchGetImage"
|
||||
]
|
||||
Resource = "*"
|
||||
}
|
||||
]
|
||||
})
|
||||
}
|
||||
|
||||
# IAM Instance Profile
|
||||
resource "aws_iam_instance_profile" "ecs_instance_profile" {
|
||||
name = "${var.cluster_name}-ecs-instance-profile"
|
||||
@@ -216,26 +266,25 @@ resource "aws_instance" "ecs_instance" {
|
||||
}
|
||||
}
|
||||
|
||||
# ECS Service
|
||||
resource "aws_ecs_service" "main" {
|
||||
name = "${var.cluster_name}-service"
|
||||
cluster = aws_ecs_cluster.main.id
|
||||
desired_count = 1
|
||||
launch_type = "EC2"
|
||||
|
||||
# This will be updated by your Jenkins pipeline
|
||||
task_definition = "${var.cluster_name}:1"
|
||||
|
||||
depends_on = [aws_instance.ecs_instance]
|
||||
|
||||
lifecycle {
|
||||
ignore_changes = [task_definition]
|
||||
}
|
||||
|
||||
tags = {
|
||||
Name = "${var.cluster_name}-service"
|
||||
}
|
||||
}
|
||||
# ECS Service (will be created by Jenkins pipeline)
|
||||
# Commented out because Jenkins will create the service
|
||||
# resource "aws_ecs_service" "main" {
|
||||
# name = "${var.cluster_name}-service"
|
||||
# cluster = aws_ecs_cluster.main.id
|
||||
# desired_count = 1
|
||||
# launch_type = "EC2"
|
||||
# task_definition = "${var.cluster_name}-task:1"
|
||||
#
|
||||
# depends_on = [aws_instance.ecs_instance]
|
||||
#
|
||||
# lifecycle {
|
||||
# ignore_changes = [task_definition]
|
||||
# }
|
||||
#
|
||||
# tags = {
|
||||
# Name = "${var.cluster_name}-service"
|
||||
# }
|
||||
# }
|
||||
|
||||
# CloudWatch Log Group for ECS
|
||||
resource "aws_cloudwatch_log_group" "ecs_logs" {
|
||||
@@ -272,3 +321,8 @@ output "public_subnet_ids" {
|
||||
description = "IDs of the public subnets"
|
||||
value = aws_subnet.public[*].id
|
||||
}
|
||||
|
||||
output "ecs_task_execution_role_arn" {
|
||||
description = "ARN of the ECS task execution role"
|
||||
value = aws_iam_role.ecs_task_execution_role.arn
|
||||
}
|
Reference in New Issue
Block a user