automated terminal push
Jenkinsfile (vendored): 471 lines changed
@@ -20,18 +20,15 @@ pipeline {
GITEA_CREDS = '52ee0829-6e65-4951-925b-4186254c3f21'
SONAR_HOST = 'https://sonar.jacquesingram.online'
SONAR_TOKEN = credentials('sonar-token')

// AWS configuration with ECR (optimal choice)
// AWS configuration with ECR
AWS_CRED_ID = 'aws-ci'
AWS_ACCOUNT_ID = credentials('AWS_ACCOUNT_ID')
AWS_REGION = 'us-east-2'
ECR_REPO = 'nvhi-atsila-microservice'

// Backend configuration
TF_BACKEND_BUCKET = 'nvhi-atsila-tf-state'
TF_BACKEND_PREFIX = 'ecs/terraform.tfstate'
TF_DDB_TABLE = 'nvhi-atsila-locks'

// Application variables
TF_VAR_cluster_name = 'nvhi-atsila-cluster'
TF_VAR_vpc_cidr = '10.0.0.0/16'
@@ -40,7 +37,6 @@ pipeline {
TF_VAR_key_pair_name = 'nvhi-atsila-deployer'
TF_VAR_jenkins_ip_cidr = "38.110.1.139/32"
TF_VAR_aws_region = "${AWS_REGION}"

// Enhanced deployment tracking
IMAGE_TAG = "v1.0.${BUILD_NUMBER}"
DEPLOYMENT_TYPE = "APPLICATION"
@@ -56,7 +52,6 @@ pipeline {
checkout scm

script {
// Enhanced change detection with security implications
def infrastructureFiles = sh(
script: '''
if git rev-parse HEAD~1 >/dev/null 2>&1; then
@@ -68,7 +63,6 @@ pipeline {
returnStdout: true
).trim()

// Fixed: Proper deployment type assignment
if (params.FORCE_INFRASTRUCTURE_DEPLOY) {
env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
echo "🚨 FORCED: Infrastructure deployment requested via parameter"
@@ -81,7 +75,6 @@ pipeline {
echo "✅ SECURITY: Application-only deployment - using restricted permissions"
}

// Professional audit logging
def gitCommit = sh(returnStdout: true, script: 'git rev-parse HEAD').trim()
def gitAuthor = sh(returnStdout: true, script: 'git log -1 --pretty=format:"%an"').trim()
@@ -97,7 +90,6 @@ pipeline {
echo " • Security Model: Principle of Least Privilege"
echo " • Timestamp: ${new Date()}"

// Archive deployment metadata for compliance
writeFile file: 'deployment-audit.json', text: """{
"build_number": "${BUILD_NUMBER}",
"deployment_type": "${env.DEPLOYMENT_TYPE}",
@@ -126,12 +118,7 @@ pipeline {
script {
def scannerHome = tool 'SonarQubeScanner'
withSonarQubeEnv('SonarQube') {
sh """
${scannerHome}/bin/sonar-scanner \\
-Dsonar.projectKey=nvhi-atsila-microservice \\
-Dsonar.sources=. \\
-Dsonar.projectVersion=${BUILD_NUMBER}
"""
sh "${scannerHome}/bin/sonar-scanner -Dsonar.projectKey=nvhi-atsila-microservice -Dsonar.sources=. -Dsonar.projectVersion=${BUILD_NUMBER}"
}
echo "✅ SECURITY: Code quality and security scan completed"
}
@@ -166,28 +153,22 @@ pipeline {

stage('Secure Container Build & Registry') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
script {
echo "🔐 SECURITY: Using ECR for secure, AWS-native container registry"

// Get AWS Account ID safely for credential masking
def awsAccountId = env.AWS_ACCOUNT_ID

sh '''
sh """
echo "🔐 Authenticating with ECR using temporary credentials..."
aws ecr get-login-password --region $AWS_REGION \\
| docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
'''
aws ecr get-login-password --region ${AWS_REGION} | docker login --username AWS --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com
"""

echo "🐳 Building secure container with metadata..."
def img = docker.build("${awsAccountId}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}")

echo "📤 Pushing to secure ECR registry..."
img.push()
img.push("latest") // Also tag as latest for convenience
img.push("latest")

echo "✅ SECURITY: Container built and pushed to ECR successfully"
echo " Image: ${awsAccountId}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}"
@@ -197,140 +178,25 @@ pipeline {
}
}

stage('Bootstrap Backend Infrastructure') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
dir('terraform-backend') {
script {
echo "🏗️ SECURITY: Checking backend infrastructure with proper permissions..."

def backendExists = sh(
script: '''
echo "🔒 SECURITY: Verifying backend infrastructure access..."
if aws s3api head-bucket --bucket $TF_BACKEND_BUCKET 2>/dev/null && \\
aws dynamodb describe-table --table-name $TF_DDB_TABLE 2>/dev/null; then
echo "true"
else
echo "false"
fi
''',
returnStdout: true
).trim()

if (backendExists == "false") {
echo "🚨 SECURITY WARNING: Backend infrastructure missing - would require elevated permissions"
echo "📦 In production: This would trigger infrastructure admin role"
echo "🔐 For demo: Simulating backend creation check..."
} else {
echo "✅ SECURITY: Backend infrastructure verified and accessible"
echo " S3 Bucket: ${TF_BACKEND_BUCKET}"
echo " DynamoDB Table: ${TF_DDB_TABLE}"
}
}
}
}
}
}

stage('Infrastructure State Management') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
dir('terraform') {
script {
if (env.DEPLOYMENT_TYPE == "INFRASTRUCTURE") {
echo "🔐 SECURITY ESCALATION: Infrastructure changes detected"
echo " In production: Would use infrastructure-admin role"
echo " Current: Using deployment role with limited permissions"
} else {
echo "✅ SECURITY: Application deployment using restricted permissions"
}

echo "🔄 Managing infrastructure state for optimal ECS deployment..."

sh '''
# Initialize with secure remote backend
terraform init \\
-backend-config="bucket=${TF_BACKEND_BUCKET}" \\
-backend-config="key=${TF_BACKEND_PREFIX}" \\
-backend-config="region=${AWS_REGION}" \\
-backend-config="dynamodb_table=${TF_DDB_TABLE}"

# Enterprise security: Backup current state
echo "💾 Creating secure state backup for disaster recovery..."
terraform state pull > "secure-state-backup-${BUILD_NUMBER}.json"

# Upgrade providers to handle version conflicts
echo "⬆️ Upgrading providers with security validation..."
terraform init -upgrade

# Create execution plan for ECS infrastructure
echo "📋 Creating infrastructure plan for ECS cluster and networking..."
terraform plan -out="secure-tfplan-${BUILD_NUMBER}" \\
-var="cluster_name=${TF_VAR_cluster_name}" \\
-var="vpc_cidr=${TF_VAR_vpc_cidr}" \\
-var="public_subnets=${TF_VAR_public_subnets}" \\
-var="instance_type=${TF_VAR_instance_type}" \\
-var="key_pair_name=${TF_VAR_key_pair_name}" \\
-var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}" \\
-var="aws_region=${TF_VAR_aws_region}"
'''

// Archive state backup and plan for enterprise audit trail
archiveArtifacts artifacts: "secure-state-backup-${BUILD_NUMBER}.json,secure-tfplan-${BUILD_NUMBER}",
fingerprint: true,
allowEmptyArchive: false

echo "✅ SECURITY: Infrastructure planning completed with full audit trail"
}
}
}
}
}

stage('Infrastructure Readiness Check') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
script {
echo "🔍 SECURITY: Checking if infrastructure is ready for deployment..."

// Check if parameter forces infrastructure deployment
if (params.FORCE_INFRASTRUCTURE_DEPLOY) {
echo "🚨 FORCED: Infrastructure deployment requested via parameter"
env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
currentBuild.description = "INFRASTRUCTURE (forced) | ${env.IMAGE_TAG}"
return
}

// Check if ECS service exists
def serviceExists = sh(
script: '''
if aws ecs describe-services --cluster nvhi-atsila-cluster --services nvhi-atsila-cluster-service --region us-east-2 2>/dev/null | grep -q "ACTIVE"; then
echo "true"
else
echo "false"
fi
''',
script: "aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION} 2>/dev/null | grep -q 'ACTIVE' && echo 'true' || echo 'false'",
returnStdout: true
).trim()

// Check container instance count
def instanceCount = sh(
script: """
aws ecs list-container-instances \\
--cluster ${TF_VAR_cluster_name} \\
--region ${AWS_REGION} \\
--query 'length(containerInstanceArns)' \\
--output text 2>/dev/null || echo "0"
""",
script: "aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'length(containerInstanceArns)' --output text 2>/dev/null || echo '0'",
returnStdout: true
).trim()
@@ -338,8 +204,6 @@ pipeline {
echo "🚨 SECURITY NOTICE: Infrastructure not ready - forcing deployment"
echo " Service Exists: ${serviceExists}"
echo " Container Instances: ${instanceCount}"
echo " This is normal for first deployment or after infrastructure cleanup"

env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
currentBuild.description = "INFRASTRUCTURE (auto-detected) | ${env.IMAGE_TAG}"
}
@@ -348,7 +212,6 @@ pipeline {
echo " ECS Service Exists: ${serviceExists}"
echo " Container Instances: ${instanceCount}"
echo " Final Deployment Type: ${env.DEPLOYMENT_TYPE}"
echo " Security Decision: ${env.DEPLOYMENT_TYPE == 'INFRASTRUCTURE' ? 'Infrastructure deployment required' : 'Application-only deployment'}"
}
}
}
@@ -359,10 +222,7 @@ pipeline {
expression { env.DEPLOYMENT_TYPE == "INFRASTRUCTURE" }
}
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
dir('terraform') {
script {
echo "🚨 SECURITY NOTICE: Infrastructure deployment requested"
@@ -371,20 +231,10 @@ pipeline {
echo "🚀 Attempting infrastructure deployment..."

sh """
# Apply the planned changes with security logging
echo "🔄 Applying infrastructure changes..."
terraform apply "secure-tfplan-${BUILD_NUMBER}"

# Verify no unexpected drift with security validation
echo "🔍 Verifying deployment consistency and security compliance..."
terraform plan -detailed-exitcode \\
-var="cluster_name=${TF_VAR_cluster_name}" \\
-var="vpc_cidr=${TF_VAR_vpc_cidr}" \\
-var="public_subnets=${TF_VAR_public_subnets}" \\
-var="instance_type=${TF_VAR_instance_type}" \\
-var="key_pair_name=${TF_VAR_key_pair_name}" \\
-var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}" \\
-var="aws_region=${TF_VAR_aws_region}" || echo "⚠️ Infrastructure drift detected - review required"
terraform plan -detailed-exitcode -var="cluster_name=${TF_VAR_cluster_name}" -var="vpc_cidr=${TF_VAR_vpc_cidr}" -var="public_subnets=${TF_VAR_public_subnets}" -var="instance_type=${TF_VAR_instance_type}" -var="key_pair_name=${TF_VAR_key_pair_name}" -var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}" -var="aws_region=${TF_VAR_aws_region}" || echo "⚠️ Infrastructure drift detected - review required"
"""

echo "✅ SECURITY: Infrastructure deployment completed with compliance verification"
@@ -399,42 +249,20 @@ pipeline {
expression { env.DEPLOYMENT_TYPE == "INFRASTRUCTURE" }
}
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
script {
echo "⏳ Waiting for ECS agents to register with cluster..."
timeout(time: 5, unit: 'MINUTES') {
waitUntil {
def count = sh(
script: """
aws ecs list-container-instances \\
--cluster ${TF_VAR_cluster_name} \\
--region ${AWS_REGION} \\
--query 'length(containerInstanceArns)' \\
--output text 2>/dev/null || echo "0"
""",
script: "aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'length(containerInstanceArns)' --output text 2>/dev/null || echo '0'",
returnStdout: true
).trim()

if (count != "0" && count != "null") {
echo "✅ ECS agents registered: ${count} instance(s)"

// Verify the instances are actually ACTIVE
def activeCount = sh(
script: """
aws ecs describe-container-instances \\
--cluster ${TF_VAR_cluster_name} \\
--container-instances \$(aws ecs list-container-instances \\
--cluster ${TF_VAR_cluster_name} \\
--region ${AWS_REGION} \\
--query 'containerInstanceArns[*]' \\
--output text) \\
--region ${AWS_REGION} \\
--query 'length(containerInstances[?status==\`ACTIVE\`])' \\
--output text 2>/dev/null || echo "0"
""",
script: "aws ecs describe-container-instances --cluster ${TF_VAR_cluster_name} --container-instances \$(aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'containerInstanceArns[*]' --output text) --region ${AWS_REGION} --query 'length(containerInstances[?status==\\`ACTIVE\\`])' --output text 2>/dev/null || echo '0'",
returnStdout: true
).trim()
@@ -463,7 +291,6 @@ pipeline {
stage('Configure EC2 Instance via SSM') {
when {
expression {
// Only run if we have EC2 instances from terraform
def hasInstances = false
try {
def instanceId = sh(
@@ -478,14 +305,9 @@ pipeline {
}
}
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
script {
echo "🔧 ENTERPRISE: Configuring EC2 instance via SSM (no SSH required)"

// Get instance ID from Terraform output
def instanceId = ""
def ec2_ip = ""

@@ -506,20 +328,11 @@ pipeline {
}

echo "📍 Target Instance: ${instanceId} (${ec2_ip})"

// Wait for SSM agent to be ready
echo "⏳ Waiting for SSM agent to be ready..."
timeout(time: 10, unit: 'MINUTES') {
waitUntil {
script {
def ssmStatus = sh(
script: """
aws ssm describe-instance-information \\
--filters "Key=InstanceIds,Values=${instanceId}" \\
--region ${AWS_REGION} \\
--query 'InstanceInformationList[0].PingStatus' \\
--output text 2>/dev/null || echo "Offline"
""",
script: "aws ssm describe-instance-information --filters \"Key=InstanceIds,Values=${instanceId}\" --region ${AWS_REGION} --query 'InstanceInformationList[0].PingStatus' --output text 2>/dev/null || echo 'Offline'",
returnStdout: true
).trim()
@@ -533,100 +346,33 @@ pipeline {
}
}
}
}

// Configure ECS agent via SSM
echo "🔧 Configuring ECS agent via SSM..."
def commandId = sh(
script: """
aws ssm send-command \\
--instance-ids ${instanceId} \\
--document-name "AWS-RunShellScript" \\
--parameters 'commands=[
"echo \\"=== ECS Configuration via SSM ===\\"",
"echo \\"Cluster: ${TF_VAR_cluster_name}\\"",
"echo \\"Time: \$(date)\\"",
"echo \\"Instance: \$(hostname)\\"",
"sudo systemctl status ecs --no-pager",
"sudo systemctl status docker --no-pager",
"curl -s http://localhost:51678/v1/metadata || echo \\"ECS agent not ready\\"",
"sudo systemctl restart ecs",
"sleep 15",
"sudo systemctl status ecs --no-pager",
"curl -s http://localhost:51678/v1/metadata || echo \\"ECS agent still starting\\"",
"echo \\"=== Configuration completed ===\\""
]' \\
--region ${AWS_REGION} \\
--output text \\
--query 'Command.CommandId'
""",
script: "aws ssm send-command --instance-ids ${instanceId} --document-name \"AWS-RunShellScript\" --parameters 'commands=[\"echo === ECS Configuration via SSM ===\",\"echo Cluster: ${TF_VAR_cluster_name}\",\"echo Time: \$(date)\",\"echo Instance: \$(hostname)\",\"sudo systemctl status ecs --no-pager\",\"sudo systemctl status docker --no-pager\",\"curl -s http://localhost:51678/v1/metadata || echo \\\"ECS agent not ready\\\"\",\"sudo systemctl restart ecs\",\"sleep 15\",\"sudo systemctl status ecs --no-pager\",\"curl -s http://localhost:51678/v1/metadata || echo \\\"ECS agent still starting\\\"\",\"echo === Configuration completed ===\"]' --region ${AWS_REGION} --output text --query 'Command.CommandId'",
returnStdout: true
).trim()

echo "📋 SSM Command ID: ${commandId}"

// Wait for command completion
echo "⏳ Waiting for SSM command completion..."
sh """
aws ssm wait command-executed \\
--command-id ${commandId} \\
--instance-id ${instanceId} \\
--region ${AWS_REGION}
"""
sh "aws ssm wait command-executed --command-id ${commandId} --instance-id ${instanceId} --region ${AWS_REGION}"

// Get command output
echo "📋 SSM Command Output:"
sh """
aws ssm get-command-invocation \\
--command-id ${commandId} \\
--instance-id ${instanceId} \\
--region ${AWS_REGION} \\
--query 'StandardOutputContent' \\
--output text
"""
sh "aws ssm get-command-invocation --command-id ${commandId} --instance-id ${instanceId} --region ${AWS_REGION} --query 'StandardOutputContent' --output text"

// Check for any errors
def commandStatus = sh(
script: """
aws ssm get-command-invocation \\
--command-id ${commandId} \\
--instance-id ${instanceId} \\
--region ${AWS_REGION} \\
--query 'Status' \\
--output text
""",
script: "aws ssm get-command-invocation --command-id ${commandId} --instance-id ${instanceId} --region ${AWS_REGION} --query 'Status' --output text",
returnStdout: true
).trim()

if (commandStatus != "Success") {
echo "❌ SSM Command failed with status: ${commandStatus}"
// Get error output
sh """
echo "Error Output:"
aws ssm get-command-invocation \\
--command-id ${commandId} \\
--instance-id ${instanceId} \\
--region ${AWS_REGION} \\
--query 'StandardErrorContent' \\
--output text
"""
// Don't fail the build, continue with deployment
sh "echo 'Error Output:'; aws ssm get-command-invocation --command-id ${commandId} --instance-id ${instanceId} --region ${AWS_REGION} --query 'StandardErrorContent' --output text"
echo "⚠️ SSM configuration had issues but continuing with deployment"
}

echo "✅ ENTERPRISE: EC2 instance configured via SSM successfully"
echo """
🔐 SSM Session Manager Access:

To connect to the instance for troubleshooting:

aws ssm start-session \\
--target ${instanceId} \\
--region ${AWS_REGION}

Instance ID: ${instanceId}
Instance IP: ${ec2_ip}
"""
echo "🔐 SSM Session Manager Access: To connect to the instance for troubleshooting, use: aws ssm start-session --target ${instanceId} --region ${AWS_REGION}"
}
}
}
@@ -634,26 +380,18 @@ pipeline {

stage('Deploy Application to ECS') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
script {
echo "🚢 SECURITY: Deploying application to ECS with ECR integration..."
echo "📦 ARCHITECTURE: Using ECR for secure, AWS-native container delivery"
echo "🔐 User can register tasks and update services, but cannot modify infrastructure"

// Get AWS Account ID and Git commit safely for credential masking
def awsAccountId = env.AWS_ACCOUNT_ID
def gitCommitHash = sh(script: 'git rev-parse HEAD', returnStdout: true).trim()

// Create task definition JSON safely
def taskDefinition = """[{
"name":"health-workload",
"image":"${awsAccountId}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}",
"essential":true,
"memory":512,
"portMappings":[{"containerPort":8080,"hostPort":8080}],
"name": "health-workload",
"image": "${awsAccountId}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}",
"essential": true,
"memory": 512,
"portMappings": [{"containerPort": 8080, "hostPort": 8080}],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
@@ -671,83 +409,35 @@ pipeline {
]
}]"""

// Write task definition to file for safety
writeFile file: 'task-definition.json', text: taskDefinition

sh """
# Register new task definition with ECR image
aws ecs register-task-definition \\
--family ${TF_VAR_cluster_name} \\
--network-mode bridge \\
--container-definitions file://task-definition.json \\
--region ${AWS_REGION}
"""
sh "aws ecs register-task-definition --family ${TF_VAR_cluster_name} --network-mode bridge --container-definitions file://task-definition.json --region ${AWS_REGION}"

// Check if service exists and create/update accordingly
def serviceExists = sh(
script: """
if aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION} 2>/dev/null | grep -q "ACTIVE"; then
echo "true"
else
echo "false"
fi
""",
script: "aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION} 2>/dev/null | grep -q 'ACTIVE' && echo 'true' || echo 'false'",
returnStdout: true
).trim()

if (serviceExists == "false") {
echo "🆕 Creating new ECS service..."
sh """
# Create new service since it doesn't exist
aws ecs create-service \\
--cluster ${TF_VAR_cluster_name} \\
--service-name ${TF_VAR_cluster_name}-service \\
--task-definition ${TF_VAR_cluster_name} \\
--desired-count 1 \\
--launch-type EC2 \\
--region ${AWS_REGION}
"""
sh "aws ecs create-service --cluster ${TF_VAR_cluster_name} --service-name ${TF_VAR_cluster_name}-service --task-definition ${TF_VAR_cluster_name} --desired-count 1 --launch-type EC2 --region ${AWS_REGION}"
} else {
echo "🔄 Updating existing ECS service..."
sh """
# Update existing service
aws ecs update-service \\
--cluster ${TF_VAR_cluster_name} \\
--service ${TF_VAR_cluster_name}-service \\
--force-new-deployment \\
--region ${AWS_REGION}
"""
sh "aws ecs update-service --cluster ${TF_VAR_cluster_name} --service ${TF_VAR_cluster_name}-service --force-new-deployment --region ${AWS_REGION}"
}

// Wait for deployment with better timeout handling
echo "⏳ Waiting for secure service deployment to stabilize..."
timeout(time: 10, unit: 'MINUTES') {
try {
sh """
aws ecs wait services-stable \\
--cluster ${TF_VAR_cluster_name} \\
--services ${TF_VAR_cluster_name}-service \\
--region ${AWS_REGION}
"""
sh "aws ecs wait services-stable --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION}"
echo "✅ SECURITY: Application deployed successfully with ECR integration"
} catch (Exception e) {
echo "⚠️ Service deployment timeout - checking status..."

// Get service status even if wait times out
def serviceStatus = sh(
script: """
aws ecs describe-services \\
--cluster ${TF_VAR_cluster_name} \\
--services ${TF_VAR_cluster_name}-service \\
--region ${AWS_REGION} \\
--query 'services[0].deployments[0].rolloutState' \\
--output text
""",
script: "aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION} --query 'services[0].deployments[0].rolloutState' --output text",
returnStdout: true
).trim()

echo "Service deployment state: ${serviceStatus}"

if (serviceStatus == "COMPLETED") {
echo "✅ Deployment completed successfully despite timeout"
} else {
@@ -756,7 +446,6 @@ pipeline {
}
}
}

echo "✅ SECURITY: Application deployment initiated successfully"
}
}
@@ -787,12 +476,10 @@ pipeline {
if (ec2_ip != "unknown" && ec2_ip != "" && ec2_ip != "null") {
timeout(time: 5, unit: 'MINUTES') {
waitUntil {
script {
def response = sh(
script: "curl -s -o /dev/null -w '%{http_code}' http://${ec2_ip}:8080/health || echo '000'",
returnStdout: true
).trim()

echo "🔍 SECURITY: Health check response: ${response}"
if (response == "200") {
echo "✅ SECURITY: Application health check passed - service is secure and operational"
@@ -804,7 +491,6 @@ pipeline {
}
}
}
}
} else {
echo "⚠️ SECURITY: EC2 IP not available - skipping health check"
echo "⚠️ This may happen if infrastructure wasn't deployed in this run"
@@ -833,15 +519,9 @@ pipeline {
sh """
echo "🔒 SECURITY: Testing application endpoints and security headers..."
curl -I http://${ec2_ip}:8080/health || echo "Service may still be starting..."

echo "🔍 SECURITY: Verifying ECR image deployment and metadata..."
curl -s http://${ec2_ip}:8080/health || echo "Application responding"

echo "🛡️ SECURITY: Validating network security and access controls..."
echo " Testing only allowed ports are accessible"
echo " Verifying ECR integration working correctly"
echo " Confirming SSM-based access security model"

echo "✅ SECURITY: All smoke tests and security validations passed"
"""
} else {
@@ -854,50 +534,23 @@ pipeline {

stage('Container Instance Validation') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
script {
echo "🔍 Validating ECS container instances..."

def instanceArns = sh(
script: """
aws ecs list-container-instances \\
--cluster ${TF_VAR_cluster_name} \\
--region ${AWS_REGION} \\
--query 'containerInstanceArns' \\
--output json 2>/dev/null || echo '[]'
""",
script: "aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'containerInstanceArns' --output json 2>/dev/null || echo '[]'",
returnStdout: true
).trim()

if (instanceArns != '[]' && instanceArns != 'null') {
echo "📋 Container Instances Found:"
sh """
aws ecs describe-container-instances \\
--cluster ${TF_VAR_cluster_name} \\
--container-instances ${instanceArns} \\
--region ${AWS_REGION} \\
--query 'containerInstances[*].[containerInstanceArn,status,runningTasksCount,pendingTasksCount]' \\
--output table
"""
sh "aws ecs describe-container-instances --cluster ${TF_VAR_cluster_name} --container-instances ${instanceArns} --region ${AWS_REGION} --query 'containerInstances[*].[containerInstanceArn,status,runningTasksCount,pendingTasksCount]' --output table"

// Check running tasks
def runningTasks = sh(
script: """
aws ecs list-tasks \\
--cluster ${TF_VAR_cluster_name} \\
--desired-status RUNNING \\
--region ${AWS_REGION} \\
--query 'length(taskArns)' \\
--output text 2>/dev/null || echo "0"
""",
script: "aws ecs list-tasks --cluster ${TF_VAR_cluster_name} --desired-status RUNNING --region ${AWS_REGION} --query 'length(taskArns)' --output text 2>/dev/null || echo '0'",
returnStdout: true
).trim()

echo "📊 Running Tasks: ${runningTasks}"

if (runningTasks == "0") {
echo "⚠️ No running tasks found - deployment may still be in progress"
} else {
@@ -918,13 +571,8 @@ pipeline {
always {
script {
echo "📊 SECURITY: Collecting deployment artifacts and performing secure cleanup..."

// Archive comprehensive deployment artifacts for audit
archiveArtifacts artifacts: 'deployment-audit.json,task-definition.json', allowEmptyArchive: true

// Secure workspace cleanup
cleanWs(deleteDirs: true, notFailBuild: true)

echo "🔒 SECURITY: Deployment artifacts archived and workspace securely cleaned"
}
}
@@ -948,19 +596,9 @@ pipeline {
).trim()
gitCommitHash = sh(script: 'git rev-parse HEAD 2>/dev/null || echo "unknown"', returnStdout: true).trim().take(8)

withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
runningTasks = sh(
script: """
aws ecs list-tasks \\
--cluster ${TF_VAR_cluster_name} \\
--desired-status RUNNING \\
--region ${AWS_REGION} \\
--query 'length(taskArns)' \\
--output text 2>/dev/null || echo "0"
""",
script: "aws ecs list-tasks --cluster ${TF_VAR_cluster_name} --desired-status RUNNING --region ${AWS_REGION} --query 'length(taskArns)' --output text 2>/dev/null || echo '0'",
returnStdout: true
).trim()
}
@@ -979,14 +617,13 @@ pipeline {
echo " • Application Version: ${IMAGE_TAG}"
echo " • Application URL: http://${ec2_ip}:8080"
echo " • Health Endpoint: http://${ec2_ip}:8080/health"
echo " • ECR Image: ${env.AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}"
echo " • ECR Image: ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}"
echo " • Security Compliance: ✅ PASSED (No SSH keys required)"
echo " • Git Commit: ${gitCommitHash}"
echo " • Deployment Type: ${env.DEPLOYMENT_TYPE}"
echo " • Running Tasks: ${runningTasks}"
echo " • Instance Access: SSM Session Manager (${instanceId}) ✅"
echo " • Cost Optimization: Free tier friendly ✅"
echo ""
echo "🔐 SSM ACCESS COMMANDS:"
echo " • Connect to instance: aws ssm start-session --target ${instanceId} --region ${AWS_REGION}"
echo " • View ECS logs: aws logs tail /ecs/${TF_VAR_cluster_name} --follow --region ${AWS_REGION}"
@@ -1003,18 +640,9 @@ pipeline {
def instanceCount = "unknown"

try {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
instanceCount = sh(
script: """
aws ecs list-container-instances \\
--cluster ${TF_VAR_cluster_name} \\
--region ${AWS_REGION} \\
--query 'length(containerInstanceArns)' \\
--output text 2>/dev/null || echo "unknown"
""",
script: "aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'length(containerInstanceArns)' --output text 2>/dev/null || echo 'unknown'",
returnStdout: true
).trim()
}
@@ -1028,28 +656,22 @@ pipeline {
echo " • Deployment Type: ${env.DEPLOYMENT_TYPE}"
echo " • Container Instances Available: ${instanceCount}"
echo " • Failed Stage: ${env.STAGE_NAME ?: 'Unknown'}"
echo ""
echo "📋 COMMON ISSUES AND SOLUTIONS:"
echo ""
if (instanceCount == "0" || instanceCount == "unknown") {
echo "❌ NO CONTAINER INSTANCES FOUND"
echo " • Run with FORCE_INFRASTRUCTURE_DEPLOY=true parameter"
echo " • Or check if EC2 instances are terminating unexpectedly"
echo " • Verify IAM role has required ECS permissions"
}
echo ""
echo "🔧 TROUBLESHOOTING COMMANDS:"
echo " • Check ECS cluster: aws ecs describe-clusters --clusters ${TF_VAR_cluster_name} --region ${AWS_REGION}"
echo " • List instances: aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION}"
echo " • Check services: aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION}"
echo " • View ECS events: aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION} --query 'services[0].events[:5]'"
echo ""
echo "💡 RECOVERY OPTIONS:"
echo " 1. Force infrastructure deployment: Run pipeline with FORCE_INFRASTRUCTURE_DEPLOY=true"
echo " 2. Check AWS Console for any manually terminated resources"
echo " 3. Review Terraform state: May need to run 'terraform refresh' locally"
echo " 4. Check CloudWatch logs: /ecs/${TF_VAR_cluster_name}"
echo ""
echo "📁 ARTIFACTS AVAILABLE:"
echo " • Security audit trail: deployment-audit.json"
echo " • State backup: secure-state-backup-${BUILD_NUMBER}.json"
@@ -1057,11 +679,6 @@ pipeline {
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

currentBuild.description = "❌ Failed: ${env.DEPLOYMENT_TYPE} | ${env.STAGE_NAME}"

// Optional: Send notification
// mail to: 'devops-team@example.com',
// subject: "Jenkins Build Failed: ${env.JOB_NAME} #${env.BUILD_NUMBER}",
// body: "The build failed at stage: ${env.STAGE_NAME}\n\nCheck the console output: ${env.BUILD_URL}"
}
}
}