automated terminal push

lenape
2025-07-12 19:10:42 +00:00
parent 97bf3fb9a1
commit 5e1a668093

Jenkinsfile

@@ -1,19 +1,6 @@
pipeline {
agent any
parameters {
booleanParam(
name: 'DESTROY_INFRASTRUCTURE',
defaultValue: false,
description: 'WARNING: This will destroy all infrastructure. Only use for testing.'
)
choice(
name: 'ENVIRONMENT',
choices: ['staging', 'production'],
description: 'Target environment for deployment'
)
}
environment {
GITEA_REPO = 'https://code.jacquesingram.online/lenape/nvhi-atsila-microservice.git'
GITEA_CREDS = '52ee0829-6e65-4951-925b-4186254c3f21'
@@ -50,22 +37,6 @@ pipeline {
}
stages {
stage('Pre-flight Checks') {
steps {
script {
// Enterprise-grade pre-checks
echo "🔍 Running pre-flight checks..."
echo "Environment: ${params.ENVIRONMENT}"
echo "Build Number: ${BUILD_NUMBER}"
echo "Git Commit: ${env.GIT_COMMIT}"
if (params.DESTROY_INFRASTRUCTURE) {
error("❌ DESTROY_INFRASTRUCTURE is enabled. This pipeline is halted for safety.")
}
}
}
}
stage('Checkout') {
steps {
checkout scm
@@ -74,11 +45,12 @@ pipeline {
// Archive the Git commit for traceability
def gitCommit = sh(returnStdout: true, script: 'git rev-parse HEAD').trim()
currentBuild.description = "Commit: ${gitCommit.take(8)}"
echo "🚀 Starting deployment for commit: ${gitCommit.take(8)}"
}
}
}
stage('Security Scan') {
stage('Security & Quality Checks') {
parallel {
stage('SonarQube Analysis') {
steps {
@@ -96,13 +68,12 @@ pipeline {
}
}
stage('Terraform Security Scan') {
stage('Terraform Validation') {
steps {
script {
echo "🔒 Running Terraform security checks..."
// In enterprise, you'd run tools like Checkov, tfsec, or Snyk
echo "🔒 Running Terraform security and validation checks..."
sh '''
echo "Running terraform validate..."
echo "Validating Terraform configuration..."
cd terraform && terraform init -backend=false
terraform validate
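# Hedged example (assumption, not part of this commit): the tools mentioned
# above (tfsec, Checkov) could be wired in at this point, for instance:
#   tfsec . --minimum-severity HIGH
#   checkov -d . --quiet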
echo "✅ Terraform validation passed"
@@ -113,76 +84,104 @@ pipeline {
}
}
stage('Build & Test') {
parallel {
stage('Docker Build') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
sh '''
# Login to ECR
aws ecr get-login-password --region $AWS_REGION \
| docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
'''
stage('Build & Push Container') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
sh '''
echo "🔐 Logging into ECR..."
aws ecr get-login-password --region $AWS_REGION \
| docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
'''
script {
def img = docker.build("${IMAGE_NAME}:${IMAGE_TAG}")
script {
echo "🐳 Building container image..."
def img = docker.build("${IMAGE_NAME}:${IMAGE_TAG}")
// Enterprise: Run container security scan
echo "🔍 Running container security scan..."
// In enterprise: docker run --rm -v /var/run/docker.sock:/var/run/docker.sock aquasec/trivy image ${IMAGE_NAME}:${IMAGE_TAG}
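// Hedged sketch (assumption, not part of this commit): a gating version of the
// scan above could fail the build on critical findings, roughly:
//   sh "docker run --rm -v /var/run/docker.sock:/var/run/docker.sock " +
//      "aquasec/trivy:latest image --exit-code 1 --severity CRITICAL ${IMAGE_NAME}:${IMAGE_TAG}"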
echo "📤 Pushing to ECR..."
img.push()
img.push("latest") // Also tag as latest for convenience
img.push()
img.push("latest") // Also tag as latest
}
}
echo "✅ Container pushed successfully: ${IMAGE_NAME}:${IMAGE_TAG}"
}
}
}
}
stage('Unit Tests') {
steps {
stage('Bootstrap Backend Infrastructure') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
dir('terraform-backend') {
script {
echo "🧪 Running unit tests..."
sh '''
echo "Running Python unit tests..."
# python -m pytest tests/ --junitxml=test-results.xml
echo "✅ All tests passed"
'''
echo "🏗️ Checking backend infrastructure..."
// Check if backend resources exist
def backendExists = sh(
script: '''
if aws s3api head-bucket --bucket $TF_BACKEND_BUCKET 2>/dev/null && \
aws dynamodb describe-table --table-name $TF_DDB_TABLE 2>/dev/null; then
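# Note (hedged, not part of this commit): describe-table also prints its JSON
# result to stdout, which returnStdout captures; redirecting it as well
# (>/dev/null 2>&1) would keep the captured value to a clean "true"/"false".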
echo "true"
else
echo "false"
fi
''',
returnStdout: true
).trim()
if (backendExists == "false") {
echo "📦 Backend infrastructure doesn't exist. Creating..."
sh '''
terraform init
terraform plan -out=backend.tfplan \
-var="aws_region=$AWS_REGION" \
-var="backend_bucket_name=$TF_BACKEND_BUCKET" \
-var="lock_table_name=$TF_DDB_TABLE"
terraform apply backend.tfplan
'''
echo "✅ Backend infrastructure created successfully"
} else {
echo "✅ Backend infrastructure already exists. Continuing..."
}
}
}
}
}
}
stage('Infrastructure Planning') {
stage('Infrastructure State Management') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
script {
echo "📋 Planning infrastructure changes..."
dir('terraform') {
script {
echo "🔄 Managing infrastructure state and provider upgrades..."
// Backup current state (enterprise practice)
dir('terraform') {
sh '''
# Initialize and backup state
# Initialize with remote backend
terraform init \
-backend-config="bucket=${TF_BACKEND_BUCKET}" \
-backend-config="key=${TF_BACKEND_PREFIX}" \
-backend-config="region=${AWS_REGION}" \
-backend-config="dynamodb_table=${TF_DDB_TABLE}"
# Backup state before changes
terraform state pull > state-backup-${BUILD_NUMBER}.json
# Backup current state (enterprise best practice)
echo "💾 Backing up current state..."
terraform state pull > "state-backup-${BUILD_NUMBER}.json"
# Upgrade providers if needed
# Upgrade providers to handle version conflicts
echo "⬆️ Upgrading providers..."
terraform init -upgrade
# Create execution plan
terraform plan -out=tfplan-${BUILD_NUMBER} \
echo "📋 Creating execution plan..."
terraform plan -out="tfplan-${BUILD_NUMBER}" \
-var="cluster_name=${TF_VAR_cluster_name}" \
-var="vpc_cidr=${TF_VAR_vpc_cidr}" \
-var="public_subnets=${TF_VAR_public_subnets}" \
@@ -192,18 +191,19 @@ pipeline {
-var="aws_region=${TF_VAR_aws_region}"
'''
// Archive the plan for review
archiveArtifacts artifacts: "state-backup-${BUILD_NUMBER}.json,tfplan-${BUILD_NUMBER}", fingerprint: true
// Archive state backup and plan for audit trail
archiveArtifacts artifacts: "state-backup-${BUILD_NUMBER}.json,tfplan-${BUILD_NUMBER}",
fingerprint: true,
allowEmptyArchive: false
echo "✅ Infrastructure planning completed"
}
}
}
}
}
stage('Infrastructure Deployment') {
when {
not { params.DESTROY_INFRASTRUCTURE }
}
stage('Deploy Infrastructure') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
@@ -211,13 +211,14 @@ pipeline {
]]) {
dir('terraform') {
script {
echo "🚀 Deploying infrastructure..."
echo "🚀 Deploying infrastructure changes..."
sh """
# Apply the planned changes
terraform apply tfplan-${BUILD_NUMBER}
terraform apply "tfplan-${BUILD_NUMBER}"
# Verify no drift
# Verify no unexpected drift
echo "🔍 Verifying deployment consistency..."
terraform plan -detailed-exitcode \
-var="cluster_name=${TF_VAR_cluster_name}" \
-var="vpc_cidr=${TF_VAR_vpc_cidr}" \
@@ -225,20 +226,19 @@ pipeline {
-var="instance_type=${TF_VAR_instance_type}" \
-var="key_pair_name=${TF_VAR_key_pair_name}" \
-var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}" \
-var="aws_region=${TF_VAR_aws_region}" || echo "Infrastructure drift detected"
-var="aws_region=${TF_VAR_aws_region}" || echo "⚠️ Infrastructure drift detected - review required"
"""
echo "✅ Infrastructure deployment completed"
}
}
}
}
}
stage('Application Deployment') {
when {
not { params.DESTROY_INFRASTRUCTURE }
}
stage('Configure & Deploy Application') {
parallel {
stage('Configure Infrastructure') {
stage('Configure EC2 Instance') {
steps {
script {
def ec2_ip = sh(
@@ -255,57 +255,64 @@ pipeline {
inventory: 'ansible/hosts',
credentialsId: env.SSH_CRED_ID
)
echo "✅ EC2 configuration completed"
}
}
stage('Deploy Application') {
stage('Deploy Application to ECS') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
sh """
script {
echo "🚢 Deploying application version ${IMAGE_TAG}..."
# Register new task definition
aws ecs register-task-definition \
--family ${TF_VAR_cluster_name} \
--network-mode bridge \
--container-definitions '[{
"name":"health-workload",
"image":"${IMAGE_NAME}:${IMAGE_TAG}",
"essential":true,
"memory":512,
"portMappings":[{"containerPort":8080,"hostPort":8080}],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "/ecs/${TF_VAR_cluster_name}",
"awslogs-region": "${AWS_REGION}",
"awslogs-stream-prefix": "ecs"
}
},
"environment": [
{"name": "BUILD_NUMBER", "value": "${BUILD_NUMBER}"},
{"name": "GIT_COMMIT", "value": "${env.GIT_COMMIT ?: 'unknown'}"}
]
}]' \
--region ${AWS_REGION}
sh """
# Register new task definition with build metadata
aws ecs register-task-definition \
--family ${TF_VAR_cluster_name} \
--network-mode bridge \
--container-definitions '[{
"name":"health-workload",
"image":"${IMAGE_NAME}:${IMAGE_TAG}",
"essential":true,
"memory":512,
"portMappings":[{"containerPort":8080,"hostPort":8080}],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "/ecs/${TF_VAR_cluster_name}",
"awslogs-region": "${AWS_REGION}",
"awslogs-stream-prefix": "ecs"
}
},
"environment": [
{"name": "BUILD_NUMBER", "value": "${BUILD_NUMBER}"},
{"name": "GIT_COMMIT", "value": "${env.GIT_COMMIT ?: 'unknown'}"},
{"name": "DEPLOYMENT_TIME", "value": "${new Date().format('yyyy-MM-dd HH:mm:ss')}"}
]
}]' \
--region ${AWS_REGION}
# Rolling deployment
aws ecs update-service \
--cluster ${TF_VAR_cluster_name} \
--service ${TF_VAR_cluster_name}-service \
--force-new-deployment \
--region ${AWS_REGION}
# Perform rolling deployment
aws ecs update-service \
--cluster ${TF_VAR_cluster_name} \
--service ${TF_VAR_cluster_name}-service \
--force-new-deployment \
--region ${AWS_REGION}
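# Hedged note (not part of this commit): --force-new-deployment redeploys the
# service's currently configured task definition; to roll out the revision
# registered above, the call would typically also pass
#   --task-definition <task family>
# since a bare family name resolves to its latest ACTIVE revision.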
# Wait for stable deployment
echo "⏳ Waiting for deployment to stabilize..."
aws ecs wait services-stable \
--cluster ${TF_VAR_cluster_name} \
--services ${TF_VAR_cluster_name}-service \
--region ${AWS_REGION}
"""
# Wait for deployment to stabilize
echo "⏳ Waiting for service deployment to stabilize..."
aws ecs wait services-stable \
--cluster ${TF_VAR_cluster_name} \
--services ${TF_VAR_cluster_name}-service \
--region ${AWS_REGION}
"""
echo "✅ Application deployment completed"
}
}
}
}
@@ -313,11 +320,8 @@ pipeline {
}
stage('Post-Deployment Validation') {
when {
not { params.DESTROY_INFRASTRUCTURE }
}
parallel {
stage('Health Checks') {
stage('Health Check') {
steps {
script {
def ec2_ip = sh(
@@ -336,30 +340,35 @@ pipeline {
).trim()
echo "Health check response: ${response}"
return response == "200"
if (response == "200") {
echo "✅ Health check passed!"
return true
} else {
echo "⏳ Waiting for application to be ready..."
sleep(10)
return false
}
}
}
}
echo "✅ Health checks passed!"
}
}
}
stage('Integration Tests') {
stage('Smoke Tests') {
steps {
script {
echo "🔗 Running integration tests..."
echo "💨 Running smoke tests..."
def ec2_ip = sh(
script: "terraform -chdir=terraform output -raw ecs_instance_public_ip",
returnStdout: true
).trim()
// In enterprise: Run comprehensive API tests, load tests, etc.
// Basic smoke tests
sh """
echo "Running API tests against http://${ec2_ip}:8080"
# pytest integration_tests/ --host=${ec2_ip} --port=8080
echo "✅ Integration tests passed"
echo "Testing application endpoints..."
curl -f http://${ec2_ip}:8080/health || exit 1
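# Hedged option: a short retry makes this check less flaky right after a
# deployment, e.g.
#   curl -fsS --retry 5 --retry-delay 3 http://${ec2_ip}:8080/health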
echo "✅ All smoke tests passed"
"""
}
}
@@ -371,33 +380,46 @@ pipeline {
post {
always {
script {
// Enterprise: Always capture logs and metrics
echo "📊 Collecting deployment metrics..."
echo "📊 Collecting deployment artifacts and cleanup..."
// Archive important files
// Archive deployment artifacts
archiveArtifacts artifacts: 'ansible/hosts', allowEmptyArchive: true
// Clean workspace but preserve state backups
cleanWs(deleteDirs: true, notFailBuild: true, patterns: [[pattern: 'state-backup-*.json', type: 'EXCLUDE']])
// Clean workspace but preserve important files
cleanWs(deleteDirs: true, notFailBuild: true)
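// Note: this removes the workspace copy of state-backup-${BUILD_NUMBER}.json,
// but the file remains available as an archived artifact from the
// Infrastructure State Management stage.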
}
}
success {
script {
echo "🎉 Pipeline completed successfully!"
echo "🚀 Application deployed with version: ${IMAGE_TAG}"
def ec2_ip = ""
try {
ec2_ip = sh(
script: "terraform -chdir=terraform output -raw ecs_instance_public_ip",
returnStdout: true
).trim()
} catch (Exception e) {
ec2_ip = "unknown"
}
// In enterprise: Send notifications to Slack, Teams, etc.
// slackSend channel: '#deployments', message: "✅ nvhi-atsila deployed successfully to ${params.ENVIRONMENT}"
echo "🎉 Deployment completed successfully!"
echo "📋 Deployment Summary:"
echo " • Environment: Production"
echo " • Application Version: ${IMAGE_TAG}"
echo " • Application URL: http://${ec2_ip}:8080"
echo " • Build Number: ${BUILD_NUMBER}"
echo " • Git Commit: ${env.GIT_COMMIT?.take(8) ?: 'unknown'}"
currentBuild.description = "✅ Deployed ${IMAGE_TAG} to ${ec2_ip}"
}
}
failure {
script {
echo "❌ Pipeline failed!"
echo "❌ Deployment failed!"
echo "🔍 Check the logs above for details"
echo "💡 State backup available: state-backup-${BUILD_NUMBER}.json"
// In enterprise: Alert on-call team, create incident tickets
// pagerDuty serviceKey: 'xxx', incidentKey: env.BUILD_TAG
currentBuild.description = "❌ Failed at ${env.STAGE_NAME}"
}
}