nvhi-atsila-microservice/Jenkinsfile

pipeline {
    agent any

    environment {
        GITEA_REPO     = 'https://code.jacquesingram.online/lenape/nvhi-atsila-microservice.git'
        GITEA_CREDS    = '52ee0829-6e65-4951-925b-4186254c3f21'
        SONAR_HOST     = 'https://sonar.jacquesingram.online'
        SONAR_TOKEN    = credentials('sonar-token')
        AWS_CRED_ID    = 'aws-ci'
        AWS_ACCOUNT_ID = credentials('AWS_ACCOUNT_ID')
        AWS_REGION     = 'us-east-2'
        ECR_REPO       = 'nvhi-atsila-microservice'

        // Backend configuration
        TF_BACKEND_BUCKET = 'nvhi-atsila-tf-state'
        TF_BACKEND_PREFIX = 'ecs/terraform.tfstate'
        TF_DDB_TABLE      = 'nvhi-atsila-locks'
        SSH_CRED_ID       = 'jenkins-ssh'

        // Application variables
        TF_VAR_cluster_name   = 'nvhi-atsila-cluster'
        TF_VAR_vpc_cidr       = '10.0.0.0/16'
        TF_VAR_public_subnets = '10.0.1.0/24,10.0.2.0/24'
        TF_VAR_instance_type  = 't2.micro'
        TF_VAR_key_pair_name  = 'nvhi-atsila-deployer'
        // JENKINS_SSH_CIDR is not defined in this file; it is expected to be supplied by the
        // Jenkins global environment (an IP address, to which /32 is appended here).
        TF_VAR_jenkins_ip_cidr = "${JENKINS_SSH_CIDR}/32"
        TF_VAR_aws_region      = "${AWS_REGION}"

        IMAGE_NAME = "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}"
        IMAGE_TAG  = "v1.0.${BUILD_NUMBER}"
    }
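
    // Terraform automatically reads TF_VAR_-prefixed environment variables, so the values
    // above are visible to every terraform command below; the explicit -var flags in the
    // plan stage simply restate them.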
    stages {
        stage('Checkout') {
            steps {
                checkout scm
            }
        }
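
        // Static analysis. Assumes a scanner installation named 'SonarQubeScanner' under
        // Global Tool Configuration and a server entry named 'SonarQube' in the SonarQube
        // plugin settings; withSonarQubeEnv injects that server's URL and token.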
        stage('SonarQube Scan') {
            steps {
                script {
                    def scannerHome = tool 'SonarQubeScanner'
                    withSonarQubeEnv('SonarQube') {
                        sh """
                            ${scannerHome}/bin/sonar-scanner \
                                -Dsonar.projectKey=nvhi-atsila-microservice \
                                -Dsonar.sources=.
                        """
                    }
                }
            }
        }
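
        // ECR authentication: get-login-password returns a token valid for 12 hours,
        // scoped to this account's registry in $AWS_REGION.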
        stage('Login to ECR') {
            steps {
                withCredentials([[
                    $class: 'AmazonWebServicesCredentialsBinding',
                    credentialsId: env.AWS_CRED_ID
                ]]) {
                    sh '''
                        aws ecr get-login-password --region $AWS_REGION \
                            | docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
                    '''
                }
            }
        }
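
        // Builds from the Dockerfile at the workspace root (docker.build's default context)
        // and pushes the fully qualified ECR tag using the login established above.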
        stage('Build & Push Docker Image') {
            steps {
                script {
                    def img = docker.build("${IMAGE_NAME}:${IMAGE_TAG}")
                    img.push()
                }
            }
        }
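
        // One-time bootstrap of the remote-state backend: the S3 bucket and DynamoDB lock
        // table are created from the terraform-backend/ configuration (local state) only
        // when they do not already exist.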
        stage('Bootstrap Backend Infrastructure') {
            steps {
                withCredentials([[
                    $class: 'AmazonWebServicesCredentialsBinding',
                    credentialsId: env.AWS_CRED_ID
                ]]) {
                    dir('terraform-backend') {
                        script {
                            // Check whether the backend resources already exist.
                            // Command output is silenced so returnStdout captures only "true"/"false".
                            def backendExists = sh(
                                script: '''
                                    if aws s3api head-bucket --bucket $TF_BACKEND_BUCKET >/dev/null 2>&1 && \
                                       aws dynamodb describe-table --table-name $TF_DDB_TABLE >/dev/null 2>&1; then
                                        echo "true"
                                    else
                                        echo "false"
                                    fi
                                ''',
                                returnStdout: true
                            ).trim()

                            if (backendExists == "false") {
                                echo "Backend infrastructure doesn't exist. Creating..."
                                sh '''
                                    terraform init
                                    terraform plan -out=backend.tfplan \
                                        -var="aws_region=$AWS_REGION" \
                                        -var="backend_bucket_name=$TF_BACKEND_BUCKET" \
                                        -var="lock_table_name=$TF_DDB_TABLE"
                                    terraform apply backend.tfplan
                                '''
                            } else {
                                echo "Backend infrastructure already exists. Skipping creation."
                            }
                        }
                    }
                }
            }
        }
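
        // Application infrastructure. terraform init uses partial backend configuration
        // (-backend-config flags), so the state bucket, key, region, and lock table are
        // supplied by the pipeline rather than hard-coded in the backend block.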
        stage('Deploy Application Infrastructure') {
            steps {
                withCredentials([[
                    $class: 'AmazonWebServicesCredentialsBinding',
                    credentialsId: env.AWS_CRED_ID
                ]]) {
                    dir('terraform') {
                        sh '''
                            # Initialize with the remote backend
                            terraform init \
                                -backend-config="bucket=${TF_BACKEND_BUCKET}" \
                                -backend-config="key=${TF_BACKEND_PREFIX}" \
                                -backend-config="region=${AWS_REGION}" \
                                -backend-config="dynamodb_table=${TF_DDB_TABLE}"

                            # Plan the deployment
                            terraform plan -out=main.tfplan \
                                -var="cluster_name=${TF_VAR_cluster_name}" \
                                -var="vpc_cidr=${TF_VAR_vpc_cidr}" \
                                -var="public_subnets=${TF_VAR_public_subnets}" \
                                -var="instance_type=${TF_VAR_instance_type}" \
                                -var="key_pair_name=${TF_VAR_key_pair_name}" \
                                -var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}" \
                                -var="aws_region=${TF_VAR_aws_region}"

                            # Apply the deployment
                            terraform apply main.tfplan
                        '''
                    }
                }
            }
        }
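
        // Host configuration. Note: reading the Terraform output here touches the S3 remote
        // state without withCredentials, so this stage assumes the agent has default AWS
        // credentials (for example an instance profile) available.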
        stage('Configure EC2 with Ansible') {
            steps {
                script {
                    def ec2_ip = sh(
                        script: "terraform -chdir=terraform output -raw ecs_instance_public_ip",
                        returnStdout: true
                    ).trim()
                    echo "EC2 Instance IP: ${ec2_ip}"
                    writeFile file: 'ansible/hosts', text: "[inventory_hosts]\n${ec2_ip} ansible_user=ubuntu"
                }
                ansiblePlaybook(
                    playbook: 'ansible/configure_ecs.yml',
                    inventory: 'ansible/hosts',
                    credentialsId: env.SSH_CRED_ID
                )
            }
        }
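
        // ECS deployment: register a new task definition revision for the freshly pushed
        // image, point the service at it, and wait for the rollout to stabilize.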
        stage('Register & Deploy to ECS') {
            steps {
                withCredentials([[
                    $class: 'AmazonWebServicesCredentialsBinding',
                    credentialsId: env.AWS_CRED_ID
                ]]) {
                    sh """
                        # Register a new task definition revision for this image tag
                        aws ecs register-task-definition \
                            --family ${TF_VAR_cluster_name} \
                            --network-mode bridge \
                            --container-definitions '[{
                                "name": "health-workload",
                                "image": "${IMAGE_NAME}:${IMAGE_TAG}",
                                "essential": true,
                                "memory": 512,
                                "portMappings": [{"containerPort": 8080, "hostPort": 8080}],
                                "logConfiguration": {
                                    "logDriver": "awslogs",
                                    "options": {
                                        "awslogs-group": "/ecs/${TF_VAR_cluster_name}",
                                        "awslogs-region": "${AWS_REGION}",
                                        "awslogs-stream-prefix": "ecs"
                                    }
                                }
                            }]' \
                            --region ${AWS_REGION}

                        # Point the service at the latest revision of the family and redeploy
                        # (without --task-definition, update-service would keep the old revision)
                        aws ecs update-service \
                            --cluster ${TF_VAR_cluster_name} \
                            --service ${TF_VAR_cluster_name}-service \
                            --task-definition ${TF_VAR_cluster_name} \
                            --force-new-deployment \
                            --region ${AWS_REGION}

                        # Wait for the service to stabilize
                        echo "Waiting for service deployment to complete..."
                        aws ecs wait services-stable \
                            --cluster ${TF_VAR_cluster_name} \
                            --services ${TF_VAR_cluster_name}-service \
                            --region ${AWS_REGION}

                        echo "Deployment completed successfully!"
                    """
                }
            }
        }
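
        // Smoke test: poll the mapped host port until it returns HTTP 200 or the five-minute
        // timeout expires. Assumes port 8080 on the instance is reachable from the Jenkins agent.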
        stage('Health Check') {
            steps {
                script {
                    def ec2_ip = sh(
                        script: "terraform -chdir=terraform output -raw ecs_instance_public_ip",
                        returnStdout: true
                    ).trim()
                    echo "Performing health check on http://${ec2_ip}:8080"

                    // Wait for the service to be available
                    timeout(time: 5, unit: 'MINUTES') {
                        waitUntil {
                            script {
                                def response = sh(
                                    script: "curl -s -o /dev/null -w '%{http_code}' http://${ec2_ip}:8080 || echo '000'",
                                    returnStdout: true
                                ).trim()
                                echo "Health check response: ${response}"
                                return response == "200"
                            }
                        }
                    }
                    echo "Health check passed! Application is running successfully."
                }
            }
        }
    }

    post {
        always {
            // Clean up workspace
            cleanWs()
        }
        success {
            echo "Pipeline completed successfully!"
        }
        failure {
            echo "Pipeline failed. Check the logs for details."
        }
    }
}