Files
nvhi-atsila-microservice/Jenkinsfile
2025-07-12 10:10:12 +00:00

186 lines
6.2 KiB
Groovy

pipeline {
agent any
// Global build configuration. Every TF_VAR_* entry is exported to Terraform
// as an input variable via Terraform's TF_VAR_ environment convention.
environment {
// Gitea source repository and the Jenkins credential ID used to access it.
GITEA_REPO = 'https://code.jacquesingram.online/lenape/nvhi-atsila-microservice.git'
GITEA_CREDS = '52ee0829-6e65-4951-925b-4186254c3f21'
// SonarQube server URL and auth token (injected from the Jenkins credential store).
SONAR_HOST = 'https://sonar.jacquesingram.online'
SONAR_TOKEN = credentials('sonar-token')
// AWS access: credential binding ID, account ID (stored as a credential), region.
AWS_CRED_ID = 'aws-ci'
AWS_ACCOUNT_ID = credentials('AWS_ACCOUNT_ID')
AWS_REGION = 'us-east-2'
ECR_REPO = 'nvhi-atsila-microservice'
// Terraform remote-state backend: S3 bucket/key for state, DynamoDB lock table.
TF_BACKEND_BUCKET = 'nvhi-atsila-tf-state'
TF_BACKEND_PREFIX = 'ecs/terraform.tfstate'
TF_DDB_TABLE = 'nvhi-atsila-locks'
// SSH credential used by the Ansible stage to reach the provisioned EC2 host.
SSH_CRED_ID = 'jenkins-ssh'
TF_VAR_cluster_name = 'nvhi-atsila-cluster'
TF_VAR_vpc_cidr = '10.0.0.0/16'
TF_VAR_public_subnets = '10.0.1.0/24,10.0.2.0/24'
TF_VAR_instance_type = 't2.micro'
TF_VAR_key_pair_name = 'nvhi-atsila-deployer'
// ensure we pass a valid CIDR (/32)
// NOTE(review): JENKINS_SSH_CIDR is not defined anywhere in this block —
// it must come from node/global env, otherwise this resolves to "null/32".
// Also verify the source value is a bare IP, not already a CIDR. Confirm.
TF_VAR_jenkins_ip_cidr = "${JENKINS_SSH_CIDR}/32"
// Fully-qualified ECR image name and a unique per-build tag.
IMAGE_NAME = "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}"
IMAGE_TAG = "v1.0.${BUILD_NUMBER}"
}
stages {
// Fetch the repository revision that triggered this build, using the job's
// configured SCM settings (repo URL / credentials come from the job config).
stage('Checkout') {
steps { checkout scm }
}
// Static analysis: run sonar-scanner against the checked-out sources.
stage('SonarQube Scan') {
steps {
script {
// Resolve the scanner installation configured under Global Tool Configuration.
def sonarScanner = tool 'SonarQubeScanner'
// withSonarQubeEnv injects the server URL and token for the
// 'SonarQube' server entry configured in Jenkins.
withSonarQubeEnv('SonarQube') {
sh """
${sonarScanner}/bin/sonar-scanner \
-Dsonar.projectKey=nvhi-atsila-microservice \
-Dsonar.sources=.
"""
}
}
}
}
// Authenticate the Docker daemon against this account's ECR registry so the
// following build stage can push. The temporary password is piped straight
// into `docker login` and never written to the log.
stage('Login to ECR') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
sh '''
aws ecr get-login-password --region $AWS_REGION \
| docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
'''
}
}
}
// Build the service image from the workspace Dockerfile, tagged uniquely for
// this run, and push it to ECR (registry auth was done in the previous stage).
stage('Build & Push Docker Image') {
steps {
script {
docker.build("${IMAGE_NAME}:${IMAGE_TAG}").push()
}
}
}
// Idempotently create the Terraform remote-state backend resources — the S3
// bucket holding the state file and the DynamoDB table used for state locking —
// so a fresh AWS account can run this pipeline without manual setup.
stage('Bootstrap Remote State') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
sh '''
set -e
# Ensure S3 bucket exists
if ! aws s3api head-bucket --bucket $TF_BACKEND_BUCKET 2>/dev/null; then
aws s3api create-bucket --bucket $TF_BACKEND_BUCKET --region $AWS_REGION \
--create-bucket-configuration LocationConstraint=$AWS_REGION
# Encrypt state at rest and keep prior versions for recovery.
aws s3api put-bucket-encryption \
--bucket $TF_BACKEND_BUCKET \
--server-side-encryption-configuration '{"Rules":[{"ApplyServerSideEncryptionByDefault":{"SSEAlgorithm":"AES256"}}]}'
aws s3api put-bucket-versioning \
--bucket $TF_BACKEND_BUCKET \
--versioning-configuration Status=Enabled
fi
# Ensure DynamoDB table exists and is ready
if ! aws dynamodb describe-table --table-name $TF_DDB_TABLE 2>/dev/null; then
aws dynamodb create-table \
--table-name $TF_DDB_TABLE \
--attribute-definitions AttributeName=LockID,AttributeType=S \
--key-schema AttributeName=LockID,KeyType=HASH \
--billing-mode PAY_PER_REQUEST
# Block until the table is ACTIVE before Terraform tries to take a lock.
aws dynamodb wait table-exists --table-name $TF_DDB_TABLE
aws dynamodb update-continuous-backups \
--table-name $TF_DDB_TABLE \
--point-in-time-recovery-specification PointInTimeRecoveryEnabled=true
fi
'''
}
}
}
// Provision/update the infrastructure. State lives in the S3/DynamoDB backend
// bootstrapped in the previous stage.
stage('Terraform Init & Apply') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
dir('terraform') {
// -input=false makes Terraform fail fast in CI instead of hanging
// on an interactive prompt when a variable or backend value is missing.
sh """
terraform init -input=false \
-backend-config="bucket=${TF_BACKEND_BUCKET}" \
-backend-config="key=${TF_BACKEND_PREFIX}" \
-backend-config="region=${AWS_REGION}" \
-backend-config="dynamodb_table=${TF_DDB_TABLE}"
# The -var flags mirror the TF_VAR_* values already exported in the
# environment block; kept explicit here for readability.
terraform apply -auto-approve -input=false \
-var="cluster_name=${TF_VAR_cluster_name}" \
-var="vpc_cidr=${TF_VAR_vpc_cidr}" \
-var="public_subnets=${TF_VAR_public_subnets}" \
-var="instance_type=${TF_VAR_instance_type}" \
-var="key_pair_name=${TF_VAR_key_pair_name}" \
-var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}"
"""
}
}
}
}
// Configure the freshly provisioned EC2 host with the Ansible playbook.
stage('Configure EC2 with Ansible') {
steps {
// FIX: `terraform output` reads state from the S3 backend, so it needs
// AWS credentials just like every other Terraform/AWS stage; previously
// it ran unauthenticated and failed unless the agent had ambient creds.
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
script {
// Public IP Terraform assigned to the ECS container instance.
def ec2_ip = sh(
script: "terraform -chdir=terraform output -raw ecs_instance_public_ip",
returnStdout: true
).trim()
// Write a one-host inventory for the playbook run below.
writeFile file: 'ansible/hosts', text: "[inventory_hosts]\n${ec2_ip} ansible_user=ubuntu"
}
}
// NOTE(review): a newly created instance will not be in known_hosts;
// confirm host-key checking is handled (e.g. disableHostKeyChecking)
// if the first run after provisioning fails on SSH verification.
ansiblePlaybook(
playbook: 'ansible/configure_ecs.yml',
inventory: 'ansible/hosts',
credentialsId: env.SSH_CRED_ID
)
}
}
// Register a new task-definition revision for this build's image and roll the
// ECS service onto it.
stage('Register & Deploy to ECS') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
sh """
set -e
# Register the revision pointing at the image built in this run and
# capture its ARN so the service can be moved to it.
TASK_DEF_ARN=\$(aws ecs register-task-definition \
--family ${TF_VAR_cluster_name} \
--network-mode bridge \
--container-definitions '[{
"name":"health-workload",
"image":"${IMAGE_NAME}:${IMAGE_TAG}",
"essential":true,
"portMappings":[{"containerPort":8080,"hostPort":8080}]
}]' \
--region ${AWS_REGION} \
--query 'taskDefinition.taskDefinitionArn' --output text)
# FIX: previously update-service was called with --force-new-deployment
# only, so the service kept redeploying its OLD task definition and the
# new image tag never went live. Pass the new revision explicitly.
aws ecs update-service \
--cluster ${TF_VAR_cluster_name} \
--service ${TF_VAR_cluster_name}-service \
--task-definition "\$TASK_DEF_ARN" \
--force-new-deployment \
--region ${AWS_REGION}
"""
}
}
}
}
}