automated terminal push

This commit is contained in:
lenape
2025-07-12 18:19:18 +00:00
parent df5de796b9
commit 40b0504f57
4 changed files with 438 additions and 102 deletions

155
Jenkinsfile vendored
View File

@@ -1,5 +1,6 @@
pipeline {
agent any
environment {
GITEA_REPO = 'https://code.jacquesingram.online/lenape/nvhi-atsila-microservice.git'
GITEA_CREDS = '52ee0829-6e65-4951-925b-4186254c3f21'
@@ -11,19 +12,21 @@ pipeline {
AWS_REGION = 'us-east-2'
ECR_REPO = 'nvhi-atsila-microservice'
// Backend configuration
TF_BACKEND_BUCKET = 'nvhi-atsila-tf-state'
TF_BACKEND_PREFIX = 'ecs/terraform.tfstate'
TF_DDB_TABLE = 'nvhi-atsila-locks'
SSH_CRED_ID = 'jenkins-ssh'
// Application variables
TF_VAR_cluster_name = 'nvhi-atsila-cluster'
TF_VAR_vpc_cidr = '10.0.0.0/16'
TF_VAR_public_subnets = '10.0.1.0/24,10.0.2.0/24'
TF_VAR_instance_type = 't2.micro'
TF_VAR_key_pair_name = 'nvhi-atsila-deployer'
// ensure we pass a valid CIDR (/32)
TF_VAR_jenkins_ip_cidr = "${JENKINS_SSH_CIDR}/32"
TF_VAR_aws_region = "${AWS_REGION}"
IMAGE_NAME = "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}"
IMAGE_TAG = "v1.0.${BUILD_NUMBER}"
@@ -31,7 +34,9 @@ pipeline {
stages {
stage('Checkout') {
steps { checkout scm }
steps {
checkout scm
}
}
stage('SonarQube Scan') {
@@ -72,66 +77,74 @@ pipeline {
}
}
stage('Bootstrap Remote State') {
stage('Bootstrap Backend Infrastructure') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
sh '''
set -e
dir('terraform-backend') {
script {
// Check if backend resources exist
def backendExists = sh(
script: '''
if aws s3api head-bucket --bucket $TF_BACKEND_BUCKET 2>/dev/null && \
aws dynamodb describe-table --table-name $TF_DDB_TABLE 2>/dev/null; then
echo "true"
else
echo "false"
fi
''',
returnStdout: true
).trim()
# Ensure S3 bucket exists
if ! aws s3api head-bucket --bucket $TF_BACKEND_BUCKET 2>/dev/null; then
aws s3api create-bucket --bucket $TF_BACKEND_BUCKET --region $AWS_REGION \
--create-bucket-configuration LocationConstraint=$AWS_REGION
aws s3api put-bucket-encryption \
--bucket $TF_BACKEND_BUCKET \
--server-side-encryption-configuration '{"Rules":[{"ApplyServerSideEncryptionByDefault":{"SSEAlgorithm":"AES256"}}]}'
aws s3api put-bucket-versioning \
--bucket $TF_BACKEND_BUCKET \
--versioning-configuration Status=Enabled
fi
# Ensure DynamoDB table exists and is ready
if ! aws dynamodb describe-table --table-name $TF_DDB_TABLE 2>/dev/null; then
aws dynamodb create-table \
--table-name $TF_DDB_TABLE \
--attribute-definitions AttributeName=LockID,AttributeType=S \
--key-schema AttributeName=LockID,KeyType=HASH \
--billing-mode PAY_PER_REQUEST
aws dynamodb wait table-exists --table-name $TF_DDB_TABLE
aws dynamodb update-continuous-backups \
--table-name $TF_DDB_TABLE \
--point-in-time-recovery-specification PointInTimeRecoveryEnabled=true
fi
'''
if (backendExists == "false") {
echo "Backend infrastructure doesn't exist. Creating..."
sh '''
terraform init
terraform plan -out=backend.tfplan \
-var="aws_region=$AWS_REGION" \
-var="backend_bucket_name=$TF_BACKEND_BUCKET" \
-var="lock_table_name=$TF_DDB_TABLE"
terraform apply backend.tfplan
'''
} else {
echo "Backend infrastructure already exists. Skipping creation."
}
}
}
}
}
}
stage('Terraform Init & Apply') {
stage('Deploy Application Infrastructure') {
steps {
withCredentials([[
$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: env.AWS_CRED_ID
]]) {
dir('terraform') {
sh """
sh '''
# Initialize with remote backend
terraform init \
-backend-config="bucket=${TF_BACKEND_BUCKET}" \
-backend-config="key=${TF_BACKEND_PREFIX}" \
-backend-config="region=${AWS_REGION}" \
-backend-config="dynamodb_table=${TF_DDB_TABLE}"
terraform apply -auto-approve \
# Plan the deployment
terraform plan -out=main.tfplan \
-var="cluster_name=${TF_VAR_cluster_name}" \
-var="vpc_cidr=${TF_VAR_vpc_cidr}" \
-var="public_subnets=${TF_VAR_public_subnets}" \
-var="instance_type=${TF_VAR_instance_type}" \
-var="key_pair_name=${TF_VAR_key_pair_name}" \
-var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}"
"""
-var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}" \
-var="aws_region=${TF_VAR_aws_region}"
# Apply the deployment
terraform apply main.tfplan
'''
}
}
}
@@ -144,8 +157,11 @@ pipeline {
script: "terraform -chdir=terraform output -raw ecs_instance_public_ip",
returnStdout: true
).trim()
echo "EC2 Instance IP: ${ec2_ip}"
writeFile file: 'ansible/hosts', text: "[inventory_hosts]\n${ec2_ip} ansible_user=ubuntu"
}
ansiblePlaybook(
playbook: 'ansible/configure_ecs.yml',
inventory: 'ansible/hosts',
@@ -161,6 +177,7 @@ pipeline {
credentialsId: env.AWS_CRED_ID
]]) {
sh """
# Register new task definition
aws ecs register-task-definition \
--family ${TF_VAR_cluster_name} \
--network-mode bridge \
@@ -168,18 +185,82 @@ pipeline {
"name":"health-workload",
"image":"${IMAGE_NAME}:${IMAGE_TAG}",
"essential":true,
"portMappings":[{"containerPort":8080,"hostPort":8080}]
"memory":512,
"portMappings":[{"containerPort":8080,"hostPort":8080}],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "/ecs/${TF_VAR_cluster_name}",
"awslogs-region": "${AWS_REGION}",
"awslogs-stream-prefix": "ecs"
}
}
}]' \
--region ${AWS_REGION}
# Update service with new task definition
aws ecs update-service \
--cluster ${TF_VAR_cluster_name} \
--service ${TF_VAR_cluster_name}-service \
--force-new-deployment \
--region ${AWS_REGION}
# Wait for service to stabilize
echo "Waiting for service deployment to complete..."
aws ecs wait services-stable \
--cluster ${TF_VAR_cluster_name} \
--services ${TF_VAR_cluster_name}-service \
--region ${AWS_REGION}
echo "Deployment completed successfully!"
"""
}
}
}
stage('Health Check') {
steps {
script {
def ec2_ip = sh(
script: "terraform -chdir=terraform output -raw ecs_instance_public_ip",
returnStdout: true
).trim()
echo "Performing health check on http://${ec2_ip}:8080"
// Wait for the service to be available
timeout(time: 5, unit: 'MINUTES') {
waitUntil {
script {
def response = sh(
script: "curl -s -o /dev/null -w '%{http_code}' http://${ec2_ip}:8080 || echo '000'",
returnStdout: true
).trim()
echo "Health check response: ${response}"
return response == "200"
}
}
}
echo "Health check passed! Application is running successfully."
}
}
}
}
}
post {
always {
// Clean up workspace
cleanWs()
}
success {
echo "Pipeline completed successfully!"
}
failure {
echo "Pipeline failed. Check the logs for details."
}
}
}

99
terraform-backend/main.tf Normal file
View File

@@ -0,0 +1,99 @@
# Backend Infrastructure - Creates the S3 bucket and DynamoDB table for remote state
# This should be run FIRST with local state, then never changed
terraform {
# No backend configuration - uses local state for bootstrap
# (the chicken-and-egg problem: the remote backend cannot store the
# state of the stack that creates the remote backend itself).
required_providers {
aws = {
source  = "hashicorp/aws"
# Pin to the 5.x major series so a future 6.x release with breaking
# resource-schema changes cannot be picked up silently.
version = "~> 5.0"
}
}
}
# Region is variable-driven (defaults defined in variables.tf) so the
# bootstrap can be pointed at another region without editing this file.
provider "aws" {
region = var.aws_region
}
# S3 bucket for Terraform state.
# All remote state for the application stack lives in this bucket, so it
# must never be deleted by an accidental `terraform destroy` of the
# bootstrap configuration.
resource "aws_s3_bucket" "tfstate" {
  bucket = var.backend_bucket_name

  # Hard guard: Terraform refuses to plan a destroy of this resource.
  lifecycle {
    prevent_destroy = true
  }

  tags = {
    Name        = var.backend_bucket_name
    Environment = "Production"
    Purpose     = "Terraform State Storage"
  }
}
# S3 bucket versioning
# Versioning keeps every historical copy of terraform.tfstate, allowing
# recovery from a corrupted or accidentally overwritten state file.
resource "aws_s3_bucket_versioning" "tfstate_versioning" {
bucket = aws_s3_bucket.tfstate.id
versioning_configuration {
status = "Enabled"
}
}
# S3 bucket encryption
# State files can contain secrets (resource IDs, connection strings),
# so encrypt at rest. AES256 = SSE-S3, no KMS key management required.
resource "aws_s3_bucket_server_side_encryption_configuration" "tfstate_encryption" {
bucket = aws_s3_bucket.tfstate.id
rule {
apply_server_side_encryption_by_default {
sse_algorithm = "AES256"
}
}
}
# S3 bucket public access block
# All four switches on: the bucket can never be exposed publicly via
# ACLs or bucket policies, regardless of future misconfiguration.
resource "aws_s3_bucket_public_access_block" "tfstate_block" {
bucket = aws_s3_bucket.tfstate.id
block_public_acls       = true
block_public_policy     = true
ignore_public_acls      = true
restrict_public_buckets = true
}
# DynamoDB table for state locking.
# The Terraform S3 backend acquires a lock row here before every plan or
# apply, preventing two concurrent pipeline runs from corrupting state.
resource "aws_dynamodb_table" "locks" {
  name         = var.lock_table_name
  # On-demand billing: lock operations are rare and tiny, so there is no
  # capacity to provision.
  billing_mode = "PAY_PER_REQUEST"
  # "LockID" (string) is the exact key name the S3 backend requires.
  hash_key     = "LockID"

  attribute {
    name = "LockID"
    type = "S"
  }

  # Point-in-time recovery allows restoring the table if it is ever
  # corrupted or deleted.
  point_in_time_recovery {
    enabled = true
  }

  # Hard guard: losing this table disables state locking for every
  # pipeline, so block accidental destruction.
  lifecycle {
    prevent_destroy = true
  }

  tags = {
    Name        = var.lock_table_name
    Environment = "Production"
    Purpose     = "Terraform State Locking"
  }
}
# Outputs for reference
# These echo the names/ARNs the application stack must pass to
# `terraform init -backend-config=...` (bucket + dynamodb_table).
output "s3_bucket_name" {
description = "Name of the S3 bucket for Terraform state"
value       = aws_s3_bucket.tfstate.bucket
}
output "dynamodb_table_name" {
description = "Name of the DynamoDB table for state locking"
value       = aws_dynamodb_table.locks.name
}
# ARNs are useful when writing IAM policies that grant the CI role
# access to the state bucket and lock table.
output "s3_bucket_arn" {
description = "ARN of the S3 bucket"
value       = aws_s3_bucket.tfstate.arn
}
output "dynamodb_table_arn" {
description = "ARN of the DynamoDB table"
value       = aws_dynamodb_table.locks.arn
}

View File

@@ -0,0 +1,17 @@
# Inputs for the state-backend bootstrap. Defaults match the values the
# CI pipeline passes explicitly (-var=...), so a standalone run of this
# module produces identical resources.

# Region for the state bucket and lock table.
variable "aws_region" {
description = "AWS region for resources"
type        = string
default     = "us-east-2"
}
# S3 bucket names are globally unique; change this default if the name
# is already taken in another account.
variable "backend_bucket_name" {
description = "Name of the S3 bucket for Terraform state"
type        = string
default     = "nvhi-atsila-tf-state"
}
# Must match the `dynamodb_table` backend-config used by the app stack.
variable "lock_table_name" {
description = "Name of the DynamoDB table for state locking"
type        = string
default     = "nvhi-atsila-locks"
}

View File

@@ -1,84 +1,91 @@
# Application Infrastructure - Uses remote state backend
# This contains your ECS cluster, VPC, and application resources
terraform {
backend "s3" {
# These values will be provided via backend-config during terraform init
# bucket = "nvhi-atsila-tf-state"
# key = "ecs/terraform.tfstate"
# region = "us-east-2"
# dynamodb_table = "nvhi-atsila-locks"
}
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
}
}
}
provider "aws" {
region = var.aws_region
}
data "aws_availability_zones" "azs" {}
# Hardened remote-state S3 bucket
resource "aws_s3_bucket" "tfstate" {
bucket = "nvhi-atsila-tf-state"
server_side_encryption_configuration {
rule {
apply_server_side_encryption_by_default {
sse_algorithm = "AES256"
}
}
}
versioning {
enabled = true
}
tags = {
Name = "nvhi-atsila-tf-state"
Environment = "Production"
}
}
resource "aws_s3_bucket_public_access_block" "tfstate_block" {
bucket = aws_s3_bucket.tfstate.id
block_public_acls = true
block_public_policy = true
ignore_public_acls = true
restrict_public_buckets = true
}
resource "aws_dynamodb_table" "locks" {
name = "nvhi-atsila-locks"
billing_mode = "PAY_PER_REQUEST"
hash_key = "LockID"
attribute {
name = "LockID"
type = "S"
}
point_in_time_recovery {
enabled = true
}
tags = {
Name = "nvhi-atsila-locks"
Environment = "Production"
}
}
# VPC
resource "aws_vpc" "main" {
cidr_block = var.vpc_cidr
cidr_block = var.vpc_cidr
enable_dns_hostnames = true
enable_dns_support = true
tags = {
Name = "${var.cluster_name}-vpc"
}
}
# Internet Gateway
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
tags = {
Name = "${var.cluster_name}-igw"
}
}
# Public Subnets
resource "aws_subnet" "public" {
count = length(split(",", var.public_subnets))
vpc_id = aws_vpc.main.id
cidr_block = element(split(",", var.public_subnets), count.index)
availability_zone = data.aws_availability_zones.azs.names[count.index]
map_public_ip_on_launch = true
tags = {
Name = "${var.cluster_name}-public-${count.index}"
}
}
# Route Table for public subnets
resource "aws_route_table" "public" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
tags = {
Name = "${var.cluster_name}-public-rt"
}
}
# Route Table Associations
resource "aws_route_table_association" "public" {
count = length(aws_subnet.public)
subnet_id = aws_subnet.public[count.index].id
route_table_id = aws_route_table.public.id
}
# Security Group
resource "aws_security_group" "ecs_sg" {
name = "${var.cluster_name}-sg"
description = "Allow SSH & HTTP to ECS"
vpc_id = aws_vpc.main.id
ingress {
description = "SSH from Jenkins"
from_port = 22
to_port = 22
protocol = "tcp"
@@ -86,6 +93,7 @@ resource "aws_security_group" "ecs_sg" {
}
ingress {
description = "HTTP from anywhere"
from_port = 8080
to_port = 8080
protocol = "tcp"
@@ -93,6 +101,7 @@ resource "aws_security_group" "ecs_sg" {
}
egress {
description = "All outbound traffic"
from_port = 0
to_port = 0
protocol = "-1"
@@ -104,36 +113,166 @@ resource "aws_security_group" "ecs_sg" {
}
}
# Key Pair - Using hardcoded public key from your original config
resource "aws_key_pair" "deployer" {
key_name = var.key_pair_name
public_key = file("${path.module}/../lenape_key.pub")
}
public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDFBAOogBj/GHKXQs6FLROGQfXkZe2uKbRron0We7ZOLgt6e1bI7U8IMe+DIH250CHSi4R5DBYFQF5Bk1TkS5cgMtPIAb87vRUGI3sLs29DQA/kllYiZlQi9ejxcEz2+TRWn10Q/Kltlb6ESNLnnnTsIUUxKUeY3MKFFd+V13FleSVLGYondwPWYwD/XJ6a3VwSTJ1wFKO+lpKknSjDl2ZOgYpWFALPH+EwMlRGVMrUXAB604zqR1XOzYXAAWnhmmC9IGgCzU/5JnEgFyhfZbR3kpEH8SmSXahvdFZERp+3j9d3ROjchqnf0Z0zZ7vzX+G+jvzT/jGOkzH9tx0/OqIO9f47OFF8iUfZgUtJU1QGbepdsmQqognhxfJQfMZbVtKUw7zt+mzJz3A0XcRp7IwVHaqJ2QW2dpXi4UbWtejtZqROg6byWq2FpvFGNIT3eiKTf+EpCoOec6YGSrRQlj73Ob0+FhmsyQ6e8KKncaRYx38PqtnWsI3UnLtdKmEJmDBPI0ipxJzmKJKtb0vtJPVYvFEpgiXSwnDX883rAUQrXR/EhOMmbMwk7JSes6/GXH9rWN10JHh1/i1LLpl+rg6VyktFgVBHzVw++y29QSfFixeTvFkkTS5kl//CpKd1GDQb9ZBH6SPgkgOjmASPUo+p5e/NiN/SIBSpYpMjOKs7Q== jacques@Xochiquetzal"
data "aws_ami" "ubuntu" {
most_recent = true
owners = ["099720109477"]
filter {
name = "name"
values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]
tags = {
Name = var.key_pair_name
}
}
resource "aws_ecs_cluster" "main" {
name = var.cluster_name
# Get Amazon Linux 2 AMI (better for ECS)
data "aws_ami" "amazon_linux" {
most_recent = true
owners = ["amazon"]
filter {
name = "name"
values = ["amzn2-ami-ecs-hvm-*-x86_64-ebs"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
}
# IAM Role for ECS Instance
resource "aws_iam_role" "ecs_instance_role" {
name = "${var.cluster_name}-ecs-instance-role"
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "ec2.amazonaws.com"
}
}
]
})
tags = {
Name = "${var.cluster_name}-ecs-instance-role"
}
}
# IAM Role Policy Attachment
resource "aws_iam_role_policy_attachment" "ecs_instance_role_policy" {
role = aws_iam_role.ecs_instance_role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
# IAM Instance Profile
resource "aws_iam_instance_profile" "ecs_instance_profile" {
name = "${var.cluster_name}-ecs-instance-profile"
role = aws_iam_role.ecs_instance_role.name
tags = {
Name = "${var.cluster_name}-ecs-instance-profile"
}
}
# ECS Cluster
resource "aws_ecs_cluster" "main" {
name = var.cluster_name
setting {
name = "containerInsights"
value = "enabled"
}
tags = {
Name = var.cluster_name
}
}
# User data script for ECS instance
locals {
user_data = base64encode(<<-EOF
#!/bin/bash
yum update -y
yum install -y ecs-init
echo ECS_CLUSTER=${var.cluster_name} >> /etc/ecs/ecs.config
service docker start
start ecs
EOF
)
}
# EC2 Instance for ECS
resource "aws_instance" "ecs_instance" {
ami = data.aws_ami.ubuntu.id
ami = data.aws_ami.amazon_linux.id
instance_type = var.instance_type
subnet_id = aws_subnet.public[0].id
vpc_security_group_ids = [aws_security_group.ecs_sg.id]
key_name = aws_key_pair.deployer.key_name
iam_instance_profile = aws_iam_instance_profile.ecs_instance_profile.name
user_data_base64 = local.user_data
root_block_device {
volume_type = "gp3"
volume_size = 20
encrypted = true
}
tags = {
Name = "${var.cluster_name}-instance"
}
}
output "ecs_instance_public_ip" {
value = aws_instance.ecs_instance.public_ip
# ECS Service (placeholder - you may want to manage this separately)
resource "aws_ecs_service" "main" {
name = "${var.cluster_name}-service"
cluster = aws_ecs_cluster.main.id
desired_count = 1
launch_type = "EC2"
# This will be updated by your Jenkins pipeline
task_definition = "${var.cluster_name}:1"
depends_on = [aws_instance.ecs_instance]
lifecycle {
ignore_changes = [task_definition]
}
tags = {
Name = "${var.cluster_name}-service"
}
}
# CloudWatch Log Group for ECS
resource "aws_cloudwatch_log_group" "ecs_logs" {
name = "/ecs/${var.cluster_name}"
retention_in_days = 7
tags = {
Name = "${var.cluster_name}-logs"
}
}
# Outputs
output "ecs_instance_public_ip" {
description = "Public IP of the ECS instance"
value = aws_instance.ecs_instance.public_ip
}
output "ecs_cluster_name" {
description = "Name of the ECS cluster"
value = aws_ecs_cluster.main.name
}
output "vpc_id" {
description = "ID of the VPC"
value = aws_vpc.main.id
}
output "public_subnet_ids" {
description = "IDs of the public subnets"
value = aws_subnet.public[*].id
}