automated terminal push

This commit is contained in:
lenape
2025-07-14 03:15:21 +00:00
parent c13a8aacf9
commit f6a98772f4
3 changed files with 297 additions and 307 deletions

View File

@@ -57,20 +57,13 @@ resource "aws_route_table_association" "public" {
route_table_id = aws_route_table.public.id
}
# Security Group
# Security Group - Updated for SSM (removed SSH, kept application access)
resource "aws_security_group" "ecs_sg" {
name = "${var.cluster_name}-sg"
description = "Allow SSH & HTTP to ECS"
description = "Allow HTTP to ECS and HTTPS outbound for SSM/ECR"
vpc_id = aws_vpc.main.id
ingress {
description = "SSH from Jenkins"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [var.jenkins_ip_cidr]
}
# HTTP access for application
ingress {
description = "HTTP from anywhere"
from_port = 8080
@@ -79,11 +72,30 @@ resource "aws_security_group" "ecs_sg" {
cidr_blocks = ["0.0.0.0/0"]
}
# HTTPS outbound for SSM, ECR, and AWS services
egress {
description = "All outbound traffic"
from_port = 0
to_port = 0
protocol = "-1"
description = "HTTPS outbound for AWS services"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# HTTP outbound for package updates
egress {
description = "HTTP outbound for package updates"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# DNS resolution
egress {
description = "DNS resolution"
from_port = 53
to_port = 53
protocol = "udp"
cidr_blocks = ["0.0.0.0/0"]
}
@@ -92,7 +104,7 @@ resource "aws_security_group" "ecs_sg" {
}
}
# Key Pair
# Key Pair (keeping for compatibility, but not needed for SSM)
resource "aws_key_pair" "deployer" {
key_name = var.key_pair_name
public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDFBAOogBj/GHKXQs6FLROGQfXkZe2uKbRron0We7ZOLgt6e1bI7U8IMe+DIH250CHSi4R5DBYFQF5Bk1TkS5cgMtPIAb87vRUGI3sLs29DQA/kllYiZlQi9ejxcEz2+TRWn10Q/Kltlb6ESNLnnnTsIUUxKUeY3MKFFd+V13FleSVLGYondwPWYwD/XJ6a3VwSTJ1wFKO+lpKknSjDl2ZOgYpWFALPH+EwMlRGVMrUXAB604zqR1XOzYXAAWnhmmC9IGgCzU/5JnEgFyhfZbR3kpEH8SmSXahvdFZERp+3j9d3ROjchqnf0Z0zZ7vzX+G+jvzT/jGOkzH9tx0/OqIO9f47OFF8iUfZgUtJU1QGbepdsmQqognhxfJQfMZbVtKUw7zt+mzJz3A0XcRp7IwVHaqJ2QW2dpXi4UbWtejtZqROg6byWq2FpvFGNIT3eiKTf+EpCoOec6YGSrRQlj73Ob0+FhmsyQ6e8KKncaRYx38PqtnWsI3UnLtdKmEJmDBPI0ipxJzmKJKtb0vtJPVYvFEpgiXSwnDX883rAUQrXR/EhOMmbMwk7JSes6/GXH9rWN10JHh1/i1LLpl+rg6VyktFgVBHzVw++y29QSfFixeTvFkkTS5kl//CpKd1GDQb9ZBH6SPgkgOjmASPUo+p5e/NiN/SIBSpYpMjOKs7Q== jacques@Xochiquetzal"
@@ -140,12 +152,18 @@ resource "aws_iam_role" "ecs_instance_role" {
}
}
# IAM Role Policy Attachment
# IAM Role Policy Attachment for ECS
resource "aws_iam_role_policy_attachment" "ecs_instance_role_policy" {
role = aws_iam_role.ecs_instance_role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
# IAM Role Policy Attachment for SSM
resource "aws_iam_role_policy_attachment" "ecs_instance_ssm_policy" {
role = aws_iam_role.ecs_instance_role.name
policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
}
# IAM Instance Profile
resource "aws_iam_instance_profile" "ecs_instance_profile" {
name = "${var.cluster_name}-ecs-instance-profile"
@@ -170,17 +188,11 @@ resource "aws_ecs_cluster" "main" {
}
}
# User data script for ECS instance
# User data script for ECS instance with SSM
locals {
user_data = base64encode(<<-EOF
#!/bin/bash
yum update -y
yum install -y ecs-init
echo ECS_CLUSTER=${var.cluster_name} >> /etc/ecs/ecs.config
service docker start
start ecs
EOF
)
user_data = base64encode(templatefile("${path.module}/user_data.sh", {
cluster_name = var.cluster_name
}))
}
# EC2 Instance for ECS
@@ -241,6 +253,11 @@ output "ecs_instance_public_ip" {
value = aws_instance.ecs_instance.public_ip
}
output "ecs_instance_id" {
description = "Instance ID for SSM access"
value = aws_instance.ecs_instance.id
}
output "ecs_cluster_name" {
description = "Name of the ECS cluster"
value = aws_ecs_cluster.main.name

73
terraform/user_data.sh Normal file
View File

@@ -0,0 +1,73 @@
#!/bin/bash
# ECS container-instance bootstrap, rendered by Terraform's templatefile():
# ${cluster_name} is substituted at plan time. NOTE(review): because of that,
# any *shell* variable added to this file must use the brace-less $VAR form —
# templatefile() would try to interpolate a bash-style "dollar-brace" reference.
#
# Responsibilities:
#   1. Mirror all script output to /var/log/user-data.log, syslog, and console.
#   2. Install/enable the SSM agent (SSH access was removed in favor of SSM).
#   3. Install the ECS agent and join the configured cluster.
#   4. Sanity-check that SSM, Docker, and ECS services came up.

# Duplicate stdout+stderr to a log file and the system logger for debugging
# failed boots (viewable via the EC2 console or SSM once the agent is up).
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1

echo "=== Starting EC2 User Data Script ==="
echo "Timestamp: $(date)"

# Fetch the instance id via IMDSv2 (session-token based). The plain IMDSv1
# GET fails with 401 on instances that enforce IMDSv2, so request a token
# first and fall back to the legacy endpoint only if no token is returned.
TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" \
  -H "X-aws-ec2-metadata-token-ttl-seconds: 300")
if [ -n "$TOKEN" ]; then
  INSTANCE_ID=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" \
    http://169.254.169.254/latest/meta-data/instance-id)
else
  INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
fi
echo "Instance ID: $INSTANCE_ID"
echo "Cluster Name: ${cluster_name}"

# Update system packages first so agent installs pull current versions.
echo "=== Updating system packages ==="
yum update -y

# SSM agent is preinstalled on Amazon Linux 2 AMIs; the install is a no-op
# there but makes the script safe on AMIs where it is missing.
echo "=== Configuring SSM Agent ==="
yum install -y amazon-ssm-agent
systemctl enable amazon-ssm-agent
systemctl start amazon-ssm-agent

# ecs-init provides the ECS agent and its systemd unit.
echo "=== Installing ECS Agent ==="
yum install -y ecs-init

# Write the agent configuration before starting the ecs service so the
# instance registers into the right cluster on first boot.
echo "=== Configuring ECS Cluster ==="
cat > /etc/ecs/ecs.config << EOF
ECS_CLUSTER=${cluster_name}
ECS_ENABLE_LOGGING=true
ECS_LOGLEVEL=info
ECS_ENABLE_CONTAINER_METADATA=true
ECS_ENABLE_TASK_IAM_ROLE=true
ECS_AVAILABLE_LOGGING_DRIVERS=["json-file","awslogs"]
ECS_CONTAINER_STOP_TIMEOUT=30s
ECS_CONTAINER_START_TIMEOUT=3m
ECS_DISABLE_IMAGE_CLEANUP=false
EOF

# Docker must be running before the ECS agent can start containers.
echo "=== Starting Docker and ECS services ==="
systemctl enable docker
systemctl start docker
systemctl enable ecs
systemctl start ecs

# Give the agents a moment before probing their status.
echo "=== Waiting for services to initialize ==="
sleep 30

# Best-effort status dump; failures are logged but do not abort the script.
echo "=== Service Status Check ==="
echo "SSM Agent Status:"
systemctl status amazon-ssm-agent --no-pager || echo "SSM agent status check failed"
echo "Docker Status:"
systemctl status docker --no-pager || echo "Docker status check failed"
echo "ECS Status:"
systemctl status ecs --no-pager || echo "ECS status check failed"

# Poll the ECS agent introspection endpoint (port 51678) up to 5 times;
# a response means the agent has registered with the cluster.
echo "=== ECS Agent Status ==="
for i in {1..5}; do
  if curl -s http://localhost:51678/v1/metadata; then
    echo "ECS agent is responding"
    break
  else
    echo "ECS agent not ready yet, attempt $i/5"
    sleep 10
  fi
done

echo "=== User Data Script Completed ==="
echo "Timestamp: $(date)"