https://gasidaseo.notion.site/gasidaseo/CloudNet-Blog-c9dfa44a27ff431dafdd2edacc8a1863
Let's isolate state files using a file layout.
Prior study
Try creating RDS and the other resources once in the console.
To write the infrastructure as code, you first need to know which resources have to be created.
Level 1
dev
stag
prod
mgmt
global - create the S3 and IAM resources
Level 2
vpc
services
data-storage - rds
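Putting the two levels together, the directory layout actually used later in this walkthrough looks roughly like this (file names match the ones created in the steps below):

global/
  s3/                        # S3 bucket + DynamoDB table for the backend
    main.tf
    outputs.tf
stage/
  data-stores/
    mysql/                   # VPC, subnets, security group, RDS
      main-vpcsg.tf
      main.tf
      outputs.tf
      variables.tf
  services/
    webserver-cluster/       # subnets, IGW, ASG, ALB
      main.tf
      user-data.sh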
File names
main-iam.tf
Referencing files in other folders
terraform_remote_state
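A minimal sketch of the idea: one folder stores its state in S3, and another folder reads that state's outputs through a terraform_remote_state data source (the bucket and key here are placeholders; the real values appear in the walkthrough below).

data "terraform_remote_state" "db" {
  backend = "s3"
  config = {
    bucket = "my-tfstate-bucket"                          # placeholder
    key    = "stage/data-stores/mysql/terraform.tfstate"  # path of the other folder's state
    region = "ap-northeast-2"
  }
}

# Any output of that state is then available as, for example:
#   data.terraform_remote_state.db.outputs.address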
1
Load balancer -------- 2 servers (Auto Scaling) --------- RDS
S3
2
Create S3
Files?
global - s3 - main.tf
Append -files to the bucket name
1
Create the backend
Create the folder
global
Create S3
2
Create RDS.
Create the folder
Create a DB subnet group, then create the RDS instance.
Create the VPC
Create RDS - create a DB subnet group so the instance lands in subnets 3 and 4, then create the RDS instance.
Here the DB password is passed in from outside through an environment variable (see the short sketch after this list).
Monitor the RDS creation
apply
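The mechanism in short: declare the variable with no default, and Terraform will read its value from an environment variable named TF_VAR_<variable name>. A minimal sketch (the actual declarations and exports appear in steps 6 and 7 of the RDS section below; the value here is a placeholder):

variable "db_password" {
  description = "The password for the database"
  type        = string
  sensitive   = true
}

# In the shell, before plan/apply, Terraform reads this into var.db_password:
#   export TF_VAR_db_password='<your-password>'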
3
Create the web tier
Create the folder
The RDS information can be read with terraform_remote_state.
When deploying the web servers, fetch the RDS information with terraform_remote_state and use it.
The VPC ID and similar values are taken from what the RDS side created, again via terraform_remote_state.
The user data is kept in a separate file and loaded from there.
Create an Auto Scaling group.
>> MAIN.TF
// The >> marker means content is being appended.
1
# create the directory
mkdir -p global/s3 && cd global/s3
2
# S3 bucket names must be globally unique, so use your own NICKNAME
NICKNAME=masterseo
3
# create the code file
cat <<EOT > main.tf
provider "aws" {
region = "ap-northeast-2"
}
resource "aws_s3_bucket" "mys3bucket" {
bucket = "$NICKNAME-t101study-tfstate-week3-files"
}
# Enable versioning so you can see the full revision history of your state files
resource "aws_s3_bucket_versioning" "mys3bucket_versioning" {
bucket = aws_s3_bucket.mys3bucket.id
versioning_configuration {
status = "Enabled"
}
}
resource "aws_dynamodb_table" "mydynamodbtable" {
name = "terraform-locks-week3-files"
billing_mode = "PAY_PER_REQUEST"
hash_key = "LockID"
attribute {
name = "LockID"
type = "S"
}
}
EOT
4
cat <<EOT > outputs.tf
output "s3_bucket_arn" {
value = aws_s3_bucket.mys3bucket.arn
description = "The ARN of the S3 bucket"
}
output "dynamodb_table_name" {
value = aws_dynamodb_table.mydynamodbtable.name
description = "The name of the DynamoDB table"
}
EOT
5
# deploy
terraform init && terraform plan && terraform apply -auto-approve
6
# verify the deployment
terraform state list
[root@ip-172-31-61-209 s3]# terraform state list
aws_dynamodb_table.mydynamodbtable
aws_s3_bucket.mys3bucket
aws_s3_bucket_versioning.mys3bucket_versioning
7
aws s3 ls
masterseo-t101study-tfstate-week3-files
8
aws dynamodb list-tables --output text
TABLENAMES terraform-locks-week3-files
9
# move back to the original working directory
cd
1
# create and move into the working directory (path taken from the backend key used below)
mkdir -p stage/data-stores/mysql && cd stage/data-stores/mysql
cat <<EOT > main-vpcsg.tf
terraform {
backend "s3" {
bucket = "$NICKNAME-t101study-tfstate-week3-files"
key = "stage/data-stores/mysql/terraform.tfstate"
region = "ap-northeast-2"
dynamodb_table = "terraform-locks-week3-files"
}
}
provider "aws" {
region = "ap-northeast-2"
}
resource "aws_vpc" "myvpc" {
cidr_block = "10.10.0.0/16"
enable_dns_hostnames = true
tags = {
Name = "t101-study"
}
}
resource "aws_subnet" "mysubnet3" {
vpc_id = aws_vpc.myvpc.id
cidr_block = "10.10.3.0/24"
availability_zone = "ap-northeast-2a"
tags = {
Name = "t101-subnet3"
}
}
resource "aws_subnet" "mysubnet4" {
vpc_id = aws_vpc.myvpc.id
cidr_block = "10.10.4.0/24"
availability_zone = "ap-northeast-2c"
tags = {
Name = "t101-subnet4"
}
}
resource "aws_route_table" "myrt2" {
vpc_id = aws_vpc.myvpc.id
tags = {
Name = "t101-rt2"
}
}
resource "aws_route_table_association" "myrtassociation3" {
subnet_id = aws_subnet.mysubnet3.id
route_table_id = aws_route_table.myrt2.id
}
resource "aws_route_table_association" "myrtassociation4" {
subnet_id = aws_subnet.mysubnet4.id
route_table_id = aws_route_table.myrt2.id
}
resource "aws_security_group" "mysg2" {
vpc_id = aws_vpc.myvpc.id
name = "T101 SG - RDS"
description = "T101 Study SG - RDS"
}
resource "aws_security_group_rule" "rdssginbound" {
type = "ingress"
from_port = 0
to_port = 3389
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.mysg2.id
}
resource "aws_security_group_rule" "rdssgoutbound" {
type = "egress"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.mysg2.id
}
EOT
2
terraform init -force-copy && terraform plan && terraform apply -auto-approve
3
terraform state list
[root@ip-172-31-61-209 mysql]# terraform state list
aws_route_table.myrt2
aws_route_table_association.myrtassociation3
aws_route_table_association.myrtassociation4
aws_security_group.mysg2
aws_security_group_rule.rdssginbound
aws_security_group_rule.rdssgoutbound
aws_subnet.mysubnet3
aws_subnet.mysubnet4
aws_vpc.myvpc
4
rds
cat <<EOT > main.tf
resource "aws_db_subnet_group" "mydbsubnet" {
name = "mydbsubnetgroup"
subnet_ids = [aws_subnet.mysubnet3.id, aws_subnet.mysubnet4.id]
tags = {
Name = "My DB subnet group"
}
}
resource "aws_db_instance" "myrds" {
identifier_prefix = "t101"
engine = "mysql"
allocated_storage = 10
instance_class = "db.t2.micro"
db_subnet_group_name = aws_db_subnet_group.mydbsubnet.name
vpc_security_group_ids = [aws_security_group.mysg2.id]
skip_final_snapshot = true
db_name = var.db_name
username = var.db_username
password = var.db_password
}
EOT
5
output
cat <<EOT > outputs.tf
output "address" {
value = aws_db_instance.myrds.address
description = "Connect to the database at this endpoint"
}
output "port" {
value = aws_db_instance.myrds.port
description = "The port the database is listening on"
}
output "vpcid" {
value = aws_vpc.myvpc.id
description = "My VPC Id"
}
EOT
6
var
cat <<EOT > variables.tf
# ---------------------------------------------------------------------------------------------------------------------
# REQUIRED PARAMETERS
# You must provide a value for each of these parameters.
# ---------------------------------------------------------------------------------------------------------------------
variable "db_username" {
description = "The username for the database"
type = string
sensitive = true
}
variable "db_password" {
description = "The password for the database"
type = string
sensitive = true
}
# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# These parameters have reasonable defaults.
# ---------------------------------------------------------------------------------------------------------------------
variable "db_name" {
description = "The name to use for the database"
type = string
default = "tstudydb"
}
EOT
7
Variables
export TF_VAR_db_username='clouta'
export TF_VAR_db_password='clou!'
export | grep TF_VAR
RDS monitoring
while true; do aws rds describe-db-instances --query "*[].[Endpoint.Address,Endpoint.Port,MasterUsername]" --output text ; echo "------------------------------" ; sleep 1; done
8
terraform plan && terraform apply -auto-approve
------------------------------
None None cloudneta
------------------------------
None None cloudneta
------------------------------
t10120221101055259067100000001.cn5kwtftpfwb.ap-northeast-2.rds.amazonaws.com 3306 cloudneta
------------------------------
t10120221101055259067100000001.cn5kwtftpfwb.ap-northeast-2.rds.amazonaws.com 3306 cloudneta
------------------------------
9
terraform output
[root@ip-172-31-61-209 mysql]# terraform output
address = "t10120221101055259067100000001.cn5kwtftpfwb.ap-northeast-2.rds.amazonaws.com"
port = 3306
vpcid = "vpc-0cbefbbef6a85f8d8"
0
Fetch the database address and port from the terraform_remote_state data source and expose them in the HTTP response, for example directly in a user_data heredoc as in the excerpt below (the walkthrough that follows instead renders a separate user-data.sh file with templatefile).
user_data = <<EOF
#!/bin/bash
echo "Hello, World" >> index.html
echo "${data.terraform_remote_state.db.outputs.address}" >> index.html
echo "${data.terraform_remote_state.db.outputs.port}" >> index.html
nohup busybox httpd -f -p ${var.server_port} &
EOF
1
cd
mkdir -p stage/services/webserver-cluster && cd stage/services/webserver-cluster
NICKNAME=masterseo
2
Backend and subnet configuration
cat <<EOT > main.tf
terraform {
backend "s3" {
bucket = "$NICKNAME-t101study-tfstate-week3-files"
key = "stage/services/webserver-cluster/terraform.tfstate"
region = "ap-northeast-2"
dynamodb_table = "terraform-locks-week3-files"
}
}
provider "aws" {
region = "ap-northeast-2"
}
data "terraform_remote_state" "db" {
backend = "s3"
config = {
bucket = "$NICKNAME-t101study-tfstate-week3-files"
key = "stage/data-stores/mysql/terraform.tfstate"
region = "ap-northeast-2"
}
}
resource "aws_subnet" "mysubnet1" {
vpc_id = data.terraform_remote_state.db.outputs.vpcid
cidr_block = "10.10.1.0/24"
availability_zone = "ap-northeast-2a"
tags = {
Name = "t101-subnet1"
}
}
resource "aws_subnet" "mysubnet2" {
vpc_id = data.terraform_remote_state.db.outputs.vpcid
cidr_block = "10.10.2.0/24"
availability_zone = "ap-northeast-2c"
tags = {
Name = "t101-subnet2"
}
}
resource "aws_internet_gateway" "myigw" {
vpc_id = data.terraform_remote_state.db.outputs.vpcid
tags = {
Name = "t101-igw"
}
}
resource "aws_route_table" "myrt" {
vpc_id = data.terraform_remote_state.db.outputs.vpcid
tags = {
Name = "t101-rt"
}
}
resource "aws_route_table_association" "myrtassociation1" {
subnet_id = aws_subnet.mysubnet1.id
route_table_id = aws_route_table.myrt.id
}
resource "aws_route_table_association" "myrtassociation2" {
subnet_id = aws_subnet.mysubnet2.id
route_table_id = aws_route_table.myrt.id
}
resource "aws_route" "mydefaultroute" {
route_table_id = aws_route_table.myrt.id
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.myigw.id
}
resource "aws_security_group" "mysg" {
vpc_id = data.terraform_remote_state.db.outputs.vpcid
name = "T101 SG"
description = "T101 Study SG"
}
resource "aws_security_group_rule" "mysginbound" {
type = "ingress"
from_port = 8080
to_port = 8080
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.mysg.id
}
resource "aws_security_group_rule" "mysgoutbound" {
type = "egress"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.mysg.id
}
EOT
3
terraform init -force-copy && terraform plan && terraform apply -auto-approve
Apply complete! Resources: 10 added, 0 changed, 0 destroyed.
terraform state list
[root@ip-172-31-61-209 webserver-cluster]# terraform state list
data.terraform_remote_state.db
aws_internet_gateway.myigw
aws_route.mydefaultroute
aws_route_table.myrt
aws_route_table_association.myrtassociation1
aws_route_table_association.myrtassociation2
aws_security_group.mysg
aws_security_group_rule.mysginbound
aws_security_group_rule.mysgoutbound
aws_subnet.mysubnet1
aws_subnet.mysubnet2
4
userdata (the \$ escapes in the heredoc below keep the shell from expanding the placeholders, so the saved file keeps them as template variables for templatefile)
cat <<EOT > user-data.sh
#!/bin/bash
wget https://busybox.net/downloads/binaries/1.31.0-defconfig-multiarch-musl/busybox-x86_64
mv busybox-x86_64 busybox
chmod +x busybox
cat > index.html <<EOF
<h1>T101 Study</h1>
<p>My RDS DB address: \${db_address}</p>
<p>My RDS DB port: \${db_port}</p>
EOF
nohup ./busybox httpd -f -p \${server_port} &
EOT
5
asg add (note: the launch configuration below renders user-data.sh directly with templatefile, so the template_file data source is included for reference but not referenced elsewhere)
cat <<EOT >> main.tf
data "template_file" "user_data" {
template = file("user-data.sh")
vars = {
server_port = 8080
db_address = data.terraform_remote_state.db.outputs.address
db_port = data.terraform_remote_state.db.outputs.port
}
}
data "aws_ami" "my_amazonlinux2" {
most_recent = true
filter {
name = "owner-alias"
values = ["amazon"]
}
filter {
name = "name"
values = ["amzn2-ami-hvm-*-x86_64-ebs"]
}
owners = ["amazon"]
}
resource "aws_launch_configuration" "mylauchconfig" {
name_prefix = "t101-lauchconfig-"
image_id = data.aws_ami.my_amazonlinux2.id
instance_type = "t2.micro"
security_groups = [aws_security_group.mysg.id]
associate_public_ip_address = true
# Render the User Data script as a template
user_data = templatefile("user-data.sh", {
server_port = 8080
db_address = data.terraform_remote_state.db.outputs.address
db_port = data.terraform_remote_state.db.outputs.port
})
# Required when using a launch configuration with an auto scaling group.
lifecycle {
create_before_destroy = true
}
}
resource "aws_autoscaling_group" "myasg" {
name = "myasg"
launch_configuration = aws_launch_configuration.mylauchconfig.name
vpc_zone_identifier = [aws_subnet.mysubnet1.id, aws_subnet.mysubnet2.id]
min_size = 2
max_size = 10
tag {
key = "Name"
value = "terraform-asg"
propagate_at_launch = true
}
}
EOT
6
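# re-run init so any provider newly required by the added code (e.g. the template provider for the template_file data source) is installed, and existing ones can be upgraded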
terraform init -upgrade
7
terraform plan && terraform apply -auto-approve
EC2 monitoring
------------------------------
terraform-asg 54.180.150.94 running
terraform-asg 3.35.169.26 running
------------------------------
terraform-asg 54.180.150.94 running
terraform-asg 3.35.169.26 running
8
Append the ALB code to main.tf
cat <<EOT >> main.tf
resource "aws_lb" "myalb" {
name = "t101-alb"
load_balancer_type = "application"
subnets = [aws_subnet.mysubnet1.id, aws_subnet.mysubnet2.id]
security_groups = [aws_security_group.mysg.id]
tags = {
Name = "t101-alb"
}
}
resource "aws_lb_listener" "myhttp" {
load_balancer_arn = aws_lb.myalb.arn
port = 8080
protocol = "HTTP"
# By default, return a simple 404 page
default_action {
type = "fixed-response"
fixed_response {
content_type = "text/plain"
message_body = "404: page not found - T101 Study"
status_code = 404
}
}
}
resource "aws_lb_target_group" "myalbtg" {
name = "t101-alb-tg"
port = 8080
protocol = "HTTP"
vpc_id = data.terraform_remote_state.db.outputs.vpcid
health_check {
path = "/"
protocol = "HTTP"
matcher = "200-299"
interval = 5
timeout = 3
healthy_threshold = 2
unhealthy_threshold = 2
}
}
resource "aws_lb_listener_rule" "myalbrule" {
listener_arn = aws_lb_listener.myhttp.arn
priority = 100
condition {
path_pattern {
values = ["*"]
}
}
action {
type = "forward"
target_group_arn = aws_lb_target_group.myalbtg.arn
}
}
output "myalb_dns" {
value = aws_lb.myalb.dns_name
description = "The DNS Address of the ALB"
}
EOT
9
# edit the code file
vi main.tf
---------------------
resource "aws_autoscaling_group" "myasg" {
health_check_type = "ELB"
target_group_arns = [aws_lb_target_group.myalbtg.arn]
...
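For reference, after this edit the complete ASG block would look roughly as follows (the earlier block with the two attributes added; everything else is unchanged):

resource "aws_autoscaling_group" "myasg" {
  name                 = "myasg"
  launch_configuration = aws_launch_configuration.mylauchconfig.name
  vpc_zone_identifier  = [aws_subnet.mysubnet1.id, aws_subnet.mysubnet2.id]
  min_size             = 2
  max_size             = 10

  # added: register instances with the ALB target group and use its health checks
  health_check_type = "ELB"
  target_group_arns = [aws_lb_target_group.myalbtg.arn]

  tag {
    key                 = "Name"
    value               = "terraform-asg"
    propagate_at_launch = true
  }
}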
10
terraform plan && terraform apply -auto-approve
11
Monitoring
/root/stage/services/webserver-cluster
ALBDNS=$(terraform output -raw myalb_dns)
while true; do curl --connect-timeout 1 http://$ALBDNS:8080 ; echo; echo "------------------------------"; date; sleep 1; done
curl -s http://$ALBDNS:8080
------------------------------
Tue Nov 1 06:24:31 UTC 2022
<h1>T101 Study</h1>
<p>My RDS DB address: t10120221101055259067100000001.cn5kwtftpfwb.ap-northeast-2.rds.amazonaws.com</p>
<p>My RDS DB port: 3306</p>
------------------------------
Tue Nov 1 06:24:33 UTC 2022
<h1>T101 Study</h1>
<p>My RDS DB address: t10120221101055259067100000001.cn5kwtftpfwb.ap-northeast-2.rds.amazonaws.com</p>
<p>My RDS DB port: 3306</p>
------------------------------
Tue Nov 1 06:24:34 UTC 2022
12
Cleanup
# destroy the resources in each folder
stage/services/webserver-cluster$ terraform destroy -auto-approve
stage/data-stores/mysql$ terraform destroy -auto-approve
# delete the objects in the S3 bucket
aws s3 rm s3://$NICKNAME-t101study-tfstate-week3-files --recursive
# delete the versioned objects in the S3 bucket
aws s3api delete-objects \
--bucket $NICKNAME-t101study-tfstate-week3-files \
--delete "$(aws s3api list-object-versions \
--bucket "${NICKNAME}-t101study-tfstate-week3-files" \
--output=json \
--query='{Objects: Versions[].{Key:Key,VersionId:VersionId}}')"
# delete the delete markers in the S3 bucket
aws s3api delete-objects --bucket $NICKNAME-t101study-tfstate-week3-files \
--delete "$(aws s3api list-object-versions --bucket "${NICKNAME}-t101study-tfstate-week3-files" \
--query='{Objects: DeleteMarkers[].{Key:Key,VersionId:VersionId}}')"
# destroy the backend resources
global/s3$ terraform destroy -auto-approve
# remove the related directories
cd
Sign up for GitHub
Create a public repository dedicated to this study
Push your code to the repository.
Next
https://brunch.co.kr/@topasvga/2797
https://brunch.co.kr/@topasvga/2421
Thank you.