Magazine: Terraform AWS

Part 19-28. Terraform - AWS - Multi-Provider

by Master Seo

The following is a write-up based on the weekend CloudNet Terraform study.

https://gasidaseo.notion.site/gasidaseo/CloudNet-Blog-c9dfa44a27ff431dafdd2edacc8a1863



<1> Provider theory

<2> Provider hands-on

<3> Preparing to deploy RDS resources in two regions - deploying the backend resources

<4> Deploying RDS resources in two regions - deploying RDS



<1> Provider theory


1

Provider configuration


provider "aws" {

region = "us-east-2"

}



2

Detailed provider configuration (required_providers)


terraform {
  required_providers {
    <LOCAL_NAME> = {
      source  = "<URL>"
      version = "<VERSION>"
    }
  }
}



terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.0"
    }
  }
}



Here, hashicorp/aws is shorthand for the full registry address registry.terraform.io/hashicorp/aws.



3

Deploying to two regions


provider "aws" {

region = "us-east-2"

}


provider "aws" {

region = "us-west-1"

}


To tell the resources in each region apart, give each provider block an alias:

provider "aws" {

region = "us-east-2"

alias = "region_1"

}


provider "aws" {

region = "us-west-1"

alias = "region_2"

}


data "aws_region" "region_1" {

provider = aws.region_1

}


data "aws_region" "region_2" {

provider = aws.region_2

}


output "region_1" {

value = data.aws_region.region_1.name

description = "The name of the first region"

}


output "region_2" {

value = data.aws_region.region_2.name

description = "The name of the second region"

}
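To actually place a resource or module in one of the aliased regions, select the provider explicitly. A minimal sketch (the resource and module names here are illustrative only, not part of the original example):

# Resource level: the provider argument selects the aliased provider.
resource "aws_instance" "example_east" {
  provider      = aws.region_1   # deployed to us-east-2
  ami           = "ami-00000000" # placeholder AMI ID
  instance_type = "t2.micro"
}

# Module level: the providers map passes an aliased provider into the module.
module "example_west" {
  source = "./modules/some-module" # hypothetical module path

  providers = {
    aws = aws.region_2 # inside the module, "aws" now means us-west-1
  }
}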





<2> Provider hands-on


1

Hands-on practice


Download the source code

git clone https://github.com/brikis98/terraform-up-and-running-code.git

cd terraform-up-and-running-code/code/terraform

tree


Note

This hands-on assumes Terraform version 1.2.3.

https://brunch.co.kr/@topasvga/2844



2

# Move to the chapter directory

cd ~/terraform-up-and-running-code/code/terraform/07-working-with-multiple-providers



3

# Create the Terraform code file


cat <<EOT > multiple-region.tf
terraform {
  required_version = ">= 1.0.0, < 2.0.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.0"
    }
  }
}

provider "aws" {
  region = "us-east-2"
  alias  = "region_1"
}

provider "aws" {
  region = "us-west-1"
  alias  = "region_2"
}

data "aws_region" "region_1" {
  provider = aws.region_1
}

data "aws_region" "region_2" {
  provider = aws.region_2
}

output "region_1" {
  value       = data.aws_region.region_1.name
  description = "The name of the first region"
}

output "region_2" {
  value       = data.aws_region.region_2.name
  description = "The name of the second region"
}
EOT



4

# init & plan & apply


terraform init

terraform plan && terraform apply -auto-approve

Outputs:

region_1 = "us-east-2"

region_2 = "us-west-1"



5

Create an EC2 instance in each of the two regions

AMI IDs differ between regions, so use an aws_ami data source with a filter to look up the AMI in each region.


data "aws_ami" "ubuntu_region_1" {

provider = aws.region_1


most_recent = true

owners = ["099720109477"] # Canonical


filter {

name = "name"

values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]

}

}


data "aws_ami" "ubuntu_region_2" {

provider = aws.region_2


most_recent = true

owners = ["099720109477"] # Canonical


filter {

name = "name"

values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]

}



6

Each aws_instance references the AMI looked up in its own region:

resource "aws_instance" "region_1" {

provider = aws.region_1


ami = data.aws_ami.ubuntu_region_1.id

instance_type = "t2.micro"

}


resource "aws_instance" "region_2" {

provider = aws.region_2


ami = data.aws_ami.ubuntu_region_2.id

instance_type = "t2.micro"

}
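The apply output later in this step reports each instance's availability zone (instance_region_1_az, instance_region_2_az). A minimal sketch of outputs that would produce those values, assuming the resource names above:

output "instance_region_1_az" {
  value       = aws_instance.region_1.availability_zone
  description = "The AZ where the first instance was deployed"
}

output "instance_region_2_az" {
  value       = aws_instance.region_2.availability_zone
  description = "The AZ where the second instance was deployed"
}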


7

Hands-on: create the EC2 instances in each region



# [Terminal 1] us-east-2

while true; do aws ec2 describe-instances --region us-east-2 --query "Reservations[*].Instances[*].{PublicIPAdd:PublicIpAddress,InstanceName:Tags[?Key=='Name']|[0].Value,Status:State.Name}" --filters Name=instance-state-name,Values=running --output text ; echo "------------------------------" ; sleep 1; done


# [Terminal 2] us-west-1

while true; do aws ec2 describe-instances --region us-west-1 --query "Reservations[*].Instances[*].{PublicIPAdd:PublicIpAddress,InstanceName:Tags[?Key=='Name']|[0].Value,Status:State.Name}" --filters Name=instance-state-name,Values=running --output text ; echo "------------------------------" ; sleep 1; done


# Move to the multi-region example directory

cd ~/terraform-up-and-running-code/code/terraform/07-working-with-multiple-providers/examples/multi-region

cat main.tf outputs.tf










# init & plan & apply

terraform init

terraform plan && terraform apply -auto-approve


Outputs:

instance_region_1_az = "us-east-2c"

instance_region_2_az = "us-west-1c"

region_1 = "us-east-2"

region_2 = "us-west-1"



------------------------------

None 54.177.224.99 running

------------------------------

None 54.177.224.99 running



None 18.223.235.39 running

------------------------------

None 18.223.235.39 running




# You can destroy these after finishing the RDS deployment and data INSERT exercises below

terraform destroy -auto-approve






<3> Preparing to deploy RDS resources in two regions - deploying the backend resources


1

Environment variables

# Set the environment variables

export TF_VAR_bucket_name=masterseo-t101-tfstate

export TF_VAR_table_name=masterseo-t101-locks


# Check the environment variables

export | grep TF_VAR_
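For context, Terraform maps environment variables with the TF_VAR_ prefix onto input variables of the same name, so the two exports above supply values for the bucket_name and table_name variables declared in variables.tf (shown in full below). A minimal sketch of that mapping:

# TF_VAR_bucket_name and TF_VAR_table_name (exported above) fill these inputs at plan/apply time.
variable "bucket_name" {
  type = string
}

variable "table_name" {
  type = string
}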



# Move to the backend (S3/DynamoDB) directory

cd ~/terraform-up-and-running-code/code/terraform/03-terraform-state/file-layout-example/global/s3

cat main.tf variables.tf




[ec2-user@ip-172-31-61-209 s3]$ cat main.tf
terraform {
  required_version = ">= 1.0.0, < 2.0.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.0"
    }
  }
}

provider "aws" {
  region = "us-east-2"
}

resource "aws_s3_bucket" "terraform_state" {
  bucket = var.bucket_name

  // This is only here so we can destroy the bucket as part of automated tests. You should not copy this for production
  // usage
  force_destroy = true
}

# Enable versioning so you can see the full revision history of your
# state files
resource "aws_s3_bucket_versioning" "enabled" {
  bucket = aws_s3_bucket.terraform_state.id
  versioning_configuration {
    status = "Enabled"
  }
}

# Enable server-side encryption by default
resource "aws_s3_bucket_server_side_encryption_configuration" "default" {
  bucket = aws_s3_bucket.terraform_state.id

  rule {
    apply_server_side_encryption_by_default {
      sse_algorithm = "AES256"
    }
  }
}

# Explicitly block all public access to the S3 bucket
resource "aws_s3_bucket_public_access_block" "public_access" {
  bucket                  = aws_s3_bucket.terraform_state.id
  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true
}

resource "aws_dynamodb_table" "terraform_locks" {
  name         = var.table_name
  billing_mode = "PAY_PER_REQUEST"
  hash_key     = "LockID"

  attribute {
    name = "LockID"
    type = "S"
  }
}




[ec2-user@ip-172-31-61-209 s3]$ cat variables.tf
variable "bucket_name" {
  description = "The name of the S3 bucket. Must be globally unique."
  type        = string
}

variable "table_name" {
  description = "The name of the DynamoDB table. Must be unique in this AWS account."
  type        = string
}
[ec2-user@ip-172-31-61-209 s3]$





# Init, plan, and apply: confirm the environment variables are picked up

terraform init

terraform plan && terraform apply -auto-approve


# Verify

aws s3 ls

[ec2-user@ip-172-31-61-209 s3]$ aws s3 ls

2022-12-12 23:05:39 masterseo-t101-tfstate


aws dynamodb list-tables --output text

[ec2-user@ip-172-31-61-209 s3]$ aws dynamodb list-tables --output text

TABLENAMES masterseo-t101-locks







<4> Deploying RDS resources in two regions - deploying RDS



Deploy the primary RDS instance, then deploy a read replica in the other region.

The read replica needs the primary RDS connection information, so create the backend (which will hold the shared state) first.
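In this example the primary and the replica live in the same root module, so the replica reads the primary's ARN directly from a module output. If they were split into separate configurations, the replica side could instead read the primary's connection information from the shared S3 backend; a minimal sketch (the bucket/key values assume the backend created in <3>):

data "terraform_remote_state" "mysql_primary" {
  backend = "s3"

  config = {
    bucket = "masterseo-t101-tfstate"
    key    = "prod/data-stores/mysql/terraform.tfstate"
    region = "us-east-2"
  }
}

# e.g., data.terraform_remote_state.mysql_primary.outputs.primary_arn
# could then feed replicate_source_db on the replica side.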


1

Create the backend

Create the S3 bucket and the DynamoDB table.



2

Creating the RDS instances takes about 20 minutes.


RDS monitoring (terminal 1)

RDS monitoring (terminal 2)

Confirm the RDS creation in the console



3

# [Terminal 1] us-east-2

while true; do aws rds describe-db-instances --region us-east-2 --query "*[].[Endpoint.Address,Endpoint.Port,MasterUsername]" --output text ; echo "------------------------------" ; sleep 1; done


# [Terminal 2] us-west-1

while true; do aws rds describe-db-instances --region us-west-1 --query "*[].[Endpoint.Address,Endpoint.Port,MasterUsername]" --output text ; echo "------------------------------" ; sleep 1; done


# Set the environment variables

export TF_VAR_db_username='cloudneta'

export TF_VAR_db_password='cloudnetaQ!'


# Move to the mysql example directory

cd ~/terraform-up-and-running-code/code/terraform/07-working-with-multiple-providers/live/prod/data-stores/mysql


# Edit the backend section of main.tf

vi main.tf


backend "s3" {

# This backend configuration is filled in automatically at test time by Terratest. If you wish to run this example

# manually, uncomment and fill in the config below.


bucket = "masterseo-t101-tfstate"

key = "prod/data-stores/mysql/terraform.tfstate"

region = "us-east-2"

dynamodb_table = "masterseo-t101-locks"

# encrypt = true

}

}
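As an alternative to hard-coding these values, the backend block can be left mostly empty and the settings supplied at init time with Terraform's partial backend configuration. A sketch (values match the backend created in <3>):

# main.tf keeps an empty backend block...
terraform {
  backend "s3" {}
}

# ...and the values are passed on the command line, e.g.:
#   terraform init \
#     -backend-config="bucket=masterseo-t101-tfstate" \
#     -backend-config="key=prod/data-stores/mysql/terraform.tfstate" \
#     -backend-config="region=us-east-2" \
#     -backend-config="dynamodb_table=masterseo-t101-locks"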



# Init and plan: confirm the environment variables are picked up

terraform init

terraform plan


# Apply: takes about 25 minutes total (the primary completes after ~8 minutes; the replica takes another ~16 minutes to create and sync)

terraform apply -auto-approve



Terminal 1

After about 10 minutes

terraform-up-and-running2xxcdywcyg.us-east-2.rds.amazonaws.com 3306 cloudneta

------------------------------

terraform-up-and-running202ywcyg.us-east-2.rds.amazonaws.com 3306 cloudneta

------------------------------



Terminal 2

After about 10 minutes

None None cloudneta

------------------------------

None None cloudneta

------------------------------

None None cloudneta

------------------------------


After about 20 minutes

------------------------------

terraform-up-and-running20221212235630237200000001.czsgi0qnuznr.us-west-1.rds.amazonaws.com 3306 cloudneta

------------------------------

terraform-up-and-running20221212235630237200000001.czsgi0qnuznr.us-west-1.rds.amazonaws.com 3306 cloudneta

------------------------------





Verify in the console

Ohio (us-east-2)

After about 20 minutes

California (us-west-1)


terraform output

aws s3 ls s3://$TF_VAR_bucket_name --recursive --human-readable --summarize



[ec2-user@ip-172-31-61-209 mysql]$ cat main.tf
terraform {
  required_version = ">= 1.0.0, < 2.0.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.0"
    }
  }

  backend "s3" {
    # This backend configuration is filled in automatically at test time by Terratest. If you wish to run this example
    # manually, uncomment and fill in the config below.
    bucket         = "masterseo-t101-tfstate"
    key            = "prod/data-stores/mysql/terraform.tfstate"
    region         = "us-east-2"
    dynamodb_table = "masterseo-t101-locks"
    # encrypt      = true
  }
}

provider "aws" {
  region = "us-east-2"
  alias  = "primary"
}

provider "aws" {
  region = "us-west-1"
  alias  = "replica"
}

module "mysql_primary" {
  source = "../../../../modules/data-stores/mysql"

  providers = {
    aws = aws.primary
  }

  db_name     = var.db_name
  db_username = var.db_username
  db_password = var.db_password

  # Must be enabled to support replication
  backup_retention_period = 1
}

module "mysql_replica" {
  source = "../../../../modules/data-stores/mysql"

  providers = {
    aws = aws.replica
  }

  # Make this a replica of the primary
  replicate_source_db = module.mysql_primary.arn
}

[ec2-user@ip-172-31-61-209 mysql]$
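For context, the modules/data-stores/mysql module switches between primary and replica mode depending on whether replicate_source_db is set. A rough, simplified sketch of that pattern (not the exact module source; attribute values are illustrative):

resource "aws_db_instance" "example" {
  identifier_prefix   = "terraform-up-and-running"
  engine              = var.replicate_source_db == null ? "mysql" : null
  allocated_storage   = 10
  instance_class      = "db.t2.micro"
  skip_final_snapshot = true

  # When replicate_source_db is set, this instance becomes a read replica
  # and must not set db_name/username/password itself.
  replicate_source_db     = var.replicate_source_db
  backup_retention_period = var.backup_retention_period

  db_name  = var.replicate_source_db == null ? var.db_name : null
  username = var.replicate_source_db == null ? var.db_username : null
  password = var.replicate_source_db == null ? var.db_password : null
}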





[ec2-user@ip-172-31-61-209 mysql]$ cat variables.tf
# ---------------------------------------------------------------------------------------------------------------------
# REQUIRED PARAMETERS
# You must provide a value for each of these parameters.
# ---------------------------------------------------------------------------------------------------------------------

variable "db_username" {
  description = "The username for the database"
  type        = string
  sensitive   = true
}

variable "db_password" {
  description = "The password for the database"
  type        = string
  sensitive   = true
}

# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# These parameters have reasonable defaults.
# ---------------------------------------------------------------------------------------------------------------------

variable "db_name" {
  description = "The name to use for the database"
  type        = string
  default     = "example_database_prod"
}
[ec2-user@ip-172-31-61-209 mysql]$








[ec2-user@ip-172-31-61-209 mysql]$ more outputs.tf
output "primary_address" {
  value       = module.mysql_primary.address
  description = "Connect to the primary database at this endpoint"
}

output "primary_port" {
  value       = module.mysql_primary.port
  description = "The port the primary database is listening on"
}

output "primary_arn" {
  value       = module.mysql_primary.arn
  description = "The ARN of the primary database"
}

output "replica_address" {
  value       = module.mysql_replica.address
  description = "Connect to the replica database at this endpoint"
}

output "replica_port" {
  value       = module.mysql_replica.port
  description = "The port the replica database is listening on"
}

output "replica_arn" {
  value       = module.mysql_replica.arn
  description = "The ARN of the replica database"
}
[ec2-user@ip-172-31-61-209 mysql]$




code

https://github.com/brikis98/terraform-up-and-running-code/blob/3rd-edition/code/terraform/07-working-with-multiple-providers/live/prod/data-stores/mysql/main.tf



# (Optional) Insert data into the primary RDS and check that it is replicated to the replica RDS



# Destroy: takes about 6 minutes

terraform destroy -auto-approve


# Delete the S3 bucket and DynamoDB table

cd ~/terraform-up-and-running-code/code/terraform/03-terraform-state/file-layout-example/global/s3

terraform destroy -auto-approve




Caution

In real-world production use there is a lot more to consider.

Heavy use of provider aliases is not recommended: when one region has an outage, a single configuration that spans both regions may fail to plan or apply at all. Isolate the regions from each other instead, as in the layout sketch below.
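One common isolation pattern (paths are illustrative only) is to give each region its own root module with its own state, so an outage in one region cannot block applies in the other:

live/
  us-east-2/
    data-stores/mysql/   # its own provider block, backend key, and state
  us-west-1/
    data-stores/mysql/   # deployed and destroyed independently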






Next

https://brunch.co.kr/@topasvga/2857






https://brunch.co.kr/@topasvga/2421



Thank you.
