1
# 현재 VPC 수 확인하자. 콘솔 , 3개까지 생성 가능
# 없거나 최소 1개만 있어야 한다. 테라폼으로 2개 VPC는 더 만들어야 한다.
2
명령서버 네트워크 만들기
192.168.0.0/16
192.168.0.0/24
3
ncloud CLI
# Download and install the ncloud CLI (v1.1.25, 2025-07-17 build).
cd
wget https://www.ncloud.com/api/support/download/files/cli/CLI_1.1.25_20250717.zip
unzip CLI_1.1.25_20250717.zip
cd CLI_1.1.25_20250717
cd cli_linux/
# NOTE(review): copies into /usr/bin without sudo — assumes we are running as root.
cp ncloud /usr/bin
# Smoke-test: the binary should be on PATH now.
ncloud help
cd
또는
# Alternative: install the older CLI build (v1.1.19, 2024-03-21).
cd
wget https://www.ncloud.com/api/support/download/files/cli/CLI_1.1.19_20240321.zip
unzip CLI_1.1.19_20240321.zip
cd CLI_1.1.19_20240321/
cd cli_linux/
# NOTE(review): copies into /usr/bin without sudo — assumes we are running as root.
cp ncloud /usr/bin
ncloud help
최신 버전
https://cli.ncloud-docs.com/docs/guide-clichange
4
# Authorize the CLI to manage Naver Cloud resources:
# authenticate interactively with the account's Access key / Secret key.
ncloud configure
권한 부여 확인
ncloud vserver getRegionList
5
테라폼 설치 ?
로키 리눅스의 경우 - 아마존 리눅스 버전 설치하면 동작 한다.
# 로키 리눅스 = Amazon Linux용 버전 사용
# Rocky Linux: the Amazon Linux HashiCorp repo works here.
sudo yum install -y yum-utils shadow-utils
sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo
sudo yum -y install terraform
# Confirm the install.
terraform -version
6
# Install kubectl, the CLI that issues commands to the Kubernetes API server.
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
# Verify the client binary is on PATH.
kubectl version
7
# Fetch the Terraform example sources:
# - network + server examples (vpc/scenario01)
# - Kubernetes cluster example (nks)
cd
wget https://github.com/NaverCloudPlatform/terraform-provider-ncloud/archive/refs/heads/master.zip
unzip master.zip
# NOTE(review): an archive of the 'master' branch normally extracts to
# terraform-provider-ncloud-master, not -main — verify the directory name after unzip.
cd /root/terraform-provider-ncloud-main/examples/vpc/scenario01
cd /root/terraform-provider-ncloud-main/examples/nks
8
네이버 쿠버네티스 서비스의 권한을 받기 위한 툴 설치
# Install ncp-iam-authenticator — the tool kubectl uses to obtain
# credentials for Naver Kubernetes Service.
curl -o ncp-iam-authenticator -L https://github.com/NaverCloudPlatform/ncp-iam-authenticator/releases/latest/download/ncp-iam-authenticator_linux_amd64
chmod +x ./ncp-iam-authenticator
# NOTE: the trailing '&&' continues this command onto the next line.
mkdir -p $HOME/bin && cp ./ncp-iam-authenticator $HOME/bin/ncp-iam-authenticator &&
export PATH=$PATH:$HOME/bin
# Persist the PATH change for future login shells.
echo 'export PATH=$PATH:$HOME/bin' >> ~/.bash_profile
ncp-iam-authenticator help
# 메뉴얼
https://guide.ncloud-docs.com/docs/k8s-iam-auth-ncp-iam-authenticator
9
# Authentication is only possible AFTER the NKS cluster exists — you need its
# UUID (shown in the console). Writes kubeconfig.yaml for use with kubectl.
cd
ncp-iam-authenticator create-kubeconfig --region KR --clusterUuid a2143bde-4e6f-44d8-bfaf-075ede2a452f --output kubeconfig.yaml
10
테라폼으로 네트워크 생성해보자.
쿠버네티스 생성해 보자
아래 3과정을 거친다.
# Standard Terraform workflow: initialize providers, preview the plan, apply.
terraform init
terraform plan
terraform apply -auto-approve
# Tear everything down again when finished.
terraform destroy --auto-approve
1
다운로드
cd
wget https://github.com/NaverCloudPlatform/terraform-provider-ncloud/archive/refs/heads/master.zip
unzip master.zip
main.tf variables.tf versions.tf
파일 3개
메인, 변수, 버전
2
포털 > 마이페이지 > 계정 관리 > 인증키 관리에서 키 확인에서
access_key와 secret_key를 확인해 복사해 놓는다.
3
테라폼 파일 내용 확인
첫번째 파일 ~~~~~~~~
vi main.tf
dd로 삭제
:wq!
4
mkdir backup
cp * backup/
5
[root@quick1 scenario01]# vi main.tf
# Scenario 01: a single public subnet holding one server with a public IP,
# locked down by network-ACL rules.
# VPC > User scenario > Scenario 1. Single Public Subnet
# https://docs.ncloud.com/ko/networking/vpc/vpc_userscenario1.html
provider "ncloud" {
support_vpc = true
region = "KR"
access_key = var.access_key
secret_key = var.secret_key
}
# SSH login key created for the server below.
resource "ncloud_login_key" "key_scn_01" {
key_name = var.name_scn01
}
# VPC with a 10.0.0.0/16 address space.
resource "ncloud_vpc" "vpc_scn_01" {
name = var.name_scn01
ipv4_cidr_block = "10.0.0.0/16"
}
# Public subnet: /24 number 1 carved from the VPC block (10.0.1.0/24).
resource "ncloud_subnet" "subnet_scn_01" {
name = var.name_scn01
vpc_no = ncloud_vpc.vpc_scn_01.id
subnet = cidrsubnet(ncloud_vpc.vpc_scn_01.ipv4_cidr_block, 8, 1)
// 10.0.1.0/24
zone = "KR-2"
network_acl_no = ncloud_vpc.vpc_scn_01.default_network_acl_no
subnet_type = "PUBLIC"
// PUBLIC(Public) | PRIVATE(Private)
}
# CentOS 7.3 server placed in the public subnet.
resource "ncloud_server" "server_scn_01" {
subnet_no = ncloud_subnet.subnet_scn_01.id
name = var.name_scn01
server_image_product_code = "SW.VSVR.OS.LNX64.CNTOS.0703.B050"
login_key_name = ncloud_login_key.key_scn_01.key_name
}
# Public IP attached to the server.
resource "ncloud_public_ip" "public_ip_scn_01" {
server_instance_no = ncloud_server.server_scn_01.id
description = "for ${var.name_scn01}"
}
# Network-ACL rule tables: [priority, protocol, ip_block, port_range, action].
# Inbound: web (80/443) from anywhere, SSH/RDP only from the operator IP
# (var.client_ip), plus the ephemeral return-port range; everything else DROP.
# port_range is null for ICMP (protocol has no ports).
locals {
scn01_inbound = [
[1, "TCP", "0.0.0.0/0", "80", "ALLOW"],
[2, "TCP", "0.0.0.0/0", "443", "ALLOW"],
[3, "TCP", "${var.client_ip}/32", "22", "ALLOW"],
[4, "TCP", "${var.client_ip}/32", "3389", "ALLOW"],
[5, "TCP", "0.0.0.0/0", "32768-65535", "ALLOW"],
[197, "TCP", "0.0.0.0/0", "1-65535", "DROP"],
[198, "UDP", "0.0.0.0/0", "1-65535", "DROP"],
[199, "ICMP", "0.0.0.0/0", null, "DROP"],
]
scn01_outbound = [
[1, "TCP", "0.0.0.0/0", "80", "ALLOW"],
[2, "TCP", "0.0.0.0/0", "443", "ALLOW"],
[3, "TCP", "${var.client_ip}/32", "1000-65535", "ALLOW"],
[197, "TCP", "0.0.0.0/0", "1-65535", "DROP"],
[198, "UDP", "0.0.0.0/0", "1-65535", "DROP"],
[199, "ICMP", "0.0.0.0/0", null, "DROP"]
]
}
# Materialize the rule tables above onto the VPC's default network ACL.
resource "ncloud_network_acl_rule" "network_acl_01_rule" {
network_acl_no = ncloud_vpc.vpc_scn_01.default_network_acl_no
dynamic "inbound" {
for_each = local.scn01_inbound
content {
priority = inbound.value[0]
protocol = inbound.value[1]
ip_block = inbound.value[2]
port_range = inbound.value[3]
rule_action = inbound.value[4]
description = "for ${var.name_scn01}"
}
}
dynamic "outbound" {
for_each = local.scn01_outbound
content {
priority = outbound.value[0]
protocol = outbound.value[1]
ip_block = outbound.value[2]
port_range = outbound.value[3]
rule_action = outbound.value[4]
description = "for ${var.name_scn01}"
}
}
}
6
# 변수 파일에 값을 넣어야 한다!!!!
curl ifconfig.me
[root@quick1 scenario01]# curl ifconfig.me
211.188.49.233[root@quick1 scenario01]#
211.188.49.233
vi variables.tf
# Name prefix for all scenario-01 resources.
variable name_scn01 {
default = "tf-scn01"
}
# Public IP of the operator machine (obtained via `curl ifconfig.me`);
# used in the ACL rules that allow SSH/RDP access.
variable client_ip {
default = "211.188.49.233"
}
# NCP API credentials — replace the placeholders with the real keys from
# portal > My Page > account management > auth key management.
variable access_key {
default = "YOUR_ACCESS_KEY"
}
variable secret_key {
default = "YOUR_SECRET_KEY"
}
7
실행
terraform init
terraform plan
terraform apply -auto-approve
terraform destroy --auto-approve
8
콘솔에서 생성 확인!!
2
# 삭제
terraform destroy --auto-approve
1
cd /root/terraform-provider-ncloud-main/examples/vpc/scenario02
2
[root@quick1 scenario02]# more main.tf
# Scenario 02: public and private subnets with a NAT gateway.
# VPC > User scenario > Scenario 2. Public and Private Subnet
# https://docs.ncloud.com/ko/networking/vpc/vpc_userscenario2.html
# Provider: NCP in VPC mode, Korea region; keys come from variables.tf.
provider "ncloud" {
support_vpc = true
region = "KR"
access_key = var.access_key
secret_key = var.secret_key
}
# Login-key creation is intentionally disabled; the servers below reference an
# existing console key ("ncp-web-2025-0807") by name instead.
// resource "ncloud_login_key" "key_scn_02" {
// key_name = var.name_scn02
// }
# Removed the stray top-level `login_key_name = "ncp-web-2025-0807"` line that
# used to follow this comment block: an attribute is only valid inside a
# resource block, so Terraform fails with
# 'An argument named "login_key_name" is not expected here.'
# VPC
# NOTE(review): var.name_scn02 must be declared in variables.tf — the
# scenario01 variables file only declares name_scn01 (see the
# '"name_scn02" has not been declared' error later in these notes).
resource "ncloud_vpc" "vpc_scn_02" {
name = var.name_scn02
ipv4_cidr_block = "10.0.0.0/16"
}
# Subnet
# Public subnet (10.0.0.0/24) for the internet-facing server.
resource "ncloud_subnet" "subnet_scn_02_public" {
name = "${var.name_scn02}-public"
vpc_no = ncloud_vpc.vpc_scn_02.id
subnet = cidrsubnet(ncloud_vpc.vpc_scn_02.ipv4_cidr_block, 8, 0)
// "10.0.0.0/24"
zone = "KR-2"
network_acl_no = ncloud_network_acl.network_acl_02_public.id
subnet_type = "PUBLIC"
// PUBLIC(Public)
}
# Private subnet (10.0.1.0/24); reaches the internet only via the NAT gateway.
resource "ncloud_subnet" "subnet_scn_02_private" {
name = "${var.name_scn02}-private"
vpc_no = ncloud_vpc.vpc_scn_02.id
subnet = cidrsubnet(ncloud_vpc.vpc_scn_02.ipv4_cidr_block, 8, 1)
// "10.0.1.0/24"
zone = "KR-2"
network_acl_no = ncloud_network_acl.network_acl_02_private.id
subnet_type = "PRIVATE"
// PRIVATE(Private)
}
# Dedicated public subnet (10.0.2.0/24) hosting the NAT gateway.
resource "ncloud_subnet" "subnet_scn_02_public_natgw" {
vpc_no = ncloud_vpc.vpc_scn_02.id
subnet = cidrsubnet(ncloud_vpc.vpc_scn_02.ipv4_cidr_block, 8, 2)
// "10.0.2.0/24"
zone = "KR-2"
network_acl_no = ncloud_network_acl.network_acl_02_public.id
subnet_type = "PUBLIC"
usage_type = "NATGW"
}
# Network ACL
# Separate ACLs for the public and private tiers.
resource "ncloud_network_acl" "network_acl_02_public" {
vpc_no = ncloud_vpc.vpc_scn_02.id
name = "${var.name_scn02}-public"
}
resource "ncloud_network_acl" "network_acl_02_private" {
vpc_no = ncloud_vpc.vpc_scn_02.id
name = "${var.name_scn02}-private"
}
# Server
# Both servers use the pre-existing console login key "ncp-web-2025-0807".
# The ncloud_login_key.key_scn_02 resource is commented out in this file, so
# referencing it here caused 'A managed resource "ncloud_login_key"
# "key_scn_02" has not been declared' — fixed per the resolution notes.
resource "ncloud_server" "server_scn_02_public" {
subnet_no = ncloud_subnet.subnet_scn_02_public.id
name = "${var.name_scn02}-public"
server_image_product_code = "SW.VSVR.OS.LNX64.CNTOS.0703.B050"
login_key_name = "ncp-web-2025-0807"
//server_product_code = "SVR.VSVR.STAND.C002.M008.NET.SSD.B050.G002"
}
resource "ncloud_server" "server_scn_02_private" {
subnet_no = ncloud_subnet.subnet_scn_02_private.id
name = "${var.name_scn02}-private"
server_image_product_code = "SW.VSVR.OS.LNX64.CNTOS.0703.B050"
login_key_name = "ncp-web-2025-0807"
//server_product_code = "SVR.VSVR.STAND.C002.M008.NET.SSD.B050.G002"
}
# Public IP
# Attach a public IP to the public-subnet server.
resource "ncloud_public_ip" "public_ip_scn_02" {
server_instance_no = ncloud_server.server_scn_02_public.id
description = "for ${var.name_scn02}"
}
# NAT Gateway
# Lives in the dedicated NATGW subnet; gives the private subnet outbound access.
resource "ncloud_nat_gateway" "nat_gateway_scn_02" {
vpc_no = ncloud_vpc.vpc_scn_02.id
subnet_no = ncloud_subnet.subnet_scn_02_public_natgw.id
zone = "KR-2"
name = var.name_scn02
}
# Route Table
# Default route on the private route table: send all outbound traffic to the NAT GW.
resource "ncloud_route" "route_scn_02_nat" {
route_table_no = ncloud_vpc.vpc_scn_02.default_private_route_table_no
destination_cidr_block = "0.0.0.0/0"
target_type = "NATGW"
// NATGW (NAT Gateway) | VPCPEERING (VPC Peering) | VGW (Virtual Private Gateway).
target_name = ncloud_nat_gateway.nat_gateway_scn_02.name
target_no = ncloud_nat_gateway.nat_gateway_scn_02.id
}
# Disabled: both blocks below depend on ncloud_login_key.key_scn_02.private_key,
# but that login-key resource is no longer managed by Terraform (an existing
# console key is used instead), so Terraform cannot obtain the private key and
# fails with 'A managed resource "ncloud_login_key" "key_scn_02" has not been
# declared'. Fetch the root password from the console with the downloaded .pem
# file if SSH access is needed.
/*
data "ncloud_root_password" "scn_02_root_password" {
server_instance_no = ncloud_server.server_scn_02_public.id
private_key = ncloud_login_key.key_scn_02.private_key
}
resource "null_resource" "ls-al" {
connection {
type = "ssh"
host = ncloud_public_ip.public_ip_scn_02.public_ip
user = "root"
port = "22"
password = data.ncloud_root_password.scn_02_root_password.root_password
}
provisioner "remote-exec" {
inline = [
"ls -al",
]
}
depends_on = [
ncloud_public_ip.public_ip_scn_02,
ncloud_server.server_scn_02_public
]
}
*/
# You can add ACG rules remove comment If you want
/*
locals {
default_acg_rules_inbound = [
["TCP", "0.0.0.0/0", "80"],
["TCP", "0.0.0.0/0", "443"],
["TCP", "${var.client_ip}/32", "22"],
["TCP", "${var.client_ip}/32", "3389"],
]
default_acg_rules_outbound = [
["TCP", "0.0.0.0/0", "1-65535"],
["UDP", "0.0.0.0/0", "1-65534"],
["ICMP", "0.0.0.0/0", null]
]
}
resource "ncloud_access_control_group" "acg_scn_02" {
description = "for acc test"
vpc_no = ncloud_vpc.vpc_scn_02.id
}
resource "ncloud_access_control_group_rule" "acg_rule_scn_02" {
access_control_group_no = ncloud_access_control_group.acg_scn_02.id
dynamic "inbound" {
for_each = local.default_acg_rules_inbound
content {
protocol = inbound.value[0]
ip_block = inbound.value[1]
port_range = inbound.value[2]
}
}
dynamic "outbound" {
for_each = local.default_acg_rules_outbound
content {
protocol = outbound.value[0]
ip_block = outbound.value[1]
port_range = outbound.value[2]
}
}
}
*/
[root@quick1 scenario02]#
3
╵
╷
│ Error: Unsupported argument
│
│ on main.tf line 15:
│ 15: login_key_name = "ncp-web-2025-0807"
│
│ An argument named "login_key_name" is not expected here.
╵
╷
│ Error: Unsupported argument
│
│ on main.tf line 15:
│ 15: login_key_name = "ncp-web-2025-0807"
│
│ An argument named "login_key_name" is not expected here.
4
│
│ on main.tf line 20, in resource "ncloud_vpc" "vpc_scn_02":
│ 20: name = var.name_scn02
│
│ An input variable with the name "name_scn02" has not been declared. Did you mean "name_scn01"?
╵
5
│
│ on main.tf line 74, in resource "ncloud_server" "server_scn_02_public":
│ 74: login_key_name = ncloud_login_key.key_scn_02.key_name
│
│ A managed resource "ncloud_login_key" "key_scn_02" has not been declared in the root module.
╵
╷
│ Error: Reference to undeclared resource
│
│ on main.tf line 82, in resource "ncloud_server" "server_scn_02_private":
│ 82: login_key_name = ncloud_login_key.key_scn_02.key_name
│
│ A managed resource "ncloud_login_key" "key_scn_02" has not been declared in the root module.
╵
main.tf 에서 로그인 키 생성은 삭제한다.
기존 존재하는 인증키 키로 사용한다.
콘솔에서 서버 클릭하여 인증키 이름 확인
ncp-web-2025-0807
변경전
# resource "ncloud_login_key" "loginkey" {
# key_name = var.login_key
# }
변경전
login_key_name = ncloud_login_key.loginkey.key_name
변경후
인증키 이름
login_key_name = "ncp-web-2025-0807"
cd /root/terraform-provider-ncloud-main/examples/vpc/scenario01
terraform init
terraform plan
terraform apply -auto-approve
terraform destroy --auto-approve
https://brunch.co.kr/@topasvga/3595
감사합니다.