gcs/ This example sets up a GCS backend with a minimal example of a state stored in it.
It:
Creates a GCS bucket with a random name (‘changeme-xxxxxxxxxxxxx’)
Sets up a GCP VPC, storing state in that backend
These are the files used (a minimal invocation sketch follows the list):
destroy.sh
- Shell script to clean up any previous run of run.sh
run.sh
- Runs the whole example, creating the bucket, the backend, and the GCP VPC
google_storage_bucket/main.tf
- Terraform code to set up a bucket
google_storage_bucket/run.sh
- Script to create just the bucket
google_storage_bucket/destroy.sh
- Script to destroy just the bucket
google_compute_network/main_template
- Template file for Terraform code for GCP VPC
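As referenced above, a minimal sketch of driving this example follows. The shared bin/apply.sh and bin/destroy.sh helpers are not shown in this section, so exporting TF_VAR_project_id is an assumption about how the required project_id variable gets supplied; adjust to however you normally pass it.
export TF_VAR_project_id="changeme-gcp-project"  # hypothetical GCP project ID; both configurations declare var.project_id
./run.sh       # creates the bucket, then the VPC whose state is stored in that bucket
./destroy.sh   # tears everything down and removes the generated google_compute_network/main.tf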
destroy.sh #!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# Move to the folder this script is in.
cd "${0%/*}"
# shellcheck disable=SC1091
source ../../bin/shared.sh
log "Cleaning up google_compute_network"
cd google_compute_network
./destroy.sh 2>/dev/null || true
rm -f main.tf
rm -rf .terraform terraform*
log "Cleaning up google_storage_bucket"
cd -
cd google_storage_bucket
./destroy.sh
cd -
google_compute_network/destroy.sh #!/bin/bash
../../../bin/destroy.sh google
google_compute_network/main_template # Summary: Creates a GCP VPC, storing its state in a GCS backend.
# Documentation: https://www.terraform.io/docs/language/settings/index.html
terraform {
  required_version = ">= 1.0.0"
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "~> 3.0"
    }
  }
  # Documentation: https://www.terraform.io/docs/language/settings/backends/gcs.html
  backend "gcs" {
    bucket = ${BUCKET_NAME}
    prefix = "terraform/state"
  }
}
# Documentation: https://www.terraform.io/docs/language/values/variables.html
variable "project_id" {
  type = string
}
# Documentation: https://www.terraform.io/docs/language/providers/requirements.html
provider "google" {
  project = var.project_id
  region  = "us-central1"
  zone    = "us-central1-c"
}
# Documentation: https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_network
resource "google_compute_network" "changeme_backends_gcs_vpc" {
  name                    = "changeme-backends-gcs-vpc"
  auto_create_subnetworks = "false"
}
google_compute_network/run.sh #!/bin/bash
../../../bin/apply.sh google
google_storage_bucket/destroy.sh #!/bin/bash
../../../bin/destroy.sh google
google_storage_bucket/main.tf # Summary: Creates a GCS bucket with a unique name.
# Documentation: https://www.terraform.io/docs/language/settings/index.html
terraform {
  required_version = ">= 1.0.0"
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "~> 3.0"
    }
  }
}
# Documentation: https://www.terraform.io/docs/language/values/variables.html
variable "project_id" {
  type = string
}
# Documentation: https://www.terraform.io/docs/language/providers/requirements.html
provider "google" {
  project = var.project_id
  region  = "us-central1"
  zone    = "us-central1-a"
}
# Explanation: This resource is not necessary for the creation of a GCS bucket, but is here to ensure that
# the GCS bucket name is unique.
#
# Documentation: https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/id
resource "random_id" "changeme_gcs_google_storage_bucket_name" {
  byte_length = 16
}
# GCS (Google Cloud Storage) bucket
# Documentation: https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/storage_bucket
resource "google_storage_bucket" "changeme_simple_bucket" {
  name          = "changeme-${random_id.changeme_gcs_google_storage_bucket_name.hex}"
  location      = "us-central1"
  force_destroy = true
  storage_class = "REGIONAL"
  versioning {
    enabled = true
  }
}
google_storage_bucket/run.sh #!/bin/bash
../../../bin/apply.sh google
run.sh #!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# Move to the folder this script is in.
cd "${0%/*}"
# shellcheck disable=SC1091
source ../../bin/shared.sh
log "Set up bucket..."
cd google_storage_bucket
./run.sh
BUCKET_NAME="$(terraform show -no-color | grep -w name | awk '{print $NF}')"
export BUCKET_NAME
log "Bucket name is: ${BUCKET_NAME}"
cd -
cd google_compute_network
log "Setting up Terraform code from main_template in google_compute_network..."
# shellcheck disable=SC2016
envsubst '$BUCKET_NAME' < "main_template" > "main.tf"
log "Set up vpc, storing state in backend..."
./run.sh
cd -
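A Terraform backend block cannot interpolate variables, which is why run.sh renders main_template into main.tf with envsubst before the second configuration runs. A rough, standalone illustration of that substitution follows; the bucket name is invented, and the surrounding quotes are part of the value because run.sh captures them from the terraform show output.
export BUCKET_NAME='"changeme-0123456789abcdef"'  # example value in the shape run.sh extracts
envsubst '$BUCKET_NAME' <<'TEMPLATE'
  backend "gcs" {
    bucket = ${BUCKET_NAME}
    prefix = "terraform/state"
  }
TEMPLATE
# Prints the rendered block:
#   backend "gcs" {
#     bucket = "changeme-0123456789abcdef"
#     prefix = "terraform/state"
#   }
An alternative would be partial backend configuration (terraform init -backend-config="bucket=..."), but keeping a fully rendered main.tf makes the example self-contained.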
remote/ This example sets up a remote backend with a minimal example of a state stored in it.
It:
Connects to Terraform Cloud organization “terraform-examples” and creates/updates workspace “backends/remote”
Sets up an AWS VPC, storing state in that backend
These are the files used:
destroy.sh
- Shell script to clean up any previous run of run.sh
run.sh
- Runs the whole example, setting up the backend and the AWS VPC
main.tf
- Template file for Terraform code for AWS VPC using remote backend
There are mandatory manual steps to be done on Terraform Cloud:
Register an account.
Create an organization.
Create a workspace in that organization.
Change “Execution Mode” (Settings->General) to “Local”; this makes sure that Terraform Cloud is only used to store and synchronize state. We will follow the CLI-driven Run Workflow.
Ensure you are properly authenticated into Terraform Cloud by running terraform login or by using a credentials block (a sketch of the latter follows).
After running run.sh and approving the apply, you should be able to see your state stored in the workspace under the States tab.
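As a sketch of the credentials-block route (the token value is a placeholder for an API token you generate in Terraform Cloud), appending to the Terraform CLI configuration file has the same effect as terraform login:
# Assumption: ~/.terraformrc is the CLI configuration file in use (the default on Linux/macOS).
cat >> "$HOME/.terraformrc" <<'EOF'
credentials "app.terraform.io" {
  token = "REPLACE-WITH-YOUR-TERRAFORM-CLOUD-API-TOKEN"
}
EOF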
destroy.sh #!/bin/bash
# Move to the folder this script is in.
cd "${0%/*}" || exit 1
# shellcheck disable=SC1091
source ../../bin/shared.sh
log "Cleaning up aws_vpc"
terraform destroy -auto-approve 2>/dev/null || true
rm -rvf .terraform .terraform.lock.hcl terraform*
main.tf # Summary: Creates an AWS VPC, storing state in a Terraform Cloud remote backend.
# Documentation: https://www.terraform.io/docs/language/providers/requirements.html
terraform {
  required_version = ">= 1.0.0"
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "3.42.0"
    }
  }
  # Documentation: https://www.terraform.io/docs/language/settings/backends/remote.html
  backend "remote" {
    organization = "changeme-terraform-examples"
    workspaces {
      # Documentation: https://www.terraform.io/docs/cloud/workspaces/naming.html
      name = "changeme-terraform-examples-workspace"
    }
  }
}
# Documentation: https://www.terraform.io/docs/language/providers/requirements.html
provider "aws" {
  region = "us-east-1"
  # Documentation: https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags
  default_tags {
    tags = {
      cs_terraform_examples = "backends/remote"
    }
  }
}
# Documentation: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc
resource "aws_vpc" "changeme_aws_vpc_remote_backend" {
  cidr_block = "10.1.0.0/16"
}
run.sh #!/bin/bash
# Move to the folder this script is in.
cd "${0%/*}" || exit 1
# shellcheck disable=SC1091
source ../../bin/shared.sh
log "Destroying any pre-existing runs..."
./destroy.sh
log "Setting up Terraform code in aws_vpc..."
log "Set up vpc, storing state in backend..."
terraform init
terraform plan
terraform apply -auto-approve
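Once the apply has finished, a quick local check that the state really is held by the remote backend (not part of the example scripts) is to list the resources Terraform sees through it:
# Reads state through the configured "remote" backend; expect output along the lines of the comment below.
terraform state list
# aws_vpc.changeme_aws_vpc_remote_backend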
s3/ This example sets up an S3 backend with a minimal example of a state stored in it.
It:
Creates an S3 bucket with a random name (‘changeme-xxxxxxxxxxxxx’)
Sets up an AWS VPC, storing state in that backend
These are the files used:
destroy.sh
- Shell script to clean up any previous run of run.sh
run.sh
- Runs the whole example, creating the bucket, the backend, and the AWS VPC
aws_s3_bucket/main.tf
- Terraform code to set up a bucket
aws_s3_bucket/run.sh
- Script to create just the bucket
aws_s3_bucket/destroy.sh
- Script to destroy just the bucket
aws_vpc/main_template
- Template file for Terraform code for AWS VPC
aws_s3_bucket/destroy.sh #!/bin/bash
../../../bin/destroy.sh aws
aws_s3_bucket/main.tf # Summary: Creates an S3 bucket in AWS with a unique name.
# Documentation: https://www.terraform.io/docs/language/settings/index.html
terraform {
  required_version = ">= 1.0.0"
}
# Documentation: https://www.terraform.io/docs/language/providers/requirements.html
provider "aws" {
  region = "us-east-1"
}
# Explanation: This resource is not necessary for the creation of an S3 bucket, but is here to ensure that
# the S3 bucket name is unique.
#
# Documentation: https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/id
resource "random_id" "changeme_backends_s3_bucket_name" {
  byte_length = 16
}
# Explanation: This is the resource that creates the bucket.
#
# Documentation: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket
resource "aws_s3_bucket" "changeme_aws_s3_bucket_backend_simple" {
  force_destroy = true
  bucket        = "changeme-${random_id.changeme_backends_s3_bucket_name.hex}"
}
aws_s3_bucket/run.sh #!/bin/bash
../../../bin/apply.sh aws
aws_vpc/destroy.sh #!/bin/bash
../../../bin/destroy.sh aws
aws_vpc/main_template # Summary: Creates an AWS VPC, storing state in an S3 backend (BUCKET_NAME is substituted by run.sh).
# Documentation: https://www.terraform.io/docs/language/providers/requirements.html
terraform {
  # Documentation: https://www.terraform.io/docs/language/settings/backends/s3.html
  backend "s3" {
    bucket = BUCKET_NAME
    region = "us-east-1"
    key    = "tfstate"
  }
}
# Documentation: https://www.terraform.io/docs/language/providers/requirements.html
provider "aws" {
  region = "us-east-1"
}
# Documentation: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc
resource "aws_vpc" "changeme_aws_vpc_s3_backend" {
  cidr_block = "10.0.0.0/16"
}
destroy.sh #!/bin/bash
# Move to the folder this script is in.
cd "${0%/*}" || exit 1
# shellcheck disable=SC1091
source ../../bin/shared.sh
log "Cleaning up aws_vpc"
cd aws_vpc || exit 1
rm -f main.tf
./destroy.sh 2>/dev/null || rm -rf .terraform terraform*
log "Cleaning up aws_s3_bucket"
cd ../aws_s3_bucket || exit 1
terraform destroy -auto-approve
run.sh #!/bin/bash
# Move to the folder this script is in.
cd "${0%/*}" || exit 1
# shellcheck disable=SC1091
source ../../bin/shared.sh
log "Destroying any pre-existing runs..."
./destroy.sh
log "Set up bucket..."
cd aws_s3_bucket || exit 1
# shellcheck disable=SC1091
source run.sh
terraform init
terraform plan
terraform apply -auto-approve
BUCKET_NAME="$(terraform show -no-color | grep -w bucket | awk '{print $NF}')"
log "Bucket name is: ${BUCKET_NAME}"
cd ../aws_vpc || exit 1
log "Setting up Terraform code from main_template in aws_vpc..."
sed "s/BUCKET_NAME/${BUCKET_NAME}/g" main_template > main.tf
log "Set up vpc, storing state in backend..."
terraform init
terraform plan
terraform apply
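After run.sh completes, the VPC's state should be stored as the "tfstate" object in the generated bucket. A quick manual check (not part of the scripts; assumes the AWS CLI is configured and BUCKET_NAME is still set, quotes included as extracted above) might look like this:
# Strip the quotes captured from `terraform show`, then look for the state object in the bucket.
aws s3 ls "s3://${BUCKET_NAME//\"/}/tfstate"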