Terraform s3 configuration changed significantly for provider hashicorp/aws version 4.
Examples shown use the new configuration.
To create a user and grant it access to an S3 bucket, see:
https://sites.google.com/site/pawneecity/terraform/iam-terraform#h.m7i6tdp3tf70
Eg: docrepo
modules/s3/variables.tf
# Bucket identity and behaviour settings, grouped so callers pass a single object.
variable "bucket_cfg" {
  description = "bucket configuration"
  type = object({
    bucket_arn : string                               # full ARN; the bucket name is derived from its last field
    bucket_namespace : string                         # naming scheme marker: 'global' or 'account-regional'
    cors_allowed_origins : optional(list(string), []) # empty list disables the CORS configuration resource
    versioning_configuration_status : string          # Enabled | Suspended | Disabled ('Disabled' only valid for buckets never versioned)
  })
  validation {
    condition     = contains(["global", "account-regional"], var.bucket_cfg.bucket_namespace)
    error_message = "The bucket_namespace must be 'global' or 'account-regional'."
  }
  # The provider accepts Enabled, Suspended and Disabled; catch typos at plan time.
  validation {
    condition     = contains(["Enabled", "Suspended", "Disabled"], var.bucket_cfg.versioning_configuration_status)
    error_message = "The versioning_configuration_status must be 'Enabled', 'Suspended' or 'Disabled'."
  }
}
# When true, the bucket is configured as a static website
# (see aws_s3_bucket_website_configuration) and lifecycle rules are skipped.
variable "serve_static_content" {
  description = "configure to serve static content"
  type        = bool # was untyped; bool makes the `? :` usages on it explicit
  default     = false
}
# CloudFront distribution ARN allowed to reach the bucket; empty string means unused.
# NOTE(review): not referenced in the resources shown here — confirm a consumer exists.
variable "cloudfront_arn" {
  description = "Cloudfront ARN to secure it"
  type        = string # was untyped
  default     = ""
}
# Keys created as zero-byte placeholder objects (see aws_s3_object.folders).
variable "folders" {
  type        = list(string)
  description = "Array of folders"
  default     = []
}
# Opt-in intelligent-tiering archive settings; null disables the
# aws_s3_bucket_intelligent_tiering_configuration resource entirely.
variable "intelligent_tiering" {
  description = "Allow to apply intelligent tiering rules"
  default     = null
  type = object({
    days_for_infrequent_access : number # days before ARCHIVE_ACCESS
    days_for_glacier_access : number    # days before DEEP_ARCHIVE_ACCESS
  })
  # AWS requires 90-730 days for ARCHIVE_ACCESS and 180-730 for DEEP_ARCHIVE_ACCESS;
  # fail at plan time instead of at apply time.
  validation {
    condition = var.intelligent_tiering == null ? true : (
      var.intelligent_tiering.days_for_infrequent_access >= 90 &&
      var.intelligent_tiering.days_for_glacier_access >= 180
    )
    error_message = "days_for_infrequent_access must be >= 90 and days_for_glacier_access must be >= 180."
  }
}
# JSON policy document attached verbatim to the bucket (see aws_s3_bucket_policy.policy).
variable "bucket_policy" {
  type        = string
  nullable    = false
  description = "Bucket policy in JSON format"
}
variable "lifecycle_configuration_expiration_days" {
  description = "Object DELETION. null prevents object expiration"
  type        = number
  nullable    = true
  # null by default so objects never expire unless the caller opts in.
  # A 10-year lifetime would be 10 * 365 = 3650 days. //TODO no default!
  default     = null
}
modules/s3/outputs.tf
# Exposed so callers can grant IAM access to the bucket.
output "bucket_arn" {
  description = "The ARN of the bucket"
  value       = aws_s3_bucket.bucket.arn
}
# The bucket name, for wiring into other resources.
output "bucket_id" {
  description = "The Id of the bucket"
  value       = aws_s3_bucket.bucket.id
}
# Empty string when the website configuration is not created
# (the resource uses count, so index [0] may not exist).
output "bucket_website_endpoint" {
  description = "Website endpoint"
  value       = try(aws_s3_bucket_website_configuration.website_configuration[0].website_endpoint, "")
}
# Regional endpoint, suitable as a CloudFront origin domain.
output "bucket_domain_name" {
  description = "Bucket domain name"
  value       = aws_s3_bucket.bucket.bucket_regional_domain_name
}
modules/s3/s3.tf
resource "aws_s3_bucket" "bucket" {
  # Bucket name is the resource field (index 5) of the colon-separated ARN,
  # e.g. arn:aws:s3:::my-bucket -> my-bucket.
  bucket = split(":", var.bucket_cfg.bucket_arn)[5]

  # FIX: removed `bucket_namespace` — it is not an aws_s3_bucket argument in
  # hashicorp/aws v4 and fails provider validation. The {account-regional|global}
  # convention ("[your-name]-[account-id]-[region]-an" for regional) is enforced
  # via the bucket_cfg variable validation instead.
  force_destroy = false
}
# Ownership controls: the bucket owner owns every object and ACLs are disabled.
resource "aws_s3_bucket_ownership_controls" "bucket_ownership" {
bucket = aws_s3_bucket.bucket.id
rule {
object_ownership = "BucketOwnerEnforced" #Disables ACLs in favor of IAM and bucket policies
}
}
# Default server-side encryption with S3-managed keys (SSE-S3, AES256)
# for all new objects.
resource "aws_s3_bucket_server_side_encryption_configuration" "encrypt" {
bucket = aws_s3_bucket.bucket.id
rule {
apply_server_side_encryption_by_default {
sse_algorithm = "AES256"
}
}
}
# Block every form of public access; all reads go through IAM / bucket policy
# (e.g. CloudFront), never directly from the public internet.
resource "aws_s3_bucket_public_access_block" "access" {
bucket = aws_s3_bucket.bucket.id
block_public_acls = true
block_public_policy = true
ignore_public_acls = true
restrict_public_buckets = true
}
# Attach the caller-supplied JSON policy document verbatim.
resource "aws_s3_bucket_policy" "policy" {
bucket = aws_s3_bucket.bucket.id
policy = var.bucket_policy
}
# Static-website hosting, created only when serve_static_content is true.
resource "aws_s3_bucket_website_configuration" "website_configuration" {
count = var.serve_static_content ? 1 : 0
bucket = aws_s3_bucket.bucket.id
index_document {
suffix = "index.html"
}
# Error responses also serve index.html — presumably so a single-page app
# handles routing client-side; confirm against the deployed frontend.
error_document {
key = "index.html"
}
}
# One zero-byte object per folder name; the trailing "/" in the key makes the
# S3 console render it as a folder. toset() keys each instance by the folder
# name, same state addresses as the original map comprehension.
resource "aws_s3_object" "folders" {
  for_each = toset(var.folders)

  bucket = aws_s3_bucket.bucket.id
  key    = "${each.value}/"
}
# CORS rules, created only when the caller supplies at least one allowed origin.
resource "aws_s3_bucket_cors_configuration" "cors_config" {
count = length(var.bucket_cfg.cors_allowed_origins) > 0 ? 1 : 0
bucket = aws_s3_bucket.bucket.id
cors_rule {
# HEAD & GET allow AJAX/Fetch. POST & PUT allow uploads. Both from the browser
allowed_methods = ["HEAD", "GET", "POST", "PUT"]
allowed_origins = var.bucket_cfg.cors_allowed_origins
allowed_headers = ["*"]
# Allows the browser to read the ETag (useful for verifying uploads)
expose_headers = ["ETag"]
# Cache the preflight response for 1 hour to improve performance
max_age_seconds = 3600
}
}
# Bucket-wide intelligent-tiering archive rule; skipped when the variable is null.
# NOTE(review): AWS enforces minimums of 90 days for ARCHIVE_ACCESS and
# 180 for DEEP_ARCHIVE_ACCESS — confirm callers pass valid values.
resource "aws_s3_bucket_intelligent_tiering_configuration" "global_tiering_rule" {
count = var.intelligent_tiering != null ? 1 : 0
bucket = aws_s3_bucket.bucket.id
name = "TieringRule"
tiering {
access_tier = "ARCHIVE_ACCESS"
days = var.intelligent_tiering.days_for_infrequent_access
}
tiering {
access_tier = "DEEP_ARCHIVE_ACCESS"
days = var.intelligent_tiering.days_for_glacier_access
}
}
# Lifecycle rules
# Tiering transitions plus optional expiration; versioning must exist first
# because noncurrent-version rules only make sense on a versioned bucket.
resource "aws_s3_bucket_lifecycle_configuration" "lifecycle" {
count = !var.serve_static_content ? 1 : 0 #Only if not static website, eg: frontend Angular
depends_on = [aws_s3_bucket_versioning.versioning_config]
bucket = aws_s3_bucket.bucket.id
rule {
id = "all_tiering"
status = "Enabled"
filter {} # Applies to all objects in the bucket
transition {
days = 256
storage_class = "INTELLIGENT_TIERING" //Intelligent-Tiering
}
transition {
days = 512
storage_class = "GLACIER_IR" //Glacier Instant Retrieval
}
# Expiration is only added when the caller sets a lifetime; null means keep forever.
dynamic "expiration" {
for_each = var.lifecycle_configuration_expiration_days != null ? [1] : []
content {
days = var.lifecycle_configuration_expiration_days
}
}
#Permanently delete noncurrent versions of objects >> Days after objects become noncurrent
noncurrent_version_expiration {
noncurrent_days = 384
}
noncurrent_version_transition {
noncurrent_days = 192
storage_class = "GLACIER_IR" //Glacier Instant Retrieval
}
#Delete expired object delete markers or incomplete multipart uploads >> Incomplete multipart uploads
abort_incomplete_multipart_upload {
days_after_initiation = 128
}
}
}
#
# Versioning state comes straight from bucket_cfg; see the variable for valid values.
resource "aws_s3_bucket_versioning" "versioning_config" {
bucket = aws_s3_bucket.bucket.id
versioning_configuration {
status = var.bucket_cfg.versioning_configuration_status #{Enabled|Disabled}
}
}
modules/s3/variables.tf
# Environment name (eg: dev, prod); part of the bucket name and tags.
variable "env" { type = string }

# Configuration item / application identifier; part of the bucket name.
variable "ci" { type = string }

# Value for the 'Departament' tag.
variable "Department" { type = string }

# Value for the 'Programa' tag.
variable "Program" { type = string }

# Suffix for the data bucket name.
variable "bucket_data1" { type = string }

# NOTE(review): aws_s3_bucket_lifecycle_configuration.data1_lifecycle references
# var.bucket_data1_lifecycle_expiration_days, which is not declared in this file —
# confirm it is declared elsewhere, or add `variable "bucket_data1_lifecycle_expiration_days" { type = number }`.
modules/s3/outputs.tf
# Buckets regional domain name, taken from any of them
output "s3_domain" {
  # FIX: the bucket resource is named "data1" (resource "aws_s3_bucket" "data1");
  # "aws_s3_bucket.myapp-bucket-data1" is not declared and fails validation.
  value = aws_s3_bucket.data1.bucket_regional_domain_name
}
# ARN of the data bucket, for IAM policies.
output "s3_data1_arn" {
  # FIX: reference the declared resource name "data1" (was "myapp-bucket-data1").
  value = aws_s3_bucket.data1.arn
}
# Name of the data bucket.
output "s3_data1_bucket" {
  # FIX: reference the declared resource name "data1" (was "myapp-bucket-data1").
  value = aws_s3_bucket.data1.bucket
}
# Id (name) of the data bucket.
output "s3_data1_id" {
  # FIX: reference the declared resource name "data1" (was "myapp-bucket-data1").
  value = aws_s3_bucket.data1.id
}
modules/s3/s3.tf
# Data bucket named "<env>-<ci>-<bucket_data1>".
resource "aws_s3_bucket" "data1" {
bucket = "${var.env}-${var.ci}-${var.bucket_data1}"
tags = {
Name = "${var.bucket_data1}-s3-${var.env}"
ci = var.ci
Departament = var.Department # NOTE(review): tag key 'Departament' differs from the variable 'Department' — renaming would change deployed tags, confirm it is intentional
COUEnv = var.env
Programa = var.Program # NOTE(review): tag key 'Programa' vs variable 'Program' — same as above
}
}
# Keep the data bucket's ACL private.
resource "aws_s3_bucket_acl" "data1_acl" {
  # FIX: the bucket resource is "data1" (see aws_s3_bucket.data1);
  # "myapp-bucket-data1" is not declared and fails validation.
  bucket = aws_s3_bucket.data1.id
  acl    = "private"
}
# Versioning is always on for the data bucket; the lifecycle rules below
# depend on it for their noncurrent-version clauses.
resource "aws_s3_bucket_versioning" "data1_versioning" {
  # FIX: reference the declared resource name "data1" (was "myapp-bucket-data1").
  bucket = aws_s3_bucket.data1.id
  versioning_configuration {
    status = "Enabled"
  }
}
# Default SSE-S3 (AES256) encryption for all new objects in the data bucket.
resource "aws_s3_bucket_server_side_encryption_configuration" "data1_encrypt" {
  # FIX: reference the declared resource name "data1" (was "myapp-bucket-data1").
  bucket = aws_s3_bucket.data1.id
  rule {
    apply_server_side_encryption_by_default {
      sse_algorithm = "AES256"
    }
  }
}
# Tiering + expiration rules for the data bucket; depends_on ensures the
# noncurrent-version clauses apply to a bucket that is already versioned.
resource "aws_s3_bucket_lifecycle_configuration" "data1_lifecycle" {
depends_on = [aws_s3_bucket_versioning.data1_versioning]
bucket = aws_s3_bucket.data1.id
rule {
id = "all_tiering_expire"
status = "Enabled"
filter {} # Applies to all objects in the bucket
expiration {
days = var.bucket_data1_lifecycle_expiration_days # NOTE(review): this variable is not declared in the variables shown here — confirm it is declared elsewhere
}
transition {
days = 256
storage_class = "INTELLIGENT_TIERING" //Intelligent-Tiering
}
transition {
days = 512
storage_class = "GLACIER_IR" //Glacier Instant Retrieval
}
#Permanently delete noncurrent versions of objects >> Days after objects become noncurrent
noncurrent_version_expiration {
noncurrent_days = 384
}
noncurrent_version_transition {
noncurrent_days = 4
storage_class = "GLACIER_IR" //Glacier Instant Retrieval
}
#Delete expired object delete markers or incomplete multipart uploads >> Incomplete multipart uploads
abort_incomplete_multipart_upload {
days_after_initiation = 128
}
}
}
Eg: fwdefense
modules/s3/variables.tf
# JSON policy document generated by the 'cloudfront' module (OAC/OAI policy)
# and attached verbatim by aws_s3_bucket_policy.front_policy.
variable "front_policy_text" {
  type        = string # was untyped
  description = "JSON bucket policy allowing CloudFront to read the frontend bucket"
  nullable    = false
}
# finalwork_cors_origins. Eg: ["http://localhost:4200", "https://${local.domain}"]
variable "finalwork_cors_origins" {
  type        = list(string)
  nullable    = false
  description = "CORS AllowedOrigins array"
}
modules/s3/s3.tf
# Frontend app
# force_destroy lets `terraform destroy` remove the bucket even when it still
# holds deployed assets.
resource "aws_s3_bucket" "frontend" {
bucket = "${var.env}-${var.ci}-s3"
force_destroy = true
}
# Block all public access
# All reads go through CloudFront via the bucket policy, never directly.
resource "aws_s3_bucket_public_access_block" "frontend" {
bucket = aws_s3_bucket.frontend.id
block_public_acls = true
block_public_policy = true
ignore_public_acls = true
restrict_public_buckets = true
}
# ACL private [1/2] (frontend)
# BucketOwnerPreferred (not Enforced) keeps ACLs usable so the
# aws_s3_bucket_acl resource below can set the canned 'private' ACL.
resource "aws_s3_bucket_ownership_controls" "frontend" {
bucket = aws_s3_bucket.frontend.id
rule {
object_ownership = "BucketOwnerPreferred"
}
}
# ACL private [2/2] (frontend)
# depends_on: the ACL can only be set after ownership controls permit ACLs.
resource "aws_s3_bucket_acl" "frontend" {
depends_on = [aws_s3_bucket_ownership_controls.frontend]
bucket = aws_s3_bucket.frontend.id
acl = "private"
}
# Allow access from CloudFront (frontend)
# The policy text (JSON) is generated from the module 'cloudfront' using one of:
# data "aws_iam_policy_document" "origin_access_control" (attribute 'json')
# data "aws_iam_policy_document" "origin_access_identity" (attribute 'json')
resource "aws_s3_bucket_policy" "front_policy" {
bucket = aws_s3_bucket.frontend.id
policy = var.front_policy_text
}
# CORS. Ensure the browser AJAX will be able to POST for uploading the file
# NOTE(review): aws_s3_bucket.finalwork_s3 is not declared in the code shown
# here — confirm it exists elsewhere in the module.
resource "aws_s3_bucket_cors_configuration" "finalwork_cors" {
bucket = aws_s3_bucket.finalwork_s3.id
cors_rule {
allowed_headers = ["*"] //Eg: ["*"]
allowed_methods = ["GET", "HEAD", "POST"]
allowed_origins = var.finalwork_cors_origins
expose_headers = [] //Eg: ["ETag"]
max_age_seconds = 3000
}
}