application-master-$account.json¶
- Purpose
- Example Configuration
- Configuration Details
app
Blockapp_description
approval_skip
approval_timeout
archaius_enabled
custom_tags
eureka_enabled
instance_profile
instance_type
lambda_concurrency_limit
lambda_destinations
lambda_dlq
lambda_environment
lambda_filesystems
lambda_layers
lambda_memory
lambda_provisioned_throughput
lambda_role
lambda_subnet_count
lambda_subnet_purpose
lambda_timeout
lambda_tracing
cloudfunction_timeout
cloudfunction_memory_mb
cloudfunction_environment
cloudfunction_allow_unauthenticated
cloudfunction_iam_bindings
cloudfunction_max_instances
cloudfunction_ingress_type
cloudfunction_vpc
cloudfunction_event_trigger
asg
Blockelb
Blockregions
Keydeploy_strategy
Keysecurity_group
Blockdns
Blocklambda_triggers
datapipeline
Blocks3
Blockstepfunction
Blockqe
Block
- Example Quality Stage JSON Configuration
Purpose¶
This configuration file holds infrastructure information for $account. Each AWS account in your pipeline would need a separate application-master-$account.json file. If your account is named dev, you would want an application-master-dev.json file.
Example Configuration¶
{
"app": {
"app_description": null,
"approval_skip": false,
"approval_timeout": null,
"archaius_enabled": false,
"canary": false,
"custom_tags": {},
"email": null,
"eureka_enabled": false,
"instance_profile": "{{ profile }}",
"instance_type": "t2.micro",
"lambda_concurrency_limit": null,
"lambda_destinations": {},
"lambda_dlq": {},
"lambda_environment": {},
"lambda_filesystems": [],
"lambda_layers": [],
"lambda_memory": "128",
"lambda_role": null,
"lambda_provisioned_throughput": null,
"lambda_subnet_count": null,
"lambda_subnet_purpose": "internal",
"lambda_timeout": "30",
"lambda_tracing": {},
"cloudfunction_timeout": "60",
"cloudfunction_memory_mb": "128",
"cloudfunction_max_instances": "0",
"cloudfunction_vpc_connector": null
},
"asg": {
"hc_type": "ELB",
"hc_grace_period": 180,
"app_grace_period": 0,
"max_inst": 3,
"min_inst": 1,
"ssh_keypair": null,
"subnet_purpose": "internal",
"enable_public_ips": null,
"provider_healthcheck": {
"amazon": false
},
"scaling_policy": {},
"custom_scaling_policies":[],
"scheduled_actions": []
},
"elb": {
"certificate": null,
"policies": [],
"listener_policies": [],
"backend_policies": [],
"idle_timeout": null,
"access_log": {},
"connection_draining_timeout": null,
"health": {
"interval": 20,
"threshold": 2,
"timeout": 10,
"unhealthy_threshold": 5
},
"i_port": 8080,
"i_proto": "HTTP",
"lb_port": 80,
"lb_proto": "HTTP",
"subnet_purpose": "internal",
"target": "TCP:8080"
},
"qe": {
},
"regions": {
"us-east-1": {}
},
"deploy_strategy": "highlander",
"security_group": {
"description": "Auto-Gen SG for {{ app }}",
"egress": "0.0.0.0/0",
"elb_extras": [],
"ingress": {
},
"instance_extras": []
},
"dns": {
"ttl": 60,
"failover_dns": true,
"region_specific": true
},
"lambda_triggers": [],
"s3": {
"shared_bucket_master": false,
"bucket_name": "",
"path": "/",
"bucket_acl": "private",
"bucket_policy": {},
"content_metadata": [],
"cors": {
"enabled": false,
"cors_rules": [{
"cors_headers": [],
"cors_methods": [],
"cors_origins": [],
"cors_expose_headers": [],
"cors_max_age": 600
}]
},
"encryption": {
"enabled": false,
"encryption_rules": [{
"ApplyServerSideEncryptionByDefault": {
"SSEAlgorithm": "AES256"
}
}]
},
"lifecycle": {
"enabled": false,
"lifecycle_rules": [{}]
},
"logging": {
"enabled": false,
"logging_grants": [],
"logging_bucket": "",
"logging_bucket_prefix": "{{ app }}/"
},
"notification": {
"enabled": false,
"topic_configurations": [{}],
"queue_configurations": [{}],
"lambda_configurations": [{}]
},
"tagging": {
"tags": {}
},
"versioning": {
"enabled": false,
"mfa_delete": "Disabled"
},
"website": {
"enabled": false,
"index_suffix": "index.html",
"error_document": "404.html"
}
},
"datapipeline": {
"description": "",
"activate_on_deploy": false,
"json_definition": {}
},
"stepfunction": {
"tracing": {
"enabled": false
},
"logging_configuration": {},
"statemachine_type": "STANDARD",
"json_definition": {}
}
}
Configuration Details¶
app
Block¶
Top level key that contains information on the application and EC2 details
approval_skip
¶
Enable the ability to skip approval stage for a given environment. Must be enabled in foremast configs per environment to allow overrides.
Type: booleanDefault:false
approval_timeout
¶
Enable the ability to override Spinnaker’s default Stage Timeout (typically 72-hours) with a custom timeout specified in milliseconds.
This is helpful to maintain cleaner pipelines, and fail pipelines not ready for the next environment.
For example, 2 hours
is represented as 7200000
.
Type: intFormat: msDefault:null
archaius_enabled
¶
Setting this value to true
will autocreate archaius pathing in
a specified archaius S3 bucket.
Type: booleanDefault:false
custom_tags
¶
Custom Tags to be used during deployment stages on resources such as ELBs and EC2s.
custom_tags
Example¶
{
"app": {
"custom_tags": {
"example_key": "example_value",
"app_name": "application_name"
}
}
}
eureka_enabled
¶
Setting this value to true
will not create an ELB, DNS record, and set the
ASG health check to EC2.
Type: booleanDefault:false
instance_profile
¶
The instance profile to start EC2 instances with. Foremast creates default instance profile based on the default string. Specifying a different profile name assumes the profile exists.
Type: stringDefault:"${group}_${app}_profile"
instance_type
¶
The size/type of the EC2 instance. Uses Standard AWS instance names. See https://aws.amazon.com/ec2/instance-types/ for details
Type: stringDefault:"t2.micro"
lambda_concurrency_limit
¶
Each region in your AWS account has a Lambda concurrency limit. The concurrency limit determines how many function invocations can run simultaneously in one region. The limit applies to all functions in the same region and is set to 1000 by default.
If you exceed a concurrency limit, Lambda starts throttling the offending functions by rejecting requests. Depending on the invocation type, you’ll run into the following situations:
More info on limits can be found here: https://docs.aws.amazon.com/lambda/latest/dg/limits.html
lambda_destinations
¶
This feature provides the ability to control what happens when a function is successful or fails e.g. if a specific function fails you may want to invoke another lambda function to perform some error management. In the past you would have to add this bespoke functionality into your code.
Destinations currently support the following: * ARN of Lambda Function * ARN of SQS Queue * ARN of SNS Topic * ARN of Amazon EventBridge event bus
You may specify either an individual destination path OR one for both success and failure.
More details on lambda destinations can be found here: https://aws.amazon.com/blogs/compute/introducing-aws-lambda-destinations/
Type: ObjectDefault:{}
lambda_destinations
Example¶
"lambda_destinations": {
"OnSuccess": { "Destination": "arn"},
"OnFailure": { "Destination": "arn"}
}
lambda_dlq
¶
A dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing
Dead Letter Queues are supported in either SNS or SQS and pass in the ARN. See https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html for more details
Type: ObjectDefault:{}
lambda_dlq
Example¶
"lambda_dlq": {
"TargetArn": "arn:aws:sns:us-east-1:accountnumber:topic"
}
lambda_environment
¶
Environment variables which are passed to the lambda function.
lambda_environment
Keys¶
Variables
: Dictionary of environment variables.Type: objectDefault:null
lambda_environment
Example¶
{
"lambda_environment": {
"Variables": {
"VAR1": "val1",
"VAR2": "val2",
"VAR3": "val3"
}
}
}
lambda_filesystems
¶
List of Dictionaries that are passed with the EFS filesystem configuration. Expects the ARN of the filesystem and the Local Mount Path
Type: listDefault:[]
lambda_filesystems
Example¶
{
"Arn": "arn",
"LocalMountPath": "/mnt/efs/"
}
lambda_memory
¶
The amount of memory to give a Lambda function
Type: stringDefault:"128"
Units: Megabytes
lambda_provisioned_throughput
¶
This will allow provisioned throughput of a lambda function. This specifically will ensure the function is warmed for a provisioned amount to eliminate any function cold starts (not to be confused with VPC cold starts)
More info on provisioned throughput can be found here: https://aws.amazon.com/blogs/aws/new-provisioned-concurrency-for-lambda-functions/
Type: intDefault:null
lambda_role
¶
Override the default generated IAM Role name.
Type: stringDefault:"${group}_${app}_role"
lambda_subnet_count
¶
Enables ability to specify subnet resiliency of lambda functions. By default, uses all subnets of type subnet_purpose
specified.
Each VPC in your AWS account has a Hyperplane ENI limit. The ENI Limit determines how many Hyperplane ENIs you can have in one VPC. The limit applies to Lambda in the same VPC and is set to 250 by default. If you exceed an ENI Limit, Lambda deployment will fail with a Hyperplane ENI Limit error.
At this time, you will need to submit a limit increase or reduce how many SG:Subnet Tuples you have per function. When you connect a function to a VPC, Lambda creates an elastic network interface for each combination of security group and subnet in your function’s VPC configuration.
More info on limits can be found here: https://docs.aws.amazon.com/lambda/latest/dg/limits.html
Type: intDefault:<<MAX SUBNET COUNT>>
lambda_subnet_purpose
¶
Determines if the AWS Lambda should be public (external) or non-public (internal).
Type: stringDefault:"internal"
Options
"internal"
"external"
lambda_timeout
¶
The timeout setting for Lambda function. See official limits https://docs.aws.amazon.com/lambda/latest/dg/limits.html
Type: stringDefault:"900"
Units: Seconds
lambda_tracing
¶
Lambda Tracing feature allows you to enable X-Ray APIs to your lambda function to identify performance bottlenecks and troubleshoot requests that are in error.
If you’ve enabled X-Ray tracing in a service that invokes your function, Lambda sends traces to X-Ray automatically. The upstream service, such as Amazon API Gateway, or an application hosted on Amazon EC2 that is instrumented with the X-Ray SDK, samples incoming requests and adds a tracing header that tells Lambda to send traces or not. For a full list of services that support active instrumentation, see Supported AWS Services in the AWS X-Ray Developer Guide. For more details see: https://docs.aws.amazon.com/lambda/latest/dg/lambda-x-ray.html
Currently AWS API supports Active or PassThrough.
Type: ObjectDefault:{}
lambda_tracing
Example¶
"lambda_tracing": {
"Mode": "Active"
}
cloudfunction_timeout
¶
The function execution timeout. Execution is considered failed and can be terminated if the function is not completed at the end of the timeout period. Defaults to 60 seconds.
A duration in seconds with up to nine fractional digits, terminated by ‘s’. Example: “3.5s”.
Type: StringDefault:None
Example:"60s"
cloudfunction_memory_mb
¶
Memory in Mb specified as an integer (without Mb or mb after the value). GCP currently defaults to 256Mb if no value is given.
Type: IntegerDefault:None
Example:128
cloudfunction_environment
¶
Environment variables that should be present when the Cloud Function is invoked.
Type: DictionaryDefault:None
Example:{ 'MY_ENV_VAR': 'My value!' }
cloudfunction_allow_unauthenticated
¶
Creates an IAM Binding which will allow anonymous/unauthenticated users to invoke the function. This only applies to
HTTP triggers, event triggers cannot be invoked by users directly. This option mimics the gcloud functions deploy ... --allow-unauthenticated
CLI option and
adds an IAM binding for ‘allUsers’ with ‘roles/cloudfunctions.invoker’.
Type: BooleanDefault:False
cloudfunction_iam_bindings
¶
Updates the Cloud Function’s IAM Policy, which can be used to control which users or service accounts can access the function. It allows granular control
on who has permissions to the function, and not which permissions the function itself has. For allowing anonymous access to the function the
cloudfunction_allow_unauthenticated=True
option is simpler.
Type: ArrayDefault:[]
Example:[ { "members": [ "user:jon.snow@GameOfThrones.com", "serviceAccount:my-service-acccount@my-project.iam.gserviceaccount.com", ], "role": "roles/cloudfunctions.invoker" } ]
Note
- The role roles/cloudfunctions.invoker does not allow invoking via
gcloud functions call ...
, instead use a command like curl $URL -H "Authorization: bearer $(gcloud auth print-identity-token)"
to test granular invocation permissions
cloudfunction_max_instances
¶
Maximum number of instances of a function that can run in parallel. GCP defaults to no limit if a value is not given.
Type: IntegerDefault:None
Example:5
cloudfunction_ingress_type
¶
Ingress type to use. Foremast does not have a default, however GCP Defaults to ALLOW_ALL
if none is given.
Options are: INGRESS_SETTINGS_UNSPECIFIED
, ALLOW_ALL
, ALLOW_INTERNAL_ONLY
, ALLOW_INTERNAL_AND_GCLB
For information on this option see the GCP Documentation on Ingress Settings.
Type: StringDefault:None
Example:ALLOW_INTERNAL_ONLY
cloudfunction_vpc
¶
"cloudfunction_vpc": {
"connector": {
"us-central1": "projects/your-vpc-project/locations/us-central1/connectors/stage-us-central1",
"us-east1": "projects/your-vpc-project/locations/us-east1/connectors/stage-us-east1"
},
"egress_type": "PRIVATE_RANGES_ONLY"
}
connector
¶
VPC Connector to use, which will allow private VPC network access to the Cloud Function. Should be defined as key/value pairs where the key is the region and the value is the VPC connector.
Type: DictionaryDefault:None
Example:{ "us-central1": "projects/your-host-project/locations/us-central1/connectors/yourconnector-us-central1", "us-east1": "projects/your-host-project/locations/us-east1/connectors/yourconnector-us-east1" }
egress_type
¶
Egress type to use. Foremast does not have a default, however GCP Defaults to PRIVATE_RANGES_ONLY
if none is given.
Options are: VPC_CONNECTOR_EGRESS_SETTINGS_UNSPECIFIED
, PRIVATE_RANGES_ONLY
, ALL_TRAFFIC
.
For information on this option see the GCP Documentation on VPC Egress Settings.
Type: StringDefault:None
Example:PRIVATE_RANGES_ONLY
cloudfunction_event_trigger
¶
Configures a trigger for a GCP Cloud Function. If none is given, GCP will default to an HTTPS trigger. Trigger types are immutable in GCP, so once a trigger type is used (https, pub/sub, GCS, etc) it cannot be changed. It is possible to change the resource used in the trigger, but not the trigger type itself.
Example Pub/Sub trigger:"cloudfunction_event_trigger": { "event_type": "providers/cloud.pubsub/eventTypes/topic.publish", "resource": "/topics/my_topic", "failure_policy": { "retry": true } }Example GCS Bucket trigger:"cloudfunction_event_trigger": { "resource": "buckets/my_bucket_name", "event_type": "google.storage.object.archive", "failure_policy": { "retry": false } }
event_type
¶
Event type to trigger the Cloud Function. Event types and their formats can vary, the easiest way to determine your event type is to run the command gcloud functions event-types list. and refer to the EVENT_TYPE column.
Type: StringDefault:None
Example Pub/Sub:providers/cloud.pubsub/eventTypes/topic.publish
Example Storage:google.storage.object.archive
Example Firestore Storage:providers/cloud.firestore/eventTypes/document.write
resource
¶
The resource to trigger off of. The resource type given must match the event_type
specified. For example, a resource
path to a GCS Bucket with a Pub/Sub event trigger will be rejected. GCP expects the project to be specified and the
full path to the resource, however if omitted Foremast will add this automatically.
Type: StringDefault:None
Example Pub/Sub:topics/my_topic
Example Storage:buckets/my_bucket
asg
Block¶
Top level key containing information regarding application ASGs
hc_type
¶
Note
See
foremast.pipeline.construct_pipeline_block.construct_pipeline_block()
for cases where the Health Check type is overridden to "EC2"
.
ASG Health check type (EC2 or ELB)
Type: stringDefault:"ELB"
Options:
"ELB"
"EC2"
app_grace_period
¶
App specific health check grace period (added onto default ASG healthcheck grace period) to delay sending of health check requests. This is useful in the event your application takes longer to boot than the default hc_grace_period defined in templates.
For example, hc_grace_period may be 180 seconds, but an app may need a variable amount of time to boot (say 30 seconds extra). This will add 180 + 30 to calculate the overall hc_grace_period of 210 seconds.
Type: numberDefault:0
Units: Seconds
min_inst
¶
Minimum number of instances your auto-scaling group should have at all times. This is also the default number of instances
Type: numberDefault:1
ssh_keypair
¶
SSH key that your EC2 instances will use. Must already be created in AWS. This replaces the non-functional and deprecated app_ssh_key configuration key.
Type: stringDefault:"{{ account }}_{{ region }}_default"
- {{ account }} being the AWS account in the configuration name
subnet_purpose
¶
Determines if the instances should be public (external) or non-public (internal).
Type: stringDefault:"internal"
Options
"internal"
"external"
enable_public_ips
¶
Determines if instances in a cluster should have public IPs associated. By default, this is set to null which means it uses default behavior configured for your subnets in your cloud provider.
Type: booleanDefault: nullOptions
true
false
scaling_policy
¶
To better explain this feature, this has been moved to: scaling_policy - V1 Cluster Scaling
custom_scaling_policies
¶
To better explain this feature, this has been moved to: custom_scaling_policies - V2 Cluster Scaling
scheduled_actions
¶
To better explain this feature, this has been moved to: scheduled_actions
elb
Block¶
Top level key for ELB configuration
access_log
¶
Access Log configuration block. Ensure S3 bucket has proper bucket policy to enable writing.
access_log
Keys¶
bucket_name
: Name of S3 bucket to write access log toType: stringDefault: Null
bucket_prefix
: Prefix to write to in the S3 bucketType: stringDefault: Null
emit_interval
: ELB Access Log write delayType: numberRange: 5-60Units: secondsDefault: Null
connection_draining_timeout
¶
Connection Draining Timeout to set on the ELB. This allows existing requests to complete before the load balancer shifts traffic away from a deregistered or unhealthy instance.
Type: numberRange: 1-3600Units: secondsDefault: Null
certificate
¶
Name of SSL certification for ELB. SSL certificate must be uploaded to AWS first.
Type: stringDefault: Null
health
¶
Health check configuration block
health
Keys¶
interval
: ELB health check interval
Type: numberUnits: secondsDefault:20
threshold
: Number of consecutive health check successes before declaring EC2
instance healthy.
Type: numberDefault:2
timeout
: Health check response timeout
Type: numberUnits: secondsDefault:10
unhealthy_threshold
: number of consecutive health check failures before
declaring EC2 instance unhealthy
Type: numberDefault:5
idle_timeout
¶
Idle Timeout to set on the ELB. This is the time, in seconds, that the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer.
Type: numberRange: 1-3600Units: secondsDefault: 60
ports
¶
Defines ELB listeners. Expects a list of listeners.
ports
Keys¶
instance
: The protocol:port of the instance
Type: stringDefault:"HTTP:8080"
loadbalancer
: the protocol:port of the load balancer
Type: stringDefault:"HTTP:80"
stickiness
: defines stickiness on ELB; if app, specify cookie_name, if elb,
specify cookie_ttl
Type: objectDefault:None
Supported Types:elb
,app
Example app:{ "stickiness": { "type": "app", "cookie_name": "$cookiename" } }Example elb:{ "stickiness": { "type": "elb", "cookie_ttl": 300 } }
certificate
: The name of the certificate to use if required
Type: stringDefault:null
listener_policies
: A list of listener policies to associate to an ELB. Must
be created in AWS first.
Type: arrayDefault:[]
backend_policies
: A list of backend server policies to associate to an ELB.
Must be created in AWS first.
Type: arrayDefault:[]
Example:["WebSocket-Proxy-Protocol"]
ports
Example¶
{
"ports": [
{
"instance": "HTTP:8080",
"loadbalancer": "HTTP:80",
"stickiness": {
"type": "app",
"cookie_name": "cookie"
}
},
{
"certificate": "my_cert",
"instance": "HTTP:8443",
"loadbalancer": "HTTPS:443",
"listener_policies": [
"MyExamplePolicy"
],
"stickiness": {
"type": "elb",
"cookie_ttl": 300
}
}
]
}
subnet_purpose
¶
Determines if the load balancer should be public (external) or non-public (internal). When changing this option, the ELB and DNS Records must be manually destroyed before deployment. This is necessary because the ELB Scheme is not modifiable.
Type: stringDefault:"internal"
Options:
"internal"
"external"
regions
Key¶
Dictionary of AWS regions that application will be deployed to.
Type: objectDefault:{ "us-east-1": {} }
deploy_strategy
Key¶
Spinnaker strategy to use for deployments.
Type: stringDefault: “highlander”Options:
"highlander"
- destroy old server group"redblack"
- disables old server group but do not destroy"canary"
- Only used in S3 deployments. Causes pipeline to first deploy to CANARY path"alpha"
- Only used in S3 deployments. Causes pipeline to first deploy to an ALPHA path"mirror"
- Only used in S3 deployments. Contents are deployed as-is, no version or LATEST directory"branchrelease"
- Only used in S3 deployments. S3 Folders correlate to Git Branches, using versions and LATEST directory
security_group
Block¶
Hold configuration for creating application specific security group
description
¶
Description of the security group. Used in AWS for creation
Type: stringDefault:"Auto-Gen SG for {{ app }}"
elb_extras
¶
A list of extra security groups to assign to ELB
Type: arrayDefault:[]
Example:["all_access", "test_sg"]
instance_extras
¶
A list of extra security groups to assign to each instance
Type: arrayDefault:[]
Example:["all_access", "test_sg"]
security_group
Example¶
You can reference SG by name or by cidr block, you can also specify cross account SG by name by referring to the spinnaker environment name. To see an example of this see below:
{
"security_group": {
"ingress": {
"examplesecuritygroupname": [
{"start_port": 80, "end_port": 80, "protocol": "tcp"},
{"start_port": 443, "end_port": 443, "protocol": "tcp"},
{"start_port": 443, "end_port": 443, "protocol": "tcp", "env": "prod"}
],
"192.168.100.0/24": [
{"start_port": 80, "end_port": 80, "protocol": "tcp"}
]
},
"egress": {
"192.168.100.0/24": [
{"start_port": 80, "end_port": 80, "protocol": "tcp"}
]
}
}
}
lambda_triggers
¶
A list of all events to trigger a Lambda function. See Lambda Triggers and Events for details
Type: arrayDefault:[]
datapipeline
Block¶
Top level key for AWS Data Pipeline settings. Only necessary for Data Pipeline deployments.
name
¶
Name of the Data Pipeline. This defaults to the application name.
Type: stringDefault:$appname
activate_on_deploy
¶
Activates a Data Pipeline after deployment. Useful for OnDemand pipelines
Type: booleanDefault:false
json_definition
¶
The exported JSON definition of the AWS Data Pipeline. You can get this by clicking “Export” in the AWS Console when creating the Data Pipeline.
Type: objectDefault:{}
s3
Block¶
Holds settings related to s3 deployments
path
¶
Path to upload assets to in a specified s3 bucket. Only works for S3 pipelines not using shared/master bucket setup. Refer to s3_bucket_master for more information.
Type: stringDefault:"/"
bucket_acl
¶
General ACL to apply to S3 bucket
Type: stringDefault:"private"
Options:
"public"
"private"
bucket_name
¶
Allows an S3 bucket name to be specified vs generated by pipeline
Type: stringDefault:""
bucket_policy
¶
The S3 bucket policy in json format to apply to created S3 bucket. Must be a valid S3 bucket policy; use the AWS policy generator/simulator to test your policy. (https://awspolicygen.s3.amazonaws.com/policygen.html)
Type: jsonDefault:"{}"
content_metadata
¶
S3 object metadata based on path. The “path” field should have NO leading or trailing slashes.
Type: objectDefault:None
Example config:[ { "path": "assets/compressed", "content-encoding": "br" }, { "path": "assets/gzip", "content-encoding": "gzip" } ]
cors
¶
S3 CORS configuration block
cors
Keys¶
enabled
: Enables/Disables CORS configuration
Type: booleanDefault:false
cors_rules
:
A list of CORS rules including lists of headers, methods, origins, exposed headers, and max age. For more details refer to: http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.put_bucket_cors
Type: objectDefault:None
Example config:[ { "cors_headers": [], "cors_methods": [], "cors_origins": [], "cors_expose_headers": [], "cors_max_age": 600 }, { "cors_headers": [], "cors_methods": [], "cors_origins": [], "cors_expose_headers": [], "cors_max_age": 600 } ]
encryption
¶
S3 Encryption configuration block
encryption
Keys¶
enabled
: Enables/Disables S3 Encryption configuration
Type: booleanDefault:false
encryption_rules
:
A list of S3 encryption rules. As of today only one rule is supported: ApplyServerSideEncryptionByDefault. Built in support for additional rules if this changes. Support for both AES256 or custom KMS (aws:kms) SSEAlgorithm. For aws:kms, specify a custom KMSMasterKeyID; this is not needed for AES256 and should not be specified. For more details refer to: http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.put_bucket_encryption
Type: objectDefault:None
Example config:[ { "ApplyServerSideEncryptionByDefault": { 'SSEAlgorithm': 'AES256'|'aws:kms', 'KMSMasterKeyID': 'string' } } ]
lifecycle
¶
S3 Lifecycle configuration block
lifecycle
Keys¶
enabled
: Enables/Disables S3 Lifecycle configuration
Type: booleanDefault:false
lifecycle_rules
:
A list of S3 lifecycle rules, if a lifecycle exists it replaces it. For more details refer to: http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.put_bucket_lifecycle_configuration
Type: arrayDefault:[{}]
Example config:[ { 'Expiration': { 'Date': datetime(2015, 1, 1), 'Days': 123, 'ExpiredObjectDeleteMarker': True|False }, 'ID': 'string', 'Prefix': 'string', 'Filter': { 'Prefix': 'string', 'Tag': { 'Key': 'string', 'Value': 'string' }, 'And': { 'Prefix': 'string', 'Tags': [ { 'Key': 'string', 'Value': 'string' }, ] } }, 'Status': 'Enabled'|'Disabled', 'Transitions': [ { 'Date': datetime(2015, 1, 1), 'Days': 123, 'StorageClass': 'GLACIER'|'STANDARD_IA'|'ONEZONE_IA' }, ], 'NoncurrentVersionTransitions': [ { 'NoncurrentDays': 123, 'StorageClass': 'GLACIER'|'STANDARD_IA'|'ONEZONE_IA' }, ], 'NoncurrentVersionExpiration': { 'NoncurrentDays': 123 }, 'AbortIncompleteMultipartUpload': { 'DaysAfterInitiation': 123 } } ]
logging
¶
S3 access logging configuration block
logging
Keys¶
enabled
: Enables/Disables S3 logging configuration
Type: booleanDefault:false
logging_grants
:
Specify permissions for who can view and modify the logging parameters. To set the logging status of a bucket, you must be the bucket owner. For more details refer to: http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.put_bucket_logging
Type: objectDefault:None
Example config:[ { 'Grantee': { 'DisplayName': 'string', 'EmailAddress': 'string', 'ID': 'string', 'Type': 'CanonicalUser'|'AmazonCustomerByEmail'|'Group', 'URI': 'string' }, 'Permission': 'FULL_CONTROL'|'READ'|'WRITE' } ]
logging_bucket
: Specifies the bucket where you want Amazon S3 to store server access logs.
Type: stringDefault:""
logging_bucket_prefix
: This element lets you specify a prefix for the keys that the log files will be stored under.
Type: stringDefault:{{ app }}/
notification
¶
S3 Notification configuration block
notification
Keys¶
enabled
: Enables/Disables S3 Notification configuration
Type: booleanDefault:false
topic_configurations
:
A list of S3 SNS topic notification rules, if an SNS notification configuration exists it replaces it. For more details refer to: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.put_bucket_notification_configuration
Type: arrayDefault:[{}]
Example config:
queue_configurations
:
A list of S3 SQS notification rules, if an SQS notification configuration exists it replaces it. For more details refer to: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.put_bucket_notification_configuration
Type: arrayDefault:[{}]
Example config:
lambda_configurations
:
A list of S3 Lambda notification rules, if a Lambda notification configuration exists it replaces it. For more details refer to: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.put_bucket_notification_configuration
Type: arrayDefault:[{}]
Example config:
tagging
¶
S3 tagging configuration block
tagging
Keys¶
tags
: This element lets you specify the set of tags (key/value pairs) to apply to the S3 bucket.
Type: objectDefault:{}
Example config:{ "key": "value", "key1": "value1" }
versioning
¶
S3 versioning configuration block
versioning
Keys¶
enabled
: Enables/Disables S3 versioning configuration
Type: booleanDefault:false
mfa_delete
: Specifies whether MFA delete is enabled in the bucket versioning configuration.
Type: stringDefault:Disabled
website
¶
S3 Website configuration block
website
Keys¶
enabled
: Enables/Disables an S3 bucket from being website enabled
Type: booleanDefault:false
index_suffix
: Default index page
Type: stringDefault:"index.html"
error_document
: Default error page
Type: stringDefault:"404.html"
stepfunction
Block¶
Top level key for AWS Step Function settings. Only necessary for Step Function deployments.
statemachine_type
¶
Determines whether a Standard or Express state machine is created. You cannot update the type of a state machine once it has been created.
Type: stringDefault:"STANDARD"
Options:
"STANDARD"
"EXPRESS"
json_definition
¶
The exported JSON definition of the AWS Step Function State Machine. You could craft this using the GUI or by using the AWS Toolkit in various IDEs.
Type: objectDefault:{}
tracing
¶
Determine whether AWS X-Ray tracing is enabled.
Type: objectDefault:{"enabled": false}
Options:
{"enabled": false}
{"enabled": true}
logging_configuration
¶
Logging configuration options for the Step Function state machine.
Type: objectDefault:{}
Example:{ "level": "ALL"|"ERROR"|"FATAL"|"OFF", "includeExecutionData": true|false, "destinations": [{ "cloudWatchLogsLogGroup": { "logGroupArn": "arn:aws:logs:region:account-id:log-group:log_group_name" } }] }
qe
Block¶
Top level key for Quality Test settings; used by Quality Testing Stages typically post and pre deployment. This specific feature is left up for custom Jinja Templates as most have specific testing flows. All keys in the QE block are passed and can be interpreted by custom Jinja2 Stages. While we are providing a base example below, every user of foremast might need to tweak the logic to add/remove what they need in their respective organization!
To facilitate this concept, this section highlights some common keys and ideas that can be implemented to achieve testing Post Deploy stages (using custom parameters per environment).
Refer to our full example templates here: https://github.com/foremast/foremast-template-examples
failure_action
¶
Define how Spinnaker should handle a quality stage failure: fail_pipeline - default behaviour; fail_branch - only fail that branch of the pipeline; fail_branch_continue_pipeline - fail the branch but continue the pipeline, then fail the pipeline; ignore_failures - ignores issues
Type: stringDefault:"fail_pipeline"
Values:
"fail_pipeline"
"fail_branch"
"fail_branch_continue_pipeline"
"ignore_failures"
ignore_unstable_results
¶
If set to true, ignore and treat unstable results from Jenkins stage as success.
Type: boolean
jenkins_master
¶
Name of Jenkins Master configured in Spinnaker to run your Jobs against
Type: string
stage_timeout
¶
Timeout quality tests step after a specified amount of time in milliseconds
Type: intFormat: ms
wait_for_completion
¶
If set to true, wait until the Jenkins stage is complete to proceed.
Type: boolean
Example Quality Stage JSON Configuration¶
{
"requisiteStageRefIds":[""],
"refId": "master",
"type": "jenkins",
"name": "{{ data.app.environment|upper }}: Quality Tests",
"waitForCompletion": {{ data.qe.wait_for_completion|default(true)|tojson }},
"markUnstableAsSuccessful": {{ data.qe.ignore_unstable_results|default(false)|tojson }},
"master": "{{ data.qe.jenkins_master or 'jenkinsci' }}",
{% if data.qe.stage_timeout %}
"stageTimeoutMs": {{ data.qe.stage_timeout }},
{% endif %}
{% if data.qe %}
"job": "{{ data.qe.test_job }}",
{% if data.qe.stage_failure_action %}
{% if data.qe.stage_failure_action == "fail_pipeline" %}
"completeOtherBranchesThenFail": false,
"continuePipeline": false,
"failPipeline": true,
{% elif data.qe.stage_failure_action == "fail_branch" %}
"completeOtherBranchesThenFail": false,
"continuePipeline": false,
"failPipeline": false,
{% elif data.qe.stage_failure_action == "fail_branch_continue_pipeline" %}
"completeOtherBranchesThenFail": true,
"continuePipeline": false,
"failPipeline": false,
{% elif data.qe.stage_failure_action == "ignore_failures" %}
"completeOtherBranchesThenFail": false,
"continuePipeline": true,
"failPipeline": false,
{% endif %}
{% else %}
"completeOtherBranchesThenFail": false,
"continuePipeline": false,
"failPipeline": true,
{% endif %}
"parameters": {
{% if data.qe.test_params %}
{% for param_name, param_value in data.qe.test_params.items() %}
"{{ param_name }}": "{{ param_value }}"{%- if not loop.last -%},{%- endif -%}
{% endfor %}
{% endif %}
{% if data.qe.test_json %}
{% if data.qe.test_params %},{% endif %}
"test_json": "{{ data.qe }}"
{% endif %}
}
{% else %}
"completeOtherBranchesThenFail": false,
"continuePipeline": false,
"failPipeline": true,
"job": "spinnaker-qe-{{ data.app.environment }}",
"parameters": {
"QE_LEVEL": "{{ data.qe.test_type or 'load' }}",
"SPINNAKER_APP_NAME": "{{ data.app.appname }}"
}
{% endif %}
}
Completion Webhooks are Spinnaker Webhook Stages that are appended to the pipeline stages for this environment.
{
"completion_webhooks": [
{
"url": "https://webhook.com/webhook1",
"custom_headers": {
"my-header": "hello"
},
"method": "POST",
"name": "Webhook 1",
"payload": {
"webhook": "one"
}
},
{
"url": "https://webhook.com/webhook2",
"custom_headers": {
"my-header": "hello again"
},
"method": "POST",
"name": "Webhook 2",
"payload": {
"webhook": "two"
}
}
]
}