Add support for Scalable Storage (#1054)
* Add support for Scalable Storage

* update docs, terrafmt fmt

* add sleep in between create of cluster and resize
dweinshenker committed Oct 23, 2023
1 parent 6bcb253 commit 1b5db16
Showing 5 changed files with 101 additions and 16 deletions.
7 changes: 7 additions & 0 deletions digitalocean/database/datasource_database_cluster.go
@@ -2,6 +2,7 @@ package database

import (
"context"
"strconv"

"github.com/digitalocean/godo"
"github.com/digitalocean/terraform-provider-digitalocean/digitalocean/config"
@@ -123,6 +124,11 @@ func DataSourceDigitalOceanDatabaseCluster() *schema.Resource {
},

"tags": tag.TagsSchema(),

"storage_size_mib": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
@@ -174,6 +180,7 @@ func dataSourceDigitalOceanDatabaseClusterRead(ctx context.Context, d *schema.Re
d.Set("region", db.RegionSlug)
d.Set("node_count", db.NumNodes)
d.Set("tags", tag.FlattenTags(db.Tags))
d.Set("storage_size_mib", strconv.FormatUint(db.StorageSizeMib, 10))

if _, ok := d.GetOk("maintenance_window"); ok {
if err := d.Set("maintenance_window", flattenMaintWindowOpts(*db.MaintenanceWindow)); err != nil {
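With the data source now exposing `storage_size_mib` as a computed attribute, the allocated disk size can be read back in configuration. A minimal sketch, assuming the data source's usual `name` lookup and an illustrative cluster name:

```hcl
data "digitalocean_database_cluster" "example" {
  name = "example-cluster"
}

output "cluster_storage_size_mib" {
  # Returned as a string, since the schema declares the attribute as TypeString.
  value = data.digitalocean_database_cluster.example.storage_size_mib
}
```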
32 changes: 18 additions & 14 deletions digitalocean/database/datasource_database_cluster_test.go
@@ -46,6 +46,8 @@ func TestAccDataSourceDigitalOceanDatabaseCluster_Basic(t *testing.T) {
"data.digitalocean_database_cluster.foobar", "private_network_uuid"),
resource.TestCheckResourceAttrSet(
"data.digitalocean_database_cluster.foobar", "project_id"),
resource.TestCheckResourceAttrSet(
"data.digitalocean_database_cluster.foobar", "storage_size_mib"),
testAccCheckDigitalOceanDatabaseClusterURIPassword(
"digitalocean_database_cluster.foobar", "uri"),
testAccCheckDigitalOceanDatabaseClusterURIPassword(
@@ -87,25 +89,27 @@ func testAccCheckDataSourceDigitalOceanDatabaseClusterExists(n string, databaseC

const testAccCheckDataSourceDigitalOceanDatabaseClusterConfigBasic = `
resource "digitalocean_database_cluster" "foobar" {
name = "%s"
engine = "pg"
version = "15"
size = "db-s-1vcpu-1gb"
region = "nyc1"
node_count = 1
tags = ["production"]
name = "%s"
engine = "pg"
version = "15"
size = "db-s-1vcpu-1gb"
region = "nyc1"
node_count = 1
tags = ["production"]
storage_size_mib = 10240
}
`

const testAccCheckDataSourceDigitalOceanDatabaseClusterConfigWithDatasource = `
resource "digitalocean_database_cluster" "foobar" {
name = "%s"
engine = "pg"
version = "15"
size = "db-s-1vcpu-1gb"
region = "nyc1"
node_count = 1
tags = ["production"]
name = "%s"
engine = "pg"
version = "15"
size = "db-s-1vcpu-1gb"
region = "nyc1"
node_count = 1
tags = ["production"]
storage_size_mib = 10240
}
data "digitalocean_database_cluster" "foobar" {
23 changes: 21 additions & 2 deletions digitalocean/database/resource_database_cluster.go
@@ -209,6 +209,12 @@ func ResourceDigitalOceanDatabaseCluster() *schema.Resource {
},
},
},

"storage_size_mib": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},

Timeouts: &schema.ResourceTimeout{
@@ -283,6 +289,13 @@ func resourceDigitalOceanDatabaseClusterCreate(ctx context.Context, d *schema.Re
opts.BackupRestore = expandBackupRestore(v.([]interface{}))
}

if v, ok := d.GetOk("storage_size_mib"); ok {
v, err := strconv.ParseUint(v.(string), 10, 64)
if err == nil {
opts.StorageSizeMib = v
}
}

log.Printf("[DEBUG] database cluster create configuration: %#v", opts)
database, _, err := client.Databases.Create(context.Background(), opts)
if err != nil {
@@ -343,12 +356,17 @@ func resourceDigitalOceanDatabaseClusterCreate(ctx context.Context, d *schema.Re
func resourceDigitalOceanDatabaseClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
client := meta.(*config.CombinedConfig).GodoClient()

if d.HasChanges("size", "node_count") {
if d.HasChanges("size", "node_count", "storage_size_mib") {
opts := &godo.DatabaseResizeRequest{
SizeSlug: d.Get("size").(string),
NumNodes: d.Get("node_count").(int),
}

if v, ok := d.GetOk("storage_size_mib"); ok {
v, err := strconv.ParseUint(v.(string), 10, 64)
if err == nil {
opts.StorageSizeMib = v
}
}
resp, err := client.Databases.Resize(context.Background(), d.Id(), opts)
if err != nil {
// If the database is somehow already destroyed, mark as
@@ -467,6 +485,7 @@ func resourceDigitalOceanDatabaseClusterRead(ctx context.Context, d *schema.Reso
d.Set("size", database.SizeSlug)
d.Set("region", database.RegionSlug)
d.Set("node_count", database.NumNodes)
d.Set("storage_size_mib", strconv.FormatUint(database.StorageSizeMib, 10))
d.Set("tags", tag.FlattenTags(database.Tags))

if _, ok := d.GetOk("maintenance_window"); ok {
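Because the schema declares `storage_size_mib` as `schema.TypeString`, the provider converts the value to and from the API's `uint64` with `strconv`, as the create, update, and read hunks above show. A minimal standalone sketch of that round trip (the literal value is illustrative):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Parse the configured string (e.g. "61440" MiB) before building the create/resize request.
	configured := "61440"
	mib, err := strconv.ParseUint(configured, 10, 64)
	if err != nil {
		fmt.Println("invalid storage_size_mib:", err)
		return
	}
	fmt.Println("request value:", mib) // uint64 carried in the request options

	// Format the value returned by the API back into the string stored in state.
	fmt.Println("state value:", strconv.FormatUint(mib, 10))
}
```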
54 changes: 54 additions & 0 deletions digitalocean/database/resource_database_cluster_test.go
@@ -55,6 +55,8 @@ func TestAccDigitalOceanDatabaseCluster_Basic(t *testing.T) {
"digitalocean_database_cluster.foobar", "private_network_uuid"),
resource.TestCheckResourceAttrSet(
"digitalocean_database_cluster.foobar", "project_id"),
resource.TestCheckResourceAttrSet(
"digitalocean_database_cluster.foobar", "storage_size_mib"),
testAccCheckDigitalOceanDatabaseClusterURIPassword(
"digitalocean_database_cluster.foobar", "uri"),
testAccCheckDigitalOceanDatabaseClusterURIPassword(
@@ -105,6 +107,46 @@ func TestAccDigitalOceanDatabaseCluster_WithUpdate(t *testing.T) {
})
}

func TestAccDigitalOceanDatabaseCluster_WithAdditionalStorage(t *testing.T) {
var database godo.Database
databaseName := acceptance.RandomTestName()

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acceptance.TestAccPreCheck(t) },
ProviderFactories: acceptance.TestAccProviderFactories,
CheckDestroy: testAccCheckDigitalOceanDatabaseClusterDestroy,
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(testAccCheckDigitalOceanDatabaseClusterConfigBasic, databaseName),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanDatabaseClusterExists("digitalocean_database_cluster.foobar", &database),
testAccCheckDigitalOceanDatabaseClusterAttributes(&database, databaseName),
resource.TestCheckResourceAttr(
"digitalocean_database_cluster.foobar", "storage_size_mib", "30720"),
),
},
{
Config: fmt.Sprintf(testAccCheckDigitalOceanDatabaseClusterConfigBasic, databaseName),
Check: resource.TestCheckFunc(
func(s *terraform.State) error {
time.Sleep(30 * time.Second)
return nil
},
),
},
{
Config: fmt.Sprintf(testAccCheckDigitalOceanDatabaseClusterConfigWithAdditionalStorage, databaseName),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanDatabaseClusterExists("digitalocean_database_cluster.foobar", &database),
testAccCheckDigitalOceanDatabaseClusterAttributes(&database, databaseName),
resource.TestCheckResourceAttr(
"digitalocean_database_cluster.foobar", "storage_size_mib", "61440"),
),
},
},
})
}

func TestAccDigitalOceanDatabaseCluster_WithMigration(t *testing.T) {
var database godo.Database
databaseName := acceptance.RandomTestName()
@@ -704,6 +746,18 @@ resource "digitalocean_database_cluster" "foobar" {
tags = ["production"]
}`

const testAccCheckDigitalOceanDatabaseClusterConfigWithAdditionalStorage = `
resource "digitalocean_database_cluster" "foobar" {
name = "%s"
engine = "pg"
version = "15"
size = "db-s-1vcpu-2gb"
region = "nyc1"
node_count = 1
tags = ["production"]
storage_size_mib = 61440
}`

const testAccCheckDigitalOceanDatabaseClusterConfigWithMigration = `
resource "digitalocean_database_cluster" "foobar" {
name = "%s"
1 change: 1 addition & 0 deletions docs/resources/database_cluster.md
@@ -116,6 +116,7 @@ The following arguments are supported:
* `eviction_policy` - (Optional) A string specifying the eviction policy for a Redis cluster. Valid values are: `noeviction`, `allkeys_lru`, `allkeys_random`, `volatile_lru`, `volatile_random`, or `volatile_ttl`.
* `sql_mode` - (Optional) A comma separated string specifying the SQL modes for a MySQL cluster.
* `maintenance_window` - (Optional) Defines when the automatic maintenance should be performed for the database cluster.
* `storage_size_mib` - (Optional) Defines the disk size, in MiB, allocated to the cluster. This can be adjusted for MySQL and PostgreSQL clusters based on predefined ranges for each slug/droplet size.

`maintenance_window` supports the following:

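As a usage sketch mirroring the acceptance-test configs above (the cluster name is illustrative), the argument is set directly on the resource and can be raised later to trigger a resize:

```hcl
resource "digitalocean_database_cluster" "example" {
  name             = "example-cluster"
  engine           = "pg"
  version          = "15"
  size             = "db-s-1vcpu-2gb"
  region           = "nyc1"
  node_count       = 1
  storage_size_mib = 61440 # 60 GiB; must fall within the predefined range for the chosen size slug
}
```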
