resource_compute_attached_disk.go.erb
<% autogen_exception -%>
package google

import (
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
<% if version == "ga" -%>
	"google.golang.org/api/compute/v1"
<% else -%>
	compute "google.golang.org/api/compute/v0.beta"
<% end -%>
)
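
// resourceComputeAttachedDisk returns the schema.Resource for attaching an existing
// disk to a compute instance. It wires up the create/read/delete/import functions,
// sets 300-second create and delete timeouts, and declares the disk, instance,
// project, zone, device_name, and mode fields.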
func resourceComputeAttachedDisk() *schema.Resource {
	return &schema.Resource{
		Create: resourceAttachedDiskCreate,
		Read:   resourceAttachedDiskRead,
		Delete: resourceAttachedDiskDelete,

		Importer: &schema.ResourceImporter{
			State: resourceAttachedDiskImport,
		},

		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(300 * time.Second),
			Delete: schema.DefaultTimeout(300 * time.Second),
		},

		Schema: map[string]*schema.Schema{
			"disk": {
				Type:             schema.TypeString,
				Required:         true,
				ForceNew:         true,
				Description:      `name or self_link of the disk that will be attached.`,
				DiffSuppressFunc: compareSelfLinkOrResourceName,
			},
			"instance": {
				Type:             schema.TypeString,
				Required:         true,
				ForceNew:         true,
				Description:      `name or self_link of the compute instance that the disk will be attached to. If the self_link is provided then zone and project are extracted from the self link. If only the name is used then zone and project must be defined as properties on the resource or provider.`,
				DiffSuppressFunc: compareSelfLinkOrResourceName,
			},
			"project": {
				Type:        schema.TypeString,
				ForceNew:    true,
				Computed:    true,
				Optional:    true,
				Description: `The project that the referenced compute instance is a part of. If instance is referenced by its self_link the project defined in the link will take precedence.`,
			},
			"zone": {
				Type:        schema.TypeString,
				ForceNew:    true,
				Computed:    true,
				Optional:    true,
				Description: `The zone that the referenced compute instance is located within. If instance is referenced by its self_link the zone defined in the link will take precedence.`,
			},
			"device_name": {
				Type:        schema.TypeString,
				ForceNew:    true,
				Optional:    true,
				Computed:    true,
				Description: `Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine.`,
			},
			"mode": {
				Type:         schema.TypeString,
				ForceNew:     true,
				Optional:     true,
				Default:      "READ_WRITE",
				Description:  `The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode.`,
				ValidateFunc: validation.StringInSlice([]string{"READ_ONLY", "READ_WRITE"}, false),
			},
		},
		UseJSONNumber: true,
	}
}
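
// resourceAttachedDiskCreate resolves the instance reference and the disk source
// (zonal or regional), calls Instances.AttachDisk, sets the resource ID to
// projects/{project}/zones/{zone}/instances/{instance}/{disk}, and waits for the
// attach operation to finish before delegating to the read function.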
func resourceAttachedDiskCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false)
	if err != nil {
		return err
	}

	disk := d.Get("disk").(string)
	diskName := GetResourceNameFromSelfLink(disk)
	diskSrc := fmt.Sprintf("projects/%s/zones/%s/disks/%s", zv.Project, zv.Zone, diskName)

	// Check if the disk is a regional disk
	if strings.Contains(disk, "regions") {
		rv, err := ParseRegionDiskFieldValue(disk, d, config)
		if err != nil {
			return err
		}
		diskSrc = rv.RelativeLink()
	}

	attachedDisk := compute.AttachedDisk{
		Source:     diskSrc,
		Mode:       d.Get("mode").(string),
		DeviceName: d.Get("device_name").(string),
	}

	op, err := config.NewComputeClient(userAgent).Instances.AttachDisk(zv.Project, zv.Zone, zv.Name, &attachedDisk).Do()
	if err != nil {
		return err
	}

	d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s/%s", zv.Project, zv.Zone, zv.Name, diskName))

	waitErr := computeOperationWaitTime(config, op, zv.Project,
		"disk to attach", userAgent, d.Timeout(schema.TimeoutCreate))
	if waitErr != nil {
		d.SetId("")
		return waitErr
	}

	return resourceAttachedDiskRead(d, meta)
}
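
// resourceAttachedDiskRead fetches the instance and looks for the referenced disk in
// its list of attached disks. If the disk is no longer attached, the resource is
// removed from state; otherwise device_name, mode, and the relative paths of the
// instance and disk are written back to state.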
func resourceAttachedDiskRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false)
	if err != nil {
		return err
	}
	if err := d.Set("project", zv.Project); err != nil {
		return fmt.Errorf("Error setting project: %s", err)
	}
	if err := d.Set("zone", zv.Zone); err != nil {
		return fmt.Errorf("Error setting zone: %s", err)
	}

	diskName := GetResourceNameFromSelfLink(d.Get("disk").(string))

	instance, err := config.NewComputeClient(userAgent).Instances.Get(zv.Project, zv.Zone, zv.Name).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("AttachedDisk %q", d.Id()))
	}

	// Iterate through the instance's attached disks as this is the only way to
	// confirm the disk is actually attached
	ad := findDiskByName(instance.Disks, diskName)
	if ad == nil {
		log.Printf("[WARN] Referenced disk wasn't found attached to this compute instance. Removing from state.")
		d.SetId("")
		return nil
	}

	if err := d.Set("device_name", ad.DeviceName); err != nil {
		return fmt.Errorf("Error setting device_name: %s", err)
	}
	if err := d.Set("mode", ad.Mode); err != nil {
		return fmt.Errorf("Error setting mode: %s", err)
	}

	// Force the referenced resources to a self-link in state because it's more specific than a name.
	instancePath, err := getRelativePath(instance.SelfLink)
	if err != nil {
		return err
	}
	if err := d.Set("instance", instancePath); err != nil {
		return fmt.Errorf("Error setting instance: %s", err)
	}
	diskPath, err := getRelativePath(ad.Source)
	if err != nil {
		return err
	}
	if err := d.Set("disk", diskPath); err != nil {
		return fmt.Errorf("Error setting disk: %s", err)
	}

	return nil
}
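
// resourceAttachedDiskDelete detaches the disk from the instance. It first confirms
// the disk is still attached; if it is not, the delete is treated as already done.
// Otherwise it calls Instances.DetachDisk and waits for the operation to complete.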
func resourceAttachedDiskDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false)
	if err != nil {
		return err
	}

	diskName := GetResourceNameFromSelfLink(d.Get("disk").(string))

	instance, err := config.NewComputeClient(userAgent).Instances.Get(zv.Project, zv.Zone, zv.Name).Do()
	if err != nil {
		return err
	}

	// Confirm the disk is still attached before making the call to detach it. If the disk isn't listed as an
	// attached disk on the compute instance then return as though the delete call succeeded since this is the
	// desired state.
	ad := findDiskByName(instance.Disks, diskName)
	if ad == nil {
		return nil
	}

	op, err := config.NewComputeClient(userAgent).Instances.DetachDisk(zv.Project, zv.Zone, zv.Name, ad.DeviceName).Do()
	if err != nil {
		return err
	}

	waitErr := computeOperationWaitTime(config, op, zv.Project,
		fmt.Sprintf("Detaching disk from %s", zv.Name), userAgent, d.Timeout(schema.TimeoutDelete))
	if waitErr != nil {
		return waitErr
	}

	return nil
}
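
// resourceAttachedDiskImport accepts either the full
// projects/{project}/zones/{zone}/instances/{instance}/{disk} path or the shorter
// {project}/{zone}/{instance}/{disk} form and normalizes the ID to the full format.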
func resourceAttachedDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
	config := meta.(*Config)

	err := parseImportId(
		[]string{"projects/(?P<project>[^/]+)/zones/(?P<zone>[^/]+)/instances/(?P<instance>[^/]+)/(?P<disk>[^/]+)",
			"(?P<project>[^/]+)/(?P<zone>[^/]+)/(?P<instance>[^/]+)/(?P<disk>[^/]+)"}, d, config)
	if err != nil {
		return nil, err
	}

	id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instances/{{instance}}/{{disk}}")
	if err != nil {
		return nil, err
	}
	d.SetId(id)

	return []*schema.ResourceData{d}, nil
}
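
// findDiskByName returns the attached disk whose source matches the given name or
// self_link, or nil if the instance has no such disk attached.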
func findDiskByName(disks []*compute.AttachedDisk, id string) *compute.AttachedDisk {
	for _, disk := range disks {
		if compareSelfLinkOrResourceName("", disk.Source, id, nil) {
			return disk
		}
	}

	return nil
}