Commit 3a7a486

[Go] Updated linting scripts to run on all Go V2 folders (#5904)
1 parent 587ee72 commit 3a7a486

File tree: 8 files changed (+218 −191 lines)

8 files changed

+218
-191
lines changed

gov2/bedrock-runtime/actions/invoke_model_test.go

+90 −83
```diff
@@ -6,14 +6,14 @@
 package actions
 
 import (
-	"testing"
 	"encoding/json"
 	"log"
+	"testing"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
-	"github.com/awsdocs/aws-doc-sdk-examples/gov2/bedrock-runtime/stubs"
-	"github.com/awsdocs/aws-doc-sdk-examples/gov2/testtools"
+	"github.com/awsdocs/aws-doc-sdk-examples/gov2/bedrock-runtime/stubs"
+	"github.com/awsdocs/aws-doc-sdk-examples/gov2/testtools"
 )
 
 const CLAUDE_MODEL_ID = "anthropic.claude-v2"
@@ -24,7 +24,7 @@ const TITAN_IMAGE_MODEL_ID = "amazon.titan-image-generator-v1"
 const prompt = "A test prompt"
 
 func CallInvokeModelActions(sdkConfig aws.Config) {
-	defer func() {
+	defer func() {
 		if r := recover(); r != nil {
 			log.Println(r)
 		}
@@ -34,110 +34,117 @@ func CallInvokeModelActions(sdkConfig aws.Config) {
 	wrapper := InvokeModelWrapper{client}
 
 	claudeCompletion, err := wrapper.InvokeClaude(prompt)
-	if err != nil {panic(err)}
+	if err != nil {
+		panic(err)
+	}
 	log.Println(claudeCompletion)
 
 	jurassic2Completion, err := wrapper.InvokeJurassic2(prompt)
-	if err != nil {panic(err)}
+	if err != nil {
+		panic(err)
+	}
 	log.Println(jurassic2Completion)
 
 	llama2Completion, err := wrapper.InvokeLlama2(prompt)
-	if err != nil {panic(err)}
+	if err != nil {
+		panic(err)
+	}
 	log.Println(llama2Completion)
 
-	seed := int64(0)
+	seed := int64(0)
 	titanImageCompletion, err := wrapper.InvokeTitanImage(prompt, seed)
-	if err != nil {panic(err)}
-	log.Println(titanImageCompletion)
+	if err != nil {
+		panic(err)
+	}
+	log.Println(titanImageCompletion)
 
-	log.Printf("Thanks for watching!")
+	log.Printf("Thanks for watching!")
 }
 
 func TestInvokeModels(t *testing.T) {
-	scenTest := InvokeModelActionsTest{}
-	testtools.RunScenarioTests(&scenTest, t)
+	scenTest := InvokeModelActionsTest{}
+	testtools.RunScenarioTests(&scenTest, t)
 }
 
-type InvokeModelActionsTest struct {}
-
+type InvokeModelActionsTest struct{}
 
 func (scenTest *InvokeModelActionsTest) SetupDataAndStubs() []testtools.Stub {
-	var stubList []testtools.Stub
-	stubList = append(stubList, stubInvokeModel(CLAUDE_MODEL_ID))
+	var stubList []testtools.Stub
+	stubList = append(stubList, stubInvokeModel(CLAUDE_MODEL_ID))
 	stubList = append(stubList, stubInvokeModel(JURASSIC2_MODEL_ID))
 	stubList = append(stubList, stubInvokeModel(LLAMA2_MODEL_ID))
 	stubList = append(stubList, stubInvokeModel(TITAN_IMAGE_MODEL_ID))
-	return stubList
+	return stubList
 }
 
 func (scenTest *InvokeModelActionsTest) RunSubTest(stubber *testtools.AwsmStubber) {
-	CallInvokeModelActions(*stubber.SdkConfig)
+	CallInvokeModelActions(*stubber.SdkConfig)
 }
 
 func (scenTest *InvokeModelActionsTest) Cleanup() {}
 
-func stubInvokeModel(modelId string) (testtools.Stub) {
-	var request []byte
-	var response []byte
-
-	switch modelId {
-	case CLAUDE_MODEL_ID:
-		request, _ = json.Marshal(ClaudeRequest{
-			Prompt: "Human: " + prompt + "\n\nAssistant:",
-			MaxTokensToSample: 200,
-			Temperature: 0.5,
-			StopSequences: []string{"\n\nHuman:"},
-		})
-		response, _ = json.Marshal(ClaudeResponse{
-			Completion: "A fake response",
-		})
-
-	case JURASSIC2_MODEL_ID:
-		request, _ = json.Marshal(Jurassic2Request{
-			Prompt: prompt,
-			MaxTokens: 200,
-			Temperature: 0.5,
-		})
-		response, _ = json.Marshal(Jurassic2Response{
-			Completions: []Completion{
-				{ Data: Data{ Text: "A fake response", }, },
-			},
-		})
-
-	case LLAMA2_MODEL_ID:
-		request, _ = json.Marshal(Llama2Request{
-			Prompt: prompt,
-			MaxGenLength: 512,
-			Temperature: 0.5,
-		})
-		response, _ = json.Marshal(Llama2Response{
-			Generation: "A fake response",
-		})
-
-	case TITAN_IMAGE_MODEL_ID:
-		request, _ = json.Marshal(TitanImageRequest{
-			TaskType: "TEXT_IMAGE",
-			TextToImageParams: TextToImageParams{
-				Text: prompt,
-			},
-			ImageGenerationConfig: ImageGenerationConfig{
-				NumberOfImages: 1,
-				Quality: "standard",
-				CfgScale: 8.0,
-				Height: 512,
-				Width: 512,
-				Seed: 0,
-			},
-		})
-		response, _ = json.Marshal(TitanImageResponse{
-			Images: []string{"FakeBase64String=="},
-		})
-
-	default:
-		return testtools.Stub{}
-	}
-
-	return stubs.StubInvokeModel(stubs.StubInvokeModelParams{
-		request, response, modelId, nil,
-	})
+func stubInvokeModel(modelId string) testtools.Stub {
+	var request []byte
+	var response []byte
+
+	switch modelId {
+	case CLAUDE_MODEL_ID:
+		request, _ = json.Marshal(ClaudeRequest{
+			Prompt:            "Human: " + prompt + "\n\nAssistant:",
+			MaxTokensToSample: 200,
+			Temperature:       0.5,
+			StopSequences:     []string{"\n\nHuman:"},
+		})
+		response, _ = json.Marshal(ClaudeResponse{
+			Completion: "A fake response",
+		})
+
+	case JURASSIC2_MODEL_ID:
+		request, _ = json.Marshal(Jurassic2Request{
+			Prompt:      prompt,
+			MaxTokens:   200,
+			Temperature: 0.5,
+		})
+		response, _ = json.Marshal(Jurassic2Response{
+			Completions: []Completion{
+				{Data: Data{Text: "A fake response"}},
+			},
+		})
+
+	case LLAMA2_MODEL_ID:
+		request, _ = json.Marshal(Llama2Request{
+			Prompt:       prompt,
+			MaxGenLength: 512,
+			Temperature:  0.5,
+		})
+		response, _ = json.Marshal(Llama2Response{
+			Generation: "A fake response",
+		})
+
+	case TITAN_IMAGE_MODEL_ID:
+		request, _ = json.Marshal(TitanImageRequest{
+			TaskType: "TEXT_IMAGE",
+			TextToImageParams: TextToImageParams{
+				Text: prompt,
+			},
+			ImageGenerationConfig: ImageGenerationConfig{
+				NumberOfImages: 1,
+				Quality:        "standard",
+				CfgScale:       8.0,
+				Height:         512,
+				Width:          512,
+				Seed:           0,
+			},
+		})
+		response, _ = json.Marshal(TitanImageResponse{
+			Images: []string{"FakeBase64String=="},
+		})
+
+	default:
+		return testtools.Stub{}
+	}
+
+	return stubs.StubInvokeModel(stubs.StubInvokeModelParams{
+		Request: request, Response: response, ModelId: modelId, RaiseErr: nil,
+	})
 }
```
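
Most of these edits are mechanical gofmt/lint fixes: single-line `if err != nil {panic(err)}` bodies expanded onto their own lines, and the positional struct literal passed to `stubs.StubInvokeModel` replaced with named fields. A minimal sketch of why linters prefer named fields, using a hypothetical `Params` type that only mirrors the shape of `stubs.StubInvokeModelParams` (not the repo's actual definition):

```go
package main

import "fmt"

// Params is an illustrative stand-in for stubs.StubInvokeModelParams.
type Params struct {
	Request  []byte
	Response []byte
	ModelId  string
	RaiseErr error
}

func main() {
	// Positional literal: silently misassigns values if fields are ever
	// reordered or inserted, and composite-literal lint checks flag it.
	p1 := Params{[]byte("req"), []byte("resp"), "anthropic.claude-v2", nil}

	// Named fields: robust to reordering and self-documenting, which is
	// the form the linted stubInvokeModel now uses.
	p2 := Params{
		Request:  []byte("req"),
		Response: []byte("resp"),
		ModelId:  "anthropic.claude-v2",
		RaiseErr: nil,
	}

	fmt.Println(p1.ModelId == p2.ModelId) // true
}
```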

gov2/bedrock-runtime/actions/invoke_model_with_response_stream.go

+25 −18
```diff
@@ -14,7 +14,7 @@ import (
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
 	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime/types"
-	)
+)
 
 // snippet-start:[gov2.bedrock-runtime.InvokeModelWithResponseStreamWrapper.complete]
 // snippet-start:[gov2.bedrock-runtime.InvokeModelWithResponseStreamWrapper.struct]
@@ -34,9 +34,9 @@ type InvokeModelWithResponseStreamWrapper struct {
 // https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html
 
 type Request struct {
-	Prompt string `json:"prompt"`
-	MaxTokensToSample int `json:"max_tokens_to_sample"`
-	Temperature float64 `json:"temperature,omitempty"`
+	Prompt            string  `json:"prompt"`
+	MaxTokensToSample int     `json:"max_tokens_to_sample"`
+	Temperature       float64 `json:"temperature,omitempty"`
 }
 
 type Response struct {
@@ -48,21 +48,24 @@ type Response struct {
 
 func (wrapper InvokeModelWithResponseStreamWrapper) InvokeModelWithResponseStream(prompt string) (string, error) {
 
-	modelId := "anthropic.claude-v2"
+	modelId := "anthropic.claude-v2"
 
 	// Anthropic Claude requires you to enclose the prompt as follows:
 	prefix := "Human: "
 	postfix := "\n\nAssistant:"
 	prompt = prefix + prompt + postfix
-
-	request := ClaudeRequest {
+
+	request := ClaudeRequest{
 		Prompt:            prompt,
 		MaxTokensToSample: 200,
 		Temperature:       0.5,
 		StopSequences:     []string{"\n\nHuman:"},
 	}
 
 	body, err := json.Marshal(request)
+	if err != nil {
+		log.Panicln("Couldn't marshal the request: ", err)
+	}
 
 	output, err := wrapper.BedrockRuntimeClient.InvokeModelWithResponseStream(context.Background(), &bedrockruntime.InvokeModelWithResponseStreamInput{
 		Body: body,
@@ -71,15 +74,15 @@ func (wrapper InvokeModelWithResponseStreamWrapper) InvokeModelWithResponseStrea
 	})
 
 	if err != nil {
-		errMsg := err.Error()
-		if strings.Contains(errMsg, "no such host") {
-			log.Printf("The Bedrock service is not available in the selected region. Please double-check the service availability for your region at https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/.\n")
-		} else if strings.Contains(errMsg, "Could not resolve the foundation model") {
-			log.Printf("Could not resolve the foundation model from model identifier: \"%v\". Please verify that the requested model exists and is accessible within the specified region.\n", modelId)
-		} else {
-			log.Printf("Couldn't invoke Anthropic Claude. Here's why: %v\n", err)
-		}
-	}
+		errMsg := err.Error()
+		if strings.Contains(errMsg, "no such host") {
+			log.Printf("The Bedrock service is not available in the selected region. Please double-check the service availability for your region at https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/.\n")
+		} else if strings.Contains(errMsg, "Could not resolve the foundation model") {
+			log.Printf("Could not resolve the foundation model from model identifier: \"%v\". Please verify that the requested model exists and is accessible within the specified region.\n", modelId)
+		} else {
+			log.Printf("Couldn't invoke Anthropic Claude. Here's why: %v\n", err)
+		}
+	}
 
 	resp, err := processStreamingOutput(output, func(ctx context.Context, part []byte) error {
 		fmt.Print(string(part))
@@ -113,7 +116,11 @@ func processStreamingOutput(output *bedrockruntime.InvokeModelWithResponseStream
 				return resp, err
 			}
 
-			handler(context.Background(), []byte(resp.Completion))
+			err = handler(context.Background(), []byte(resp.Completion))
+			if err != nil {
+				return resp, err
+			}
+
 			combinedResult += resp.Completion
 
 		case *types.UnknownUnionMember:
@@ -131,4 +138,4 @@
 
 // snippet-end:[gov2.bedrock-runtime.InvokeModelWithResponseStream]
 
-// snippet-end:[gov2.bedrock-runtime.InvokeModelWithResponseStreamWrapper.complete]
+// snippet-end:[gov2.bedrock-runtime.InvokeModelWithResponseStreamWrapper.complete]
```
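
Two behavioral fixes ride along with the formatting here: the `json.Marshal` error is now checked instead of discarded, and the streaming handler's return value is propagated instead of ignored. A minimal sketch of the propagation pattern, assuming a generic callback type that only mirrors the shape used by `processStreamingOutput` (the names below are illustrative, not the repo's actual types):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// StreamHandler mirrors the per-chunk callback shape used by
// processStreamingOutput; the name is a hypothetical stand-in.
type StreamHandler func(ctx context.Context, part []byte) error

// emit forwards one chunk to the handler and propagates its error,
// matching the fix this commit applies to processStreamingOutput.
func emit(h StreamHandler, chunk []byte) error {
	if err := h(context.Background(), chunk); err != nil {
		return err // before the fix, this error was silently dropped
	}
	return nil
}

func main() {
	fail := func(ctx context.Context, part []byte) error {
		return errors.New("handler failed")
	}
	if err := emit(fail, []byte("partial completion")); err != nil {
		fmt.Println("caught:", err) // the caller now sees the failure
	}
}
```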

0 commit comments
