This repository has been archived by the owner on Jan 26, 2023. It is now read-only.

Commit

Add parameter handling for resource type, fix SQL issue, add paging code
ensary committed Sep 18, 2019
1 parent 0a9641b commit 3ab2aec
Showing 7 changed files with 180 additions and 48 deletions.
12 changes: 6 additions & 6 deletions api.yaml
@@ -100,14 +100,14 @@ paths:
in: "query"
description: "Maximum number of matching cloud assets to return per page. 100 by default"
required: false
default: 100
schema:
type: "integer"
minimum: 1
default: 100
- name: "type"
in: "query"
description: "Limit search to a specific AWS resource type. Currently supported values: AWS::EC2::Instance, AWS::ElasticLoadBalancing::LoadBalancer, AWS::ElasticLoadBalancingV2::LoadBalancer"
required: false
description: "AWS resource type. Currently supported values: AWS::EC2::Instance, AWS::ElasticLoadBalancing::LoadBalancer, AWS::ElasticLoadBalancingV2::LoadBalancer"
required: true
schema:
type: "string"
responses:
@@ -138,8 +138,8 @@ paths:
request: >
{
"time": "#!index .Request.Query.time 0!#",
"count": #!if .Request.Query.count !# #!index .Request.Query.count 0!# #! else !# 100 #! end !# #!if .Request.Query.type !# ,
"type": "#!index .Request.Query.type 0!#" #! end !#
"count": #!if .Request.Query.count !# #!index .Request.Query.count 0!# #! else !# 100 #! end !# ,
"type": "#!index .Request.Query.type 0!#"
}
success: '{"status": 200, "bodyPassthrough": true}'
error: >
@@ -190,7 +190,7 @@ paths:
async: false
request: >
{
"pageToken": "#!.Request.URL.PageToken!#",
"pageToken": "#!.Request.URL.PageToken!#"
}
success: '{"status": 200, "bodyPassthrough": true}'
error: >
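For orientation, not part of the commit: a minimal Go sketch of a first-page request against the updated endpoint. The host and the /v1/cloud/asset path are assumptions (the full paths are outside this excerpt); only the time, count, and type query parameters come from the api.yaml change above, where type is now required.

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"time"
)

func main() {
	// Build the query parameters defined in api.yaml; "type" is now required.
	q := url.Values{}
	q.Set("time", time.Now().UTC().Format(time.RFC3339Nano))
	q.Set("count", "100")
	q.Set("type", "AWS::EC2::Instance")

	// Hypothetical host and path, for illustration only.
	resp, err := http.Get("http://localhost:8080/v1/cloud/asset?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // a successful response would carry assets plus a nextPageToken
}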
20 changes: 13 additions & 7 deletions main.go
@@ -43,6 +43,11 @@ func main() {
StatFn: domain.StatFromContext,
Fetcher: dbStorage,
}
fetchAllAssetsByTimePage := &v1.CloudFetchAllAssetsByTimePageHandler{
LogFn: domain.LoggerFromContext,
StatFn: domain.StatFromContext,
Fetcher: dbStorage,
}
createPartition := &v1.CreatePartitionHandler{
LogFn: domain.LoggerFromContext,
Generator: dbStorage,
@@ -56,13 +61,14 @@
Deleter: dbStorage,
}
handlers := map[string]serverfull.Function{
"insert": serverfull.NewFunction(insert.Handle),
"fetchByIP": serverfull.NewFunction(fetchByIP.Handle),
"fetchByHostname": serverfull.NewFunction(fetchByHostname.Handle),
"fetchAllAssetsByTime": serverfull.NewFunction(fetchAllAssetsByTime.Handle),
"createPartition": serverfull.NewFunction(createPartition.Handle),
"getPartitions": serverfull.NewFunction(getPartitions.Handle),
"deletePartitions": serverfull.NewFunction(deletePartitions.Handle),
"insert": serverfull.NewFunction(insert.Handle),
"fetchByIP": serverfull.NewFunction(fetchByIP.Handle),
"fetchByHostname": serverfull.NewFunction(fetchByHostname.Handle),
"fetchAllAssetsByTime": serverfull.NewFunction(fetchAllAssetsByTime.Handle),
"fetchMoreAssetsByPageToken": serverfull.NewFunction(fetchAllAssetsByTimePage.Handle),
"createPartition": serverfull.NewFunction(createPartition.Handle),
"getPartitions": serverfull.NewFunction(getPartitions.Handle),
"deletePartitions": serverfull.NewFunction(deletePartitions.Handle),
}

fetcher := &serverfull.StaticFetcher{Functions: handlers}
2 changes: 1 addition & 1 deletion pkg/domain/storage.go
@@ -38,7 +38,7 @@ type CloudAssetByHostnameFetcher interface {

// CloudAllAssetsByTimeFetcher fetches details for all cloud assets based on limit and optional offset with a given point in time
type CloudAllAssetsByTimeFetcher interface {
FetchAll(ctx context.Context, when time.Time, count uint, offset uint) ([]CloudAssetDetails, error)
FetchAll(ctx context.Context, when time.Time, count uint, offset uint, assetType string) ([]CloudAssetDetails, error)
}

// Partition represents a database partition with the specified time range
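To make the new assetType argument concrete, here is a toy in-memory fetcher satisfying the updated CloudAllAssetsByTimeFetcher contract. This is illustrative only: the repository's real implementation is the SQL-backed dbStorage wired up in main.go, which is not shown in this diff, and the package name below is hypothetical.

package storage

import (
	"context"
	"time"

	"github.com/asecurityteam/asset-inventory-api/pkg/domain"
)

// inMemoryFetcher holds pre-sorted asset snapshots keyed by asset type.
type inMemoryFetcher struct {
	byType map[string][]domain.CloudAssetDetails
}

// FetchAll filters by the new assetType argument and applies count/offset for
// paging, matching the updated domain.CloudAllAssetsByTimeFetcher signature.
// The toy ignores `when`; the real store filters by point in time.
func (f *inMemoryFetcher) FetchAll(ctx context.Context, when time.Time, count uint, offset uint, assetType string) ([]domain.CloudAssetDetails, error) {
	assets := f.byType[assetType]
	if offset >= uint(len(assets)) {
		return nil, nil
	}
	end := offset + count
	if end > uint(len(assets)) {
		end = uint(len(assets))
	}
	return assets[offset:end], nil
}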
146 changes: 131 additions & 15 deletions pkg/handlers/v1/cloud_fetch.go
@@ -2,6 +2,8 @@ package v1

import (
"context"
"encoding/base32"
"encoding/json"
"errors"
"fmt"
"time"
@@ -10,11 +12,23 @@
"github.com/asecurityteam/asset-inventory-api/pkg/logs"
)

const (
awsEC2 = "AWS::EC2::Instance"
awsELB = "AWS::ElasticLoadBalancing::LoadBalancer"
awsALB = "AWS::ElasticLoadBalancingV2::LoadBalancer"
)

// CloudAssets represents a list of assets
type CloudAssets struct {
Assets []CloudAssetDetails `json:"assets"`
}

// PagedCloudAssets represents a list of assets with next page token for bulk requests
type PagedCloudAssets struct {
CloudAssets
NextPageToken string `json:"nextPageToken"`
}

// CloudAssetDetails represent an asset and associated attributes
type CloudAssetDetails struct {
PrivateIPAddresses []string `json:"privateIpAddresses"`
@@ -42,9 +56,42 @@ type CloudAssetFetchByHostnameParameters struct {
// CloudAssetFetchAllByTimestampParameters represents the incoming payload for bulk fetching cloud assets for point in time with optional pagination
type CloudAssetFetchAllByTimestampParameters struct {
Timestamp string `json:"time"`
// we use the pointer type to detect if the value was not present in input as otherwise the int variable would be 0, which is a valid input
Count *uint `json:"count"`
Offset *uint `json:"offset"`
Count uint `json:"count"`
Offset uint `json:"offset"`
Type string `json:"type"`
}

func (p *CloudAssetFetchAllByTimestampParameters) toNextPageToken() (string, error) {
nextPageParameters := CloudAssetFetchAllByTimestampParameters{
Timestamp: p.Timestamp,
Count: p.Count,
Offset: p.Offset + p.Count,
Type: p.Type,
}
js, err := json.Marshal(nextPageParameters)
if err != nil {
return "", err
}
token := base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(js)
return token, nil
}

func fetchAllByTimeStampParametersForToken(token string) (*CloudAssetFetchAllByTimestampParameters, error) {
js, err := base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(token)
if err != nil {
return nil, err
}
ret := CloudAssetFetchAllByTimestampParameters{}
err = json.Unmarshal(js, &ret)
if err != nil {
return nil, err
}
return &ret, nil
}

// CloudAssetFetchAllByTimeStampPageParameters represents the request for subsequent pages of bulk fetching cloud assets
type CloudAssetFetchAllByTimeStampPageParameters struct {
PageToken string `json:"pageToken"`
}

// CloudFetchByIPHandler defines a lambda handler for fetching cloud assets with a given IP address
@@ -117,44 +164,113 @@ func (h *CloudFetchByHostnameHandler) Handle(ctx context.Context, input CloudAss
return extractOutput(assets), nil
}

func validateAssetType(input string) (string, error) {
switch input {
case awsEC2, awsELB, awsALB:
return input, nil
default:
return "", fmt.Errorf("unknown asset type %s", input)
}
}

// CloudFetchAllAssetsByTimeHandler defines a lambda handler for bulk fetching cloud assets known at specific point in time
type CloudFetchAllAssetsByTimeHandler struct {
LogFn domain.LogFn
StatFn domain.StatFn
Fetcher domain.CloudAllAssetsByTimeFetcher
}

// Handle handles fetching cloud assets by hostname
func (h *CloudFetchAllAssetsByTimeHandler) Handle(ctx context.Context, input CloudAssetFetchAllByTimestampParameters) (CloudAssets, error) {
// Handle handles fetching cloud assets with pagination
func (h *CloudFetchAllAssetsByTimeHandler) Handle(ctx context.Context, input CloudAssetFetchAllByTimestampParameters) (PagedCloudAssets, error) {
logger := h.LogFn(ctx)

ts, e := time.Parse(time.RFC3339Nano, input.Timestamp)
if e != nil {
logger.Info(logs.InvalidInput{Reason: e.Error()})
return CloudAssets{}, InvalidInput{Field: "time", Cause: e}
return PagedCloudAssets{}, InvalidInput{Field: "time", Cause: e}
}

if input.Count == nil {
if input.Count == 0 {
e = errors.New("missing or malformed required parameter count")
logger.Info(logs.InvalidInput{Reason: e.Error()})
return CloudAssets{}, InvalidInput{Field: "count", Cause: e}
return PagedCloudAssets{}, InvalidInput{Field: "count", Cause: e}
}

var offset uint
if input.Offset != nil {
offset = *input.Offset
assetType, e := validateAssetType(input.Type)
if e != nil {
logger.Info(logs.InvalidInput{Reason: e.Error()})
return PagedCloudAssets{}, InvalidInput{Field: "type", Cause: e}
}

assets, e := h.Fetcher.FetchAll(ctx, ts, *input.Count, offset)
var offset uint = 0 // do not offset as this is the first page
assets, e := h.Fetcher.FetchAll(ctx, ts, input.Count, offset, assetType)
if e != nil {
logger.Error(logs.StorageError{Reason: e.Error()})
return CloudAssets{}, e
return PagedCloudAssets{}, e
}
if len(assets) == 0 {
return CloudAssets{}, NotFound{ID: "any"}
return PagedCloudAssets{}, NotFound{ID: "any"}
}

return extractOutput(assets), nil
nextPageToken, e := input.toNextPageToken()
if e != nil {
logger.Error(logs.StorageError{Reason: e.Error()})
}
return PagedCloudAssets{extractOutput(assets), nextPageToken}, nil
}

// CloudFetchAllAssetsByTimePageHandler defines a lambda handler for bulk fetching subsequent pages of cloud assets known at specific point in time
type CloudFetchAllAssetsByTimePageHandler struct {
LogFn domain.LogFn
StatFn domain.StatFn
Fetcher domain.CloudAllAssetsByTimeFetcher
}

// Handle handles subsequent page fetching of cloud assets
func (h *CloudFetchAllAssetsByTimePageHandler) Handle(ctx context.Context, input CloudAssetFetchAllByTimeStampPageParameters) (PagedCloudAssets, error) {
logger := h.LogFn(ctx)
params, e := fetchAllByTimeStampParametersForToken(input.PageToken)
if e != nil {
logger.Info(logs.InvalidInput{Reason: e.Error()})
return PagedCloudAssets{}, InvalidInput{Field: "PageToken", Cause: e}
}
ts, e := time.Parse(time.RFC3339Nano, params.Timestamp)
if e != nil {
logger.Info(logs.InvalidInput{Reason: e.Error()})
return PagedCloudAssets{}, InvalidInput{Field: "PageToken", Cause: e}
}

if params.Count == 0 {
e = errors.New("missing or malformed required parameter count")
logger.Info(logs.InvalidInput{Reason: e.Error()})
return PagedCloudAssets{}, InvalidInput{Field: "count", Cause: e}
}

if params.Offset == 0 {
e = errors.New("missing or malformed required parameter offset")
logger.Info(logs.InvalidInput{Reason: e.Error()})
return PagedCloudAssets{}, InvalidInput{Field: "offset", Cause: e}
}

assetType, e := validateAssetType(params.Type)
if e != nil {
logger.Info(logs.InvalidInput{Reason: e.Error()})
return PagedCloudAssets{}, InvalidInput{Field: "type", Cause: e}
}

logger.Info(fmt.Sprintf("count: %d offset: %d, type: %s ,ts %v", params.Count, params.Offset, assetType, ts))
assets, e := h.Fetcher.FetchAll(ctx, ts, params.Count, params.Offset, assetType)
if e != nil {
logger.Error(logs.StorageError{Reason: e.Error()})
return PagedCloudAssets{}, e
}
logger.Info(fmt.Sprintf("count: %d offset: %d, type: %s ,ts %v, r %d", params.Count, params.Offset, assetType, ts, len(assets)))
if len(assets) == 0 {
return PagedCloudAssets{}, NotFound{ID: "any"}
}

nextPageToken, e := params.toNextPageToken()
return PagedCloudAssets{extractOutput(assets), nextPageToken}, nil
}

func extractOutput(assets []domain.CloudAssetDetails) CloudAssets {
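A standalone illustration (not part of the commit) of the page-token scheme introduced above: the parameters for the next page are JSON-encoded and then base32-encoded without padding, mirroring what toNextPageToken and fetchAllByTimeStampParametersForToken do. The struct below simply copies the fields and JSON tags of CloudAssetFetchAllByTimestampParameters.

package main

import (
	"encoding/base32"
	"encoding/json"
	"fmt"
)

// pageParams mirrors CloudAssetFetchAllByTimestampParameters and its JSON tags.
type pageParams struct {
	Timestamp string `json:"time"`
	Count     uint   `json:"count"`
	Offset    uint   `json:"offset"`
	Type      string `json:"type"`
}

func main() {
	first := pageParams{Timestamp: "2019-09-18T00:00:00Z", Count: 100, Offset: 0, Type: "AWS::EC2::Instance"}

	// Encode the parameters for the next page: same query, offset advanced by count.
	next := first
	next.Offset += first.Count
	js, _ := json.Marshal(next)
	token := base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(js)
	fmt.Println("pageToken:", token)

	// Decoding reverses both steps and recovers the parameters for the follow-up request.
	raw, _ := base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(token)
	var decoded pageParams
	_ = json.Unmarshal(raw, &decoded)
	fmt.Printf("decoded: %+v\n", decoded)
}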
22 changes: 15 additions & 7 deletions pkg/handlers/v1/cloud_fetch_test.go
@@ -53,11 +53,12 @@ func validFetchByHostnameInput() CloudAssetFetchByHostnameParameters {

func validFetchAllByTimestampInput() CloudAssetFetchAllByTimestampParameters {
var count uint = 100
var offset uint = 1
var offset uint = 0
return CloudAssetFetchAllByTimestampParameters{
Timestamp: time.Now().Format(time.RFC3339Nano),
Count: &count,
Offset: &offset,
Count: count,
Offset: offset,
Type: awsEC2,
}
}

@@ -68,9 +69,16 @@ func TestCloudFetchAllAssetsByTimeInvalidDate(t *testing.T) {
require.NotNil(t, err)
}

func TestCloudFetchAllAssetsByTimeNoCount(t *testing.T) {
func TestCloudFetchAllAssetsByTimeInvalidCount(t *testing.T) {
input := validFetchAllByTimestampInput()
input.Count = nil
input.Count = 0
_, err := newCloudFetchAllAssetsByTimeHandler(nil).Handle(context.Background(), input)
require.NotNil(t, err)
}

func TestCloudFetchAllAssetsByTimeInvalidType(t *testing.T) {
input := validFetchAllByTimestampInput()
input.Type = "Very wrong type"
_, err := newCloudFetchAllAssetsByTimeHandler(nil).Handle(context.Background(), input)
require.NotNil(t, err)
}
@@ -82,7 +90,7 @@ func TestCloudFetchAllAssetsByTimeStorageError(t *testing.T) {
fetcher := NewMockCloudAllAssetsByTimeFetcher(ctrl)
input := validFetchAllByTimestampInput()
ts, _ := time.Parse(time.RFC3339Nano, input.Timestamp)
fetcher.EXPECT().FetchAll(gomock.Any(), ts, *input.Count, *input.Offset).Return([]domain.CloudAssetDetails{}, errors.New(""))
fetcher.EXPECT().FetchAll(gomock.Any(), ts, input.Count, input.Offset, input.Type).Return([]domain.CloudAssetDetails{}, errors.New(""))

_, e := newCloudFetchAllAssetsByTimeHandler(fetcher).Handle(context.Background(), input)
require.NotNil(t, e)
@@ -94,7 +102,7 @@ func TestCloudFetchAllAssetsByTimeNoResults(t *testing.T) {
fetcher := NewMockCloudAllAssetsByTimeFetcher(ctrl)
input := validFetchAllByTimestampInput()
ts, _ := time.Parse(time.RFC3339Nano, input.Timestamp)
fetcher.EXPECT().FetchAll(gomock.Any(), ts, *input.Count, *input.Offset).Return([]domain.CloudAssetDetails{}, nil)
fetcher.EXPECT().FetchAll(gomock.Any(), ts, input.Count, input.Offset, input.Type).Return([]domain.CloudAssetDetails{}, nil)

_, e := newCloudFetchAllAssetsByTimeHandler(fetcher).Handle(context.Background(), input)
require.NotNil(t, e)
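One test this commit could also have added, shown here only as a sketch: a round-trip check for the new page-token helpers. It would live in cloud_fetch_test.go alongside the tests above and relies on that file's existing testing and require imports.

func TestFetchAllByTimestampPageTokenRoundTrip(t *testing.T) {
	in := validFetchAllByTimestampInput()

	token, err := in.toNextPageToken()
	require.Nil(t, err)

	out, err := fetchAllByTimeStampParametersForToken(token)
	require.Nil(t, err)
	require.Equal(t, in.Timestamp, out.Timestamp)
	require.Equal(t, in.Type, out.Type)
	// The token encodes the *next* page: offset advanced by count.
	require.Equal(t, in.Offset+in.Count, out.Offset)
}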
8 changes: 4 additions & 4 deletions pkg/handlers/v1/mock_storage_test.go

Some generated files are not rendered by default.

