Skip to content

Commit 820f22d

Browse files
ciaranschutte (Ciaran Schutte)
and committed
Response data & batching requests (#887)
* move response object creation into module * change type structure for gql server * change resolver code based on new gql type * adjust agg type gql resolved response * add http resp helpers * change resolveAggregation code to batch processing * add nodeInfo to resolver * change resolver method to reducer with mutations * add type * fix typedef * format response object * comments and cleanup * remove log --------- Co-authored-by: Ciaran Schutte <ciaranschutte@oicr.on.ca>
1 parent e8517ec commit 820f22d

File tree

11 files changed

+347
-128
lines changed

11 files changed

+347
-128
lines changed

modules/server/src/network/aggregations/index.ts

Lines changed: 130 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -1,28 +1,52 @@
11
import { SupportedAggregation, SUPPORTED_AGGREGATIONS } from '../common';
2-
import { Aggregations, NetworkAggregation, NumericAggregations } from '../types';
2+
import { Aggregations, NetworkAggregation, NumericAggregations, RemoteAggregation } from '../types';
3+
4+
type NetworkResult = {
5+
[key: string]: RemoteAggregation;
6+
};
7+
8+
type ResolveAggregationInput = {
9+
networkResult: NetworkResult;
10+
requestedAggregationFields: string[];
11+
accumulator: any;
12+
};
313

414
/**
5-
* Pick each field from network result aggregations and reduce into single aggregation
15+
* Resolves returned aggregations from network queries into single accumulated aggregation
616
*
7-
* @param networkResults
8-
* @param rootQueryFields
17+
* @param
18+
* @returns number - Total bucket count for node
919
*/
10-
export const resolveAggregations = (networkResults, rootQueryFields) => {
11-
const resolvedNetworkAggregations = rootQueryFields.map((fieldName) => {
12-
const fieldAggregations = networkResults.map((networkResult) => {
13-
const documentName = Object.keys(networkResult)[0];
14-
return networkResult[documentName][fieldName];
15-
});
16-
const aggregationType = fieldAggregations[0].__typename;
17-
const resolvedAggregation = resolveToNetworkAggregation(aggregationType, fieldAggregations);
18-
return { fieldName: fieldName, aggregation: resolvedAggregation };
19-
});
20+
export const resolveAggregations = ({
21+
networkResult,
22+
requestedAggregationFields,
23+
accumulator,
24+
}: ResolveAggregationInput): number => {
25+
const documentName = Object.keys(networkResult)[0];
26+
27+
const nodeBucketCount = requestedAggregationFields.reduce((bucketCountAcc, fieldName) => {
28+
const fieldAggregations = networkResult[documentName][fieldName];
29+
const fieldBucketCount = fieldAggregations.bucket_count;
30+
const aggregationType = fieldAggregations.__typename;
2031

21-
return resolvedNetworkAggregations;
32+
const accumulatedFieldAggregations = accumulator[fieldName];
33+
const resolvedAggregation = resolveToNetworkAggregation(aggregationType, [
34+
fieldAggregations,
35+
accumulatedFieldAggregations,
36+
]);
37+
38+
// mutation - updates accumulator
39+
accumulator[fieldName] = resolvedAggregation;
40+
// returns total bucket count for node
41+
return bucketCountAcc + fieldBucketCount;
42+
}, 0);
43+
44+
return nodeBucketCount;
2245
};
2346

2447
/**
2548
* Resolve aggregation based on aggregation type
49+
*
2650
* @param type
2751
* @param aggregations
2852
*/
@@ -41,48 +65,105 @@ export const resolveToNetworkAggregation = (
4165
};
4266

4367
/**
44-
* Takes an array of the same aggregation type and computes the singular type
45-
* eg. NumericAggregation => NetworkNumericAggregation
68+
* Mutation
69+
* Updates existing or adds additional bucket to computed buckets
4670
*
47-
* Note for operations on Buckets - the size of the array can be large (e.g. total bucket count), complicating lookups, etc.
71+
* @param bucket - Bucket being processed
72+
* @param computedBuckets - Existing buckets
73+
*/
74+
const updateComputedBuckets = (bucket, computedBuckets) => {
75+
/*
76+
* Unable to use lookup key eg. buckets[key]
77+
* "buckets": [
78+
* {
79+
* "doc_count": 140,
80+
* "key": "Dog"
81+
* },
82+
*/
83+
const { key, doc_count } = bucket;
84+
const existingBucketIndex = computedBuckets.findIndex((bucket) => bucket.key === key);
85+
if (existingBucketIndex !== -1) {
86+
const existingBucket = computedBuckets[existingBucketIndex];
87+
if (existingBucket) {
88+
// update existing bucket
89+
computedBuckets[existingBucketIndex] = {
90+
...existingBucket,
91+
doc_count: existingBucket.doc_count + doc_count,
92+
};
93+
}
94+
} else {
95+
computedBuckets.push(bucket);
96+
}
97+
};
98+
99+
/**
100+
* Resolves multiple aggregations into single
48101
*
49102
* @param aggregations
50103
* @returns
104+
*
105+
* @example
106+
* #### Input
107+
* ```javascript
108+
*[
109+
* {
110+
* bucket_count: 2,
111+
* buckets: [
112+
* {
113+
* key: 'Male',
114+
* doc_count: 15,
115+
* },
116+
* {
117+
* key: 'Female',
118+
* doc_count: 700,
119+
* },
120+
* {
121+
* key: 'Unknown',
122+
* doc_count: 5,
123+
* },
124+
* ],
125+
* },
126+
* {
127+
* bucket_count: 2,
128+
* buckets: [
129+
* {
130+
* key: 'Male',
131+
* doc_count: 25,
132+
* },
133+
* {
134+
* key: 'Female',
135+
* doc_count: 100,
136+
* },
137+
* ],
138+
* }];
139+
* ```
140+
*
141+
* #### Output
142+
* ```javascript
143+
* {
144+
* bucket_count: 3,
145+
* buckets: [
146+
* {
147+
* key: 'Male',
148+
* doc_count: 40,
149+
* },
150+
* {
151+
* key: 'Female',
152+
* doc_count: 800,
153+
* },
154+
* {
155+
* key: 'Unknown',
156+
* doc_count: 5,
157+
* }]
158+
* }
159+
* ```
51160
*/
52161
export const resolveAggregation = (aggregations: Aggregations[]): NetworkAggregation => {
53-
const emptyAggregation: NetworkAggregation = { bucket_count: 0, buckets: [] };
54-
55162
const resolvedAggregation = aggregations.reduce((resolvedAggregation, agg) => {
56-
const bucketCountAccumulator = resolvedAggregation.bucket_count + agg.bucket_count;
57-
58-
/*
59-
* Unable to use lookup key eg. buckets[key]
60-
* "buckets": [
61-
* {
62-
* "doc_count": 140,
63-
* "key": "Dog"
64-
* },
65-
*/
66163
const computedBuckets = resolvedAggregation.buckets;
67-
68-
agg.buckets.forEach((bucket) => {
69-
const { key, doc_count } = bucket;
70-
const existingBucketIndex = computedBuckets.findIndex((bucket) => bucket.key === key);
71-
if (existingBucketIndex !== -1) {
72-
const existingBucket = computedBuckets[existingBucketIndex];
73-
if (existingBucket) {
74-
// update existing bucket
75-
computedBuckets[existingBucketIndex] = {
76-
...existingBucket,
77-
doc_count: existingBucket.doc_count + doc_count,
78-
};
79-
}
80-
} else {
81-
computedBuckets.push(bucket);
82-
}
83-
});
84-
return { bucket_count: bucketCountAccumulator, buckets: computedBuckets };
85-
}, emptyAggregation);
164+
agg.buckets.forEach((bucket) => updateComputedBuckets(bucket, computedBuckets));
165+
return { bucket_count: computedBuckets.length, buckets: computedBuckets };
166+
});
86167

87168
return resolvedAggregation;
88169
};
Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,53 @@
1+
// Success and Failure types
2+
export type Success<T> = { status: 'SUCCESS'; data: T };
3+
export type Failure<FailureStatus extends string, T = void> = {
4+
status: FailureStatus;
5+
message: string;
6+
data: T;
7+
};
8+
9+
/**
10+
* Represents a response that on success will include data of type T,
11+
* otherwise a message will be returned in place of the data explaining the failure with optional fallback data.
12+
* The failure object has data type of void by default.
13+
*/
14+
export type Result<T, FailureStatus extends string, FailureData = void> =
15+
| Success<T>
16+
| Failure<FailureStatus, FailureData>;
17+
18+
/* ******************* *
19+
Convenience Methods
20+
* ******************* */
21+
22+
/**
23+
* Determines if the Result is a Success type by its status
24+
* and returns the type predicate so TS can infer the Result as a Success
25+
* @param result
26+
* @returns {boolean} Whether the Result was a Success or not
27+
*/
28+
export function isSuccess<T, FailureStatus extends string, FailureData>(
29+
result: Result<T, FailureStatus, FailureData>,
30+
): result is Success<T> {
31+
return result.status === 'SUCCESS';
32+
}
33+
34+
/**
35+
* Create a successful response for a Result or Either type, with data of the success type
36+
* @param {T} data
37+
* @returns {Success<T>} `{status: 'SUCCESS', data}`
38+
*/
39+
export const success = <T>(data: T): Success<T> => ({ status: 'SUCCESS', data });
40+
41+
/**
42+
* Create a response indicating a failure with a status naming the reason and message describing the failure.
43+
* @param {string} message
44+
* @returns {Failure} `{status: string, message: string, data: undefined}`
45+
*/
46+
export const failure = <FailureStatus extends string>(
47+
status: FailureStatus,
48+
message: string,
49+
): Failure<FailureStatus, void> => ({
50+
status,
51+
message,
52+
data: undefined,
53+
});

modules/server/src/network/index.ts

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -47,9 +47,6 @@ const fetchRemoteSchema = async (
4747
`Unexpected data in response object. Please verify the endpoint at ${graphqlUrl} is returning a valid GQL Schema.`,
4848
);
4949
} catch (error) {
50-
/**
51-
* TODO: expand on error handling for instance of Axios error for example
52-
*/
5350
console.error(`Failed to retrieve schema from url: ${config.graphqlUrl}`);
5451
return;
5552
}

modules/server/src/network/queries/index.ts

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,6 @@ export const gqlAggregationTypeQuery = `#graphql
4747
}
4848
`;
4949

50-
// TODO: queries with variables eg. top_hits(_source:[String], size:Int): JSON
5150
export const aggregationsQuery = /* GraphQL */ `
5251
#graphql
5352
{
@@ -56,7 +55,6 @@ export const aggregationsQuery = /* GraphQL */ `
5655
buckets {
5756
key
5857
doc_count
59-
key_as_string
6058
}
6159
}
6260
`;

0 commit comments

Comments
 (0)