Skip to content
This repository has been archived by the owner on Aug 2, 2022. It is now read-only.

Commit

Permalink
Revert changes ahead of develop branch in master (#551)
Browse files Browse the repository at this point in the history
* Revert "Rename release notes to use 4 digit versions (#547)"

This reverts commit 33c6d3e.

* Revert "Opendistro Release 1.9.0 (#532)"

This reverts commit 254f2e0.

* Revert "Bug fix, support long type for aggregation (#522)"

This reverts commit fb2ed91.
  • Loading branch information
joshuali925 authored Jul 9, 2020
1 parent 33c6d3e commit 09132da
Show file tree
Hide file tree
Showing 49 changed files with 144 additions and 297 deletions.
12 changes: 3 additions & 9 deletions build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@

buildscript {
ext {
es_version = "7.8.0"
es_version = "7.7.0"
}
// This isn't applying from repositories.gradle so repeating it here
repositories {
Expand All @@ -40,7 +40,7 @@ repositories {
}

ext {
opendistroVersion = '1.9.0'
opendistroVersion = '1.8.0'
isSnapshot = "true" == System.getProperty("build.snapshot", "true")
}

Expand Down Expand Up @@ -97,10 +97,6 @@ licenseHeaders {
excludes = ['com/amazon/opendistroforelasticsearch/sql/antlr/parser/**']
}

tasks.withType(licenseHeaders.class) {
additionalLicense 'AL ', 'Apache', 'Licensed under the Apache License, Version 2.0 (the "License")'
}

// TODO: need to fix java doc to enable JavaDoc
javadoc.enabled = false
esplugin {
Expand Down Expand Up @@ -276,16 +272,14 @@ dependencies {
testCompile group: "org.elasticsearch.client", name: 'transport', version: "${es_version}"

// JDBC drivers for comparison test. Somehow Apache Derby throws security permission exception.
testCompile group: 'com.amazon.opendistroforelasticsearch.client', name: 'opendistro-sql-jdbc', version: '1.8.0.0'
testCompile group: 'com.amazon.opendistroforelasticsearch.client', name: 'opendistro-sql-jdbc', version: '1.3.0.0'
testCompile group: 'com.h2database', name: 'h2', version: '1.4.200'
testCompile group: 'org.xerial', name: 'sqlite-jdbc', version: '3.28.0'
//testCompile group: 'org.apache.derby', name: 'derby', version: '10.15.1.3'
}

apply plugin: 'nebula.ospackage'

validateNebulaPom.enabled = false

// This is afterEvaluate because the bundlePlugin ZIP task is updated afterEvaluate and changes the ZIP name to match the plugin name
afterEvaluate {
ospackage {
Expand Down
2 changes: 1 addition & 1 deletion gradle.properties
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,4 @@
# permissions and limitations under the License.
#

version=1.9.0
version=1.8.0
2 changes: 1 addition & 1 deletion gradle/wrapper/gradle-wrapper.properties
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
# permissions and limitations under the License.
#

distributionUrl=https\://services.gradle.org/distributions/gradle-6.5-all.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-6.4-all.zip
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStorePath=wrapper/dists
Expand Down

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -109,11 +109,11 @@ public void setClusterService(ClusterService clusterService) {
this.clusterService = clusterService;

clusterService.addListener(event -> {
if (event.metadataChanged()) {
if (event.metaDataChanged()) {
// State in cluster service is already changed to event.state() before listener fired
if (LOG.isDebugEnabled()) {
LOG.debug("Metadata in cluster state changed: {}",
new IndexMappings(clusterService.state().metadata()));
new IndexMappings(clusterService.state().metaData()));
}
cache.invalidateAll();
}
Expand Down Expand Up @@ -169,8 +169,8 @@ public IndexMappings getFieldMappings(String[] indices, String[] types) {
}

/**
* Get field mappings by index expressions, type and field filter. Because IndexMetadata/MappingMetadata
* is hard to convert to FieldMappingMetadata, custom mapping domain objects are being used here. In future,
* Get field mappings by index expressions, type and field filter. Because IndexMetaData/MappingMetaData
* is hard to convert to FieldMappingMetaData, custom mapping domain objects are being used here. In future,
* it should be moved to domain model layer for all ES specific knowledge.
* <p>
 * Note that cluster state may be changed inside ES, so it's possible to read a different state in 2 accesses
Expand Down Expand Up @@ -222,7 +222,7 @@ private IndexMappings findMappings(ClusterState state, String[] indices, String[
Function<String, Predicate<String>> fieldFilter) throws IOException {
LOG.debug("Cache didn't help. Load and parse mapping in cluster state");
return new IndexMappings(
state.metadata().findMappings(indices, types, fieldFilter)
state.metaData().findMappings(indices, types, fieldFilter)
);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
import java.util.Map;

import static java.util.Collections.emptyMap;
import static org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetadata;
import static org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData;

/**
* Field mapping that parses native ES mapping.
Expand All @@ -39,7 +39,7 @@ public class FieldMapping {
/**
* Native mapping information returned from ES
*/
private final Map<String, FieldMappingMetadata> typeMappings;
private final Map<String, FieldMappingMetaData> typeMappings;

/**
* Maps a field name to Field object that specified in query explicitly
Expand All @@ -51,7 +51,7 @@ public FieldMapping(String fieldName) {
}

public FieldMapping(String fieldName,
Map<String, FieldMappingMetadata> typeMappings,
Map<String, FieldMappingMetaData> typeMappings,
Map<String, Field> specifiedFieldByNames) {

this.fieldName = fieldName;
Expand Down Expand Up @@ -119,16 +119,16 @@ public String path() {
}

/**
* Used to retrieve the type of fields from metadata map structures for both regular and nested fields
* Used to retrieve the type of fields from metaData map structures for both regular and nested fields
*/
@SuppressWarnings("unchecked")
public String type() {
FieldMappingMetadata metadata = typeMappings.get(fieldName);
Map<String, Object> source = metadata.sourceAsMap();
FieldMappingMetaData metaData = typeMappings.get(fieldName);
Map<String, Object> source = metaData.sourceAsMap();
String[] fieldPath = fieldName.split("\\.");

/*
* When field is not nested the metadata source is fieldName -> type
* When field is not nested the metaData source is fieldName -> type
* When it is nested or contains "." in general (ex. fieldName.nestedName) the source is nestedName -> type
*/
String root = (fieldPath.length == 1) ? fieldName : fieldPath[1];
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@

package com.amazon.opendistroforelasticsearch.sql.esdomain.mapping;

import org.elasticsearch.cluster.metadata.MappingMetadata;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.json.JSONObject;

import java.util.HashMap;
Expand Down Expand Up @@ -61,7 +61,7 @@ public class FieldMappings implements Mappings<Map<String, Object>> {
*/
private final Map<String, Object> fieldMappings;

public FieldMappings(MappingMetadata mappings) {
public FieldMappings(MappingMetaData mappings) {
fieldMappings = mappings.sourceAsMap();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@

package com.amazon.opendistroforelasticsearch.sql.esdomain.mapping;

import org.elasticsearch.cluster.metadata.MappingMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;

import java.util.Map;
Expand Down Expand Up @@ -55,12 +55,12 @@ public IndexMappings() {
this.indexMappings = emptyMap();
}

public IndexMappings(Metadata metadata) {
this.indexMappings = buildMappings(metadata.indices(),
indexMetadata -> new TypeMappings(indexMetadata.getMappings()));
public IndexMappings(MetaData metaData) {
this.indexMappings = buildMappings(metaData.indices(),
indexMetaData -> new TypeMappings(indexMetaData.getMappings()));
}

public IndexMappings(ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetadata>> mappings) {
public IndexMappings(ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings) {
this.indexMappings = buildMappings(mappings, TypeMappings::new);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@

package com.amazon.opendistroforelasticsearch.sql.esdomain.mapping;

import org.elasticsearch.cluster.metadata.MappingMetadata;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;

import java.util.Map;
Expand All @@ -36,7 +36,7 @@ public class TypeMappings implements Mappings<FieldMappings> {
*/
private final Map<String, FieldMappings> typeMappings;

public TypeMappings(ImmutableOpenMap<String, MappingMetadata> mappings) {
public TypeMappings(ImmutableOpenMap<String, MappingMetaData> mappings) {
typeMappings = buildMappings(mappings, FieldMappings::new);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ private void async(Client client, Map<String, String> params, QueryAction queryA

// Preserve context of calling thread to ensure headers of requests are forwarded when running blocking actions
threadPool.schedule(
LogUtils.withCurrentContext(runnable),
threadPool.preserveContext(LogUtils.withCurrentContext(runnable)),
new TimeValue(0L),
SQL_WORKER_THREAD_POOL_NAME
);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
import org.elasticsearch.cluster.metadata.AliasMetadata;
import org.elasticsearch.cluster.metadata.MappingMetadata;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
Expand Down Expand Up @@ -77,22 +77,22 @@ public RestResponse buildResponse(GetIndexResponse getIndexResponse, XContentBui
return new BytesRestResponse(RestStatus.OK, builder);
}

private void writeAliases(List<AliasMetadata> aliases, XContentBuilder builder, ToXContent.Params params)
private void writeAliases(List<AliasMetaData> aliases, XContentBuilder builder, ToXContent.Params params)
throws IOException {
builder.startObject(Fields.ALIASES);
if (aliases != null) {
for (AliasMetadata alias : aliases) {
AliasMetadata.Builder.toXContent(alias, builder, params);
for (AliasMetaData alias : aliases) {
AliasMetaData.Builder.toXContent(alias, builder, params);
}
}
builder.endObject();
}

private void writeMappings(ImmutableOpenMap<String, MappingMetadata> mappings,
private void writeMappings(ImmutableOpenMap<String, MappingMetaData> mappings,
XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(Fields.MAPPINGS);
if (mappings != null) {
for (ObjectObjectCursor<String, MappingMetadata> typeEntry : mappings) {
for (ObjectObjectCursor<String, MappingMetaData> typeEntry : mappings) {
builder.field(typeEntry.key);
builder.map(typeEntry.value.sourceAsMap());
}
Expand All @@ -114,4 +114,4 @@ static class Fields {
static final String SETTINGS = "settings";
static final String WARMERS = "warmers";
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ private void async(Client client, Map<String, String> params, RestChannel channe

// Preserve context of calling thread to ensure headers of requests are forwarded when running blocking actions
threadPool.schedule(
LogUtils.withCurrentContext(runnable),
threadPool.preserveContext(LogUtils.withCurrentContext(runnable)),
new TimeValue(0L),
SQL_WORKER_THREAD_POOL_NAME
);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,22 +20,19 @@
import com.amazon.opendistroforelasticsearch.sql.query.planner.core.ColumnNode;
import com.google.common.annotations.VisibleForTesting;

import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import static com.amazon.opendistroforelasticsearch.sql.executor.format.DateFieldFormatter.FORMAT_JDBC;

/**
* The definition of BindingTuple ResultSet.
*/
public class BindingTupleResultSet extends ResultSet {

public BindingTupleResultSet(List<ColumnNode> columnNodes, List<BindingTuple> bindingTuples) {
this.schema = buildSchema(columnNodes);
this.dataRows = buildDataRows(columnNodes, bindingTuples);
this.dataRows = buildDataRows(bindingTuples);
}

@VisibleForTesting
Expand All @@ -50,17 +47,12 @@ public static Schema buildSchema(List<ColumnNode> columnNodes) {
}

@VisibleForTesting
public static DataRows buildDataRows(List<ColumnNode> columnNodes, List<BindingTuple> bindingTuples) {
public static DataRows buildDataRows(List<BindingTuple> bindingTuples) {
List<DataRows.Row> rowList = bindingTuples.stream().map(tuple -> {
Map<String, ExprValue> bindingMap = tuple.getBindingMap();
Map<String, Object> rowMap = new HashMap<>();
for (ColumnNode column : columnNodes) {
String columnName = column.columnName();
Object value = bindingMap.get(columnName).value();
if (column.getType() == Schema.Type.DATE) {
value = DateFormat.getFormattedDate(new Date((Long) value), FORMAT_JDBC);
}
rowMap.put(columnName, value);
for (String s : bindingMap.keySet()) {
rowMap.put(s, bindingMap.get(s).value());
}
return new DataRows.Row(rowMap);
}).collect(Collectors.toList());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@
*/
public class DateFieldFormatter {
private static final Logger LOG = LogManager.getLogger(DateFieldFormatter.class);
public static final String FORMAT_JDBC = "yyyy-MM-dd HH:mm:ss.SSS";
private static final String FORMAT_JDBC = "yyyy-MM-dd HH:mm:ss.SSS";
private static final String FORMAT_DELIMITER = "\\|\\|";

private static final String FORMAT_DOT_DATE_AND_TIME = "yyyy-MM-dd'T'HH:mm:ss.SSSZ";
Expand Down
Loading

0 comments on commit 09132da

Please sign in to comment.