diff --git a/.github/workflows/test-v3.yml b/.github/workflows/test-v3.yml index 4af10d7eb68..05e6c080c21 100644 --- a/.github/workflows/test-v3.yml +++ b/.github/workflows/test-v3.yml @@ -52,6 +52,7 @@ jobs: - name: receiver-zipkin-kafka - name: receiver-zipkin-rabbitmq - name: receiver-zipkin-scribe + - name: storage-cassandra steps: - name: Checkout Repository uses: actions/checkout@v2 diff --git a/zipkin-server/pom.xml b/zipkin-server/pom.xml index 2415d960f4c..4986f705667 100644 --- a/zipkin-server/pom.xml +++ b/zipkin-server/pom.xml @@ -62,6 +62,7 @@ receiver-zipkin-activemq receiver-zipkin-rabbitmq receiver-zipkin-scribe + storage-cassandra diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleConfig.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleConfig.java index 79bca876bd1..e93e1a1cfd8 100644 --- a/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleConfig.java +++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleConfig.java @@ -1,5 +1,5 @@ /* - * Copyright 2015-2019 The OpenZipkin Authors + * Copyright 2015-2023 The OpenZipkin Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at @@ -64,6 +64,16 @@ public class CoreModuleConfig extends ModuleConfig { */ private int traceSampleRate = 10000; + /** + * The number of threads used to prepare metrics data to the storage. + */ + private int prepareThreads = 2; + + /** + * The period of doing data persistence. Unit is second. 
+ */ + private int persistentPeriod = 25; + private static final String DEFAULT_SEARCHABLE_TAG_KEYS = String.join( Const.COMMA, "http.method" @@ -72,6 +82,8 @@ public class CoreModuleConfig extends ModuleConfig { public org.apache.skywalking.oap.server.core.CoreModuleConfig toSkyWalkingConfig() { final org.apache.skywalking.oap.server.core.CoreModuleConfig result = new org.apache.skywalking.oap.server.core.CoreModuleConfig(); result.setServiceCacheRefreshInterval(serviceCacheRefreshInterval); + result.setPrepareThreads(prepareThreads); + result.setPersistentPeriod(persistentPeriod); return result; } @@ -154,4 +166,20 @@ public int getTraceSampleRate() { public void setTraceSampleRate(int traceSampleRate) { this.traceSampleRate = traceSampleRate; } + + public int getPrepareThreads() { + return prepareThreads; + } + + public void setPrepareThreads(int prepareThreads) { + this.prepareThreads = prepareThreads; + } + + public int getPersistentPeriod() { + return persistentPeriod; + } + + public void setPersistentPeriod(int persistentPeriod) { + this.persistentPeriod = persistentPeriod; + } } diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleProvider.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleProvider.java index 75eb7781fdb..45542b73019 100644 --- a/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleProvider.java +++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2015-2019 The OpenZipkin Authors + * Copyright 2015-2023 The OpenZipkin Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. 
You may obtain a copy of the License at @@ -16,6 +16,7 @@ import org.apache.skywalking.oap.server.core.CoreModule; import org.apache.skywalking.oap.server.core.analysis.meter.MeterSystem; import org.apache.skywalking.oap.server.core.analysis.worker.MetricsStreamProcessor; +import org.apache.skywalking.oap.server.core.annotation.AnnotationScan; import org.apache.skywalking.oap.server.core.cache.NetworkAddressAliasCache; import org.apache.skywalking.oap.server.core.cache.ProfileTaskCache; import org.apache.skywalking.oap.server.core.command.CommandService; @@ -50,13 +51,17 @@ import org.apache.skywalking.oap.server.core.remote.client.RemoteClientManager; import org.apache.skywalking.oap.server.core.server.GRPCHandlerRegister; import org.apache.skywalking.oap.server.core.server.HTTPHandlerRegister; +import org.apache.skywalking.oap.server.core.source.DefaultScopeDefine; import org.apache.skywalking.oap.server.core.source.SourceReceiver; import org.apache.skywalking.oap.server.core.source.SourceReceiverImpl; import org.apache.skywalking.oap.server.core.status.ServerStatusService; +import org.apache.skywalking.oap.server.core.storage.PersistenceTimer; +import org.apache.skywalking.oap.server.core.storage.StorageException; import org.apache.skywalking.oap.server.core.storage.model.IModelManager; import org.apache.skywalking.oap.server.core.storage.model.ModelCreator; import org.apache.skywalking.oap.server.core.storage.model.ModelManipulator; import org.apache.skywalking.oap.server.core.storage.model.StorageModels; +import org.apache.skywalking.oap.server.core.storage.ttl.DataTTLKeeperTimer; import org.apache.skywalking.oap.server.core.worker.IWorkerInstanceGetter; import org.apache.skywalking.oap.server.core.worker.IWorkerInstanceSetter; import org.apache.skywalking.oap.server.core.worker.WorkerInstancesService; @@ -69,19 +74,23 @@ import zipkin.server.core.services.EmptyGRPCHandlerRegister; import zipkin.server.core.services.EmptyHTTPHandlerRegister; import 
zipkin.server.core.services.EmptyNetworkAddressAliasCache; +import zipkin.server.core.services.SelfSenderService; import zipkin.server.core.services.ZipkinConfigService; +import java.io.IOException; import java.util.Collections; public class CoreModuleProvider extends ModuleProvider { private CoreModuleConfig moduleConfig; private EndpointNameGrouping endpointNameGrouping; - private final SourceReceiverImpl receiver; + private final ZipkinSourceReceiverImpl receiver; + private final AnnotationScan annotationScan; private final StorageModels storageModels; public CoreModuleProvider() { - this.receiver = new SourceReceiverImpl(); + this.annotationScan = new AnnotationScan(); + this.receiver = new ZipkinSourceReceiverImpl(); this.storageModels = new StorageModels(); } @@ -121,6 +130,16 @@ public void prepare() throws ServiceNotProvidedException, ModuleStartException { ); this.registerServiceImplementation(NamingControl.class, namingControl); + annotationScan.registerListener(new ZipkinStreamAnnotationListener(getManager())); + + AnnotationScan scopeScan = new AnnotationScan(); + scopeScan.registerListener(new DefaultScopeDefine.Listener()); + try { + scopeScan.scan(); + } catch (Exception e) { + throw new ModuleStartException(e.getMessage(), e); + } + final org.apache.skywalking.oap.server.core.CoreModuleConfig swConfig = this.moduleConfig.toSkyWalkingConfig(); this.registerServiceImplementation(MeterSystem.class, new MeterSystem(getManager())); this.registerServiceImplementation(ConfigService.class, new ZipkinConfigService(moduleConfig, this)); @@ -133,8 +152,8 @@ public void prepare() throws ServiceNotProvidedException, ModuleStartException { final WorkerInstancesService instancesService = new WorkerInstancesService(); this.registerServiceImplementation(IWorkerInstanceGetter.class, instancesService); this.registerServiceImplementation(IWorkerInstanceSetter.class, instancesService); - this.registerServiceImplementation(RemoteSenderService.class, new 
RemoteSenderService(getManager())); - this.registerServiceImplementation(RemoteSenderService.class, new RemoteSenderService(getManager())); + // no cluster mode for zipkin, for sending the streaming data to the local + this.registerServiceImplementation(RemoteSenderService.class, new SelfSenderService(getManager())); this.registerServiceImplementation(ModelCreator.class, storageModels); this.registerServiceImplementation(IModelManager.class, storageModels); this.registerServiceImplementation(ModelManipulator.class, storageModels); @@ -182,12 +201,19 @@ public void prepare() throws ServiceNotProvidedException, ModuleStartException { @Override public void start() throws ServiceNotProvidedException, ModuleStartException { - + try { + receiver.scan(); + annotationScan.scan(); + } catch (IOException | IllegalAccessException | InstantiationException | StorageException e) { + throw new ModuleStartException(e.getMessage(), e); + } } @Override public void notifyAfterCompleted() throws ServiceNotProvidedException, ModuleStartException { - + final org.apache.skywalking.oap.server.core.CoreModuleConfig swConfig = this.moduleConfig.toSkyWalkingConfig(); + PersistenceTimer.INSTANCE.start(getManager(), swConfig); + DataTTLKeeperTimer.INSTANCE.start(getManager(), swConfig); } @Override diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/ZipkinDispatcherManager.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/ZipkinDispatcherManager.java new file mode 100644 index 00000000000..c4c33d79d2e --- /dev/null +++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/ZipkinDispatcherManager.java @@ -0,0 +1,28 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ + +package zipkin.server.core; + +import org.apache.skywalking.oap.server.core.analysis.DispatcherManager; +import org.apache.skywalking.oap.server.core.analysis.manual.searchtag.TagAutocompleteDispatcher; + +public class ZipkinDispatcherManager extends DispatcherManager { + + @Override + public void addIfAsSourceDispatcher(Class aClass) throws IllegalAccessException, InstantiationException { + if (aClass.getSimpleName().startsWith("Zipkin") || aClass.equals(TagAutocompleteDispatcher.class)) { + super.addIfAsSourceDispatcher(aClass); + } + } +} diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/ZipkinSourceReceiverImpl.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/ZipkinSourceReceiverImpl.java new file mode 100644 index 00000000000..ce29494b666 --- /dev/null +++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/ZipkinSourceReceiverImpl.java @@ -0,0 +1,43 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package zipkin.server.core; + +import org.apache.skywalking.oap.server.core.analysis.DispatcherDetectorListener; +import org.apache.skywalking.oap.server.core.source.ISource; +import org.apache.skywalking.oap.server.core.source.SourceReceiver; + +import java.io.IOException; + +public class ZipkinSourceReceiverImpl implements SourceReceiver { + private final ZipkinDispatcherManager mgr; + + public ZipkinSourceReceiverImpl() { + mgr = new ZipkinDispatcherManager(); + } + + @Override + public void receive(ISource source) { + mgr.forward(source); + } + + @Override + public DispatcherDetectorListener getDispatcherDetectorListener() { + return mgr; + } + + public void scan() throws IOException, IllegalAccessException, InstantiationException { + mgr.scan(); + } +} diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/ZipkinStreamAnnotationListener.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/ZipkinStreamAnnotationListener.java new file mode 100644 index 00000000000..a1ba4398eb6 --- /dev/null +++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/ZipkinStreamAnnotationListener.java @@ -0,0 +1,35 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package zipkin.server.core; + +import org.apache.skywalking.oap.server.core.analysis.StreamAnnotationListener; +import org.apache.skywalking.oap.server.core.analysis.manual.searchtag.TagAutocompleteData; +import org.apache.skywalking.oap.server.core.storage.StorageException; +import org.apache.skywalking.oap.server.library.module.ModuleDefineHolder; + +public class ZipkinStreamAnnotationListener extends StreamAnnotationListener { + + public ZipkinStreamAnnotationListener(ModuleDefineHolder moduleDefineHolder) { + super(moduleDefineHolder); + } + + @Override + public void notify(Class aClass) throws StorageException { + // only including all zipkin streaming + if (aClass.getSimpleName().startsWith("Zipkin") || aClass.equals(TagAutocompleteData.class)) { + super.notify(aClass); + } + } +} diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/services/SelfSenderService.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/SelfSenderService.java new file mode 100644 index 00000000000..b94a720da87 --- /dev/null +++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/SelfSenderService.java @@ -0,0 +1,44 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package zipkin.server.core.services; + +import org.apache.skywalking.oap.server.core.remote.RemoteSenderService; +import org.apache.skywalking.oap.server.core.remote.client.Address; +import org.apache.skywalking.oap.server.core.remote.client.SelfRemoteClient; +import org.apache.skywalking.oap.server.core.remote.data.StreamData; +import org.apache.skywalking.oap.server.core.remote.selector.Selector; +import org.apache.skywalking.oap.server.library.module.ModuleManager; + +public class SelfSenderService extends RemoteSenderService { + private final ModuleManager moduleManager; + private SelfRemoteClient self; + + public SelfSenderService(ModuleManager moduleManager) { + super(moduleManager); + this.moduleManager = moduleManager; + } + + private SelfRemoteClient getSelf() { + if (self == null) { + self = new SelfRemoteClient(moduleManager, new Address("127.0.0.1", 0, true)); + } + return self; + } + + @Override + public void send(String nextWorkName, StreamData streamData, Selector selector) { + getSelf().push(nextWorkName, streamData); + } +} diff --git a/zipkin-server/server-starter/pom.xml b/zipkin-server/server-starter/pom.xml index 7f4344dfe8b..5422c60061d 100644 --- a/zipkin-server/server-starter/pom.xml +++ b/zipkin-server/server-starter/pom.xml @@ -55,6 +55,11 @@ storage-elasticsearch-plugin ${skywalking.version} + + io.zipkin + storage-cassandra + ${project.version} + diff --git a/zipkin-server/server-starter/src/main/resources/application.yml b/zipkin-server/server-starter/src/main/resources/application.yml index 60e03dce20a..91c8c99e50c 100644 --- a/zipkin-server/server-starter/src/main/resources/application.yml +++ b/zipkin-server/server-starter/src/main/resources/application.yml @@ -34,6 +34,10 @@ core: searchableTracesTags: ${ZIPKIN_SEARCHABLE_TAG_KEYS:http.method} # The trace sample rate precision is 1/10000, should be between 0 and 10000 traceSampleRate: ${ZIPKIN_SAMPLE_RATE:10000} + # The number of threads used to prepare metrics data to the 
storage. + prepareThreads: ${ZIPKIN_PREPARE_THREADS:2} + # The period of doing data persistence. Unit is second.Default value is 25s + persistentPeriod: ${ZIPKIN_PERSISTENT_PERIOD:25} storage: selector: ${ZIPKIN_STORAGE:h2} @@ -125,6 +129,21 @@ storage: superDatasetBlockIntervalHours: ${ZIPKIN_STORAGE_BANYANDB_SUPER_DATASET_BLOCK_INTERVAL_HOURS:4} # Unit is hour superDatasetSegmentIntervalDays: ${ZIPKIN_STORAGE_BANYANDB_SUPER_DATASET_SEGMENT_INTERVAL_DAYS:1} # Unit is day specificGroupSettings: ${ZIPKIN_STORAGE_BANYANDB_SPECIFIC_GROUP_SETTINGS:""} # For example, {"group1": {"blockIntervalHours": 4, "segmentIntervalDays": 1}} + cassandra: + keyspace: ${ZIPKIN_STORAGE_CASSANDRA_KEYSPACE:zipkin} + # Comma separated list of host addresses part of Cassandra cluster. Ports default to 9042 but you can also specify a custom port with 'host:port'. + contactPoints: ${ZIPKIN_STORAGE_CASSANDRA_CONTACT_POINTS:localhost} + # Name of the datacenter that will be considered "local" for load balancing. + localDc: ${ZIPKIN_STORAGE_CASSANDRA_LOCAL_DC:datacenter1} + # Will throw an exception on startup if authentication fails. + username: ${ZIPKIN_STORAGE_CASSANDRA_USERNAME:} + password: ${ZIPKIN_STORAGE_CASSANDRA_PASSWORD:} + # Max pooled connections per datacenter-local host. 
+ maxConnections: ${ZIPKIN_STORAGE_CASSANDRA_MAX_CONNECTIONS:8} + # Using ssl for connection, rely on Keystore + use-ssl: ${ZIPKIN_STORAGE_CASSANDRA_USE_SSL:false} + maxSizeOfBatchCql: ${ZIPKIN_STORAGE_CASSANDRA_MAX_SIZE_OF_BATCH_CQL:2000} + asyncBatchPersistentPoolSize: ${ZIPKIN_STORAGE_CASSANDRA_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4} receiver-zipkin-http: selector: ${ZIPKIN_RECEIVER_ZIPKIN_HTTP:default} diff --git a/zipkin-server/storage-cassandra/pom.xml b/zipkin-server/storage-cassandra/pom.xml new file mode 100644 index 00000000000..3c96072f802 --- /dev/null +++ b/zipkin-server/storage-cassandra/pom.xml @@ -0,0 +1,73 @@ + + + 4.0.0 + + + zipkin-server-parent + io.zipkin + 2.24.4-SNAPSHOT + + + storage-cassandra + Storage: Cassandra + + + + io.zipkin + zipkin-server-core + ${project.version} + + + org.apache.skywalking + storage-jdbc-hikaricp-plugin + ${skywalking.version} + + + + io.zipkin + zipkin-server-core + ${project.version} + + + + com.google.auto.value + auto-value-annotations + ${auto-value.version} + + + com.google.auto.value + auto-value + ${auto-value.version} + provided + + + + com.datastax.oss + java-driver-core + ${java-driver.version} + + + + com.esri.geometry + * + + + org.apache.tinkerpop + * + + + + + + + org.apache.skywalking + cluster-standalone-plugin + ${skywalking.version} + test + + + + \ No newline at end of file diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CQLExecutor.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CQLExecutor.java new file mode 100644 index 00000000000..46ea7c34ea4 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CQLExecutor.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package zipkin.server.storage.cassandra; + +import org.apache.skywalking.oap.server.core.storage.SessionCacheCallback; +import org.apache.skywalking.oap.server.library.client.request.InsertRequest; +import org.apache.skywalking.oap.server.library.client.request.UpdateRequest; + +import java.util.ArrayList; +import java.util.List; + +public class CQLExecutor implements InsertRequest, UpdateRequest { + private final String cql; + private final List params; + private final SessionCacheCallback callback; + private List additionalCQLs; + + public CQLExecutor(String cql, List params, SessionCacheCallback callback, List additionalCQLs) { + this.cql = cql; + this.params = params; + this.callback = callback; + this.additionalCQLs = additionalCQLs; + } + + public void appendAdditionalCQLs(List cqlExecutors) { + if (additionalCQLs == null) { + additionalCQLs = new ArrayList<>(); + } + additionalCQLs.addAll(cqlExecutors); + } + + @Override + public String toString() { + return cql; + } + + @Override + public void onInsertCompleted() { + if (callback != null) + callback.onInsertCompleted(); + } + + @Override + public void onUpdateFailure() { + if (callback != null) + callback.onUpdateFailure(); + } + + public List getAdditionalCQLs() { + return additionalCQLs; + } + + public String getCql() { + return cql; + } + + public List getParams() { + return params; + } +} diff --git 
a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraClient.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraClient.java new file mode 100644 index 00000000000..67e8052ce88 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraClient.java @@ -0,0 +1,153 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ + +package zipkin.server.storage.cassandra; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.auth.AuthProvider; +import com.datastax.oss.driver.api.core.config.DriverOption; +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; +import com.datastax.oss.driver.internal.core.auth.ProgrammaticPlainTextAuthProvider; +import org.apache.skywalking.oap.server.library.client.Client; +import org.apache.skywalking.oap.server.library.client.healthcheck.DelegatedHealthChecker; +import org.apache.skywalking.oap.server.library.util.HealthChecker; +import org.apache.skywalking.oap.server.library.util.StringUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import 
zipkin.server.storage.cassandra.internal.SessionBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletionStage; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_MAX_REQUESTS; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE; + +public class CassandraClient implements Client { + static final Logger LOG = LoggerFactory.getLogger(CassandraClient.class); + + public static final String RECORD_UNIQUE_UUID_COLUMN = "uuid_unique"; + + private final CassandraConfig config; + private final DelegatedHealthChecker healthChecker; + + private volatile CqlSession cqlSession; + + public CassandraClient(CassandraConfig config) { + this.config = config; + this.healthChecker = new DelegatedHealthChecker(); + } + + public KeyspaceMetadata getMetadata() { + return cqlSession.getMetadata().getKeyspace(config.getKeyspace()).orElse(null); + } + + public CqlSession getSession() { + return cqlSession; + } + + public List executeQuery(String cql, ResultHandler resultHandler, Object... params) { + if (LOG.isDebugEnabled()) { + LOG.debug("Executing CQL: {}", cql); + LOG.debug("CQL parameters: {}", Arrays.toString(params)); + } + final BoundStatement stmt = cqlSession.prepare(cql).bind(params); + final ResultSet resultSet = cqlSession.execute(stmt); + healthChecker.health(); + if (resultHandler != null) { + return StreamSupport.stream(resultSet.spliterator(), false) + .map(resultHandler::handle).collect(Collectors.toList()); + } + return null; + } + + public CompletionStage> executeAsyncQuery(String cql, ResultHandler resultHandler, Object... 
params) { + if (LOG.isDebugEnabled()) { + LOG.debug("Executing CQL: {}", cql); + LOG.debug("CQL parameters: {}", Arrays.toString(params)); + } + final BoundStatement stmt = cqlSession.prepare(cql).bind(params); + final CompletionStage resultSet = cqlSession.executeAsync(stmt); + healthChecker.health(); + if (resultHandler != null) { + return resultSet.thenApply(s -> StreamSupport.stream(s.currentPage().spliterator(), false) + .map(resultHandler::handle).collect(Collectors.toList())); + } + return null; + } + + public void execute(String cql) { + if (LOG.isDebugEnabled()) { + LOG.debug("Executing CQL: {}", cql); + } + cqlSession.execute(cql); + healthChecker.health(); + } + + public void registerChecker(HealthChecker healthChecker) { + this.healthChecker.register(healthChecker); + } + + @Override + public void connect() throws Exception { + AuthProvider authProvider = null; + if (StringUtil.isNotEmpty(config.getUsername())) { + authProvider = new ProgrammaticPlainTextAuthProvider(config.getUsername(), config.getPassword()); + } + this.cqlSession = SessionBuilder.buildSession(config.getContactPoints(), + config.getLocalDc(), + poolingOptions(), + authProvider, + config.getUseSsl()); + + // create keyspace if needs + final String keyspace = config.getKeyspace(); + KeyspaceMetadata keyspaceMetadata = this.cqlSession.getMetadata().getKeyspace(keyspace).orElse(null); + if (keyspaceMetadata == null) { + String createKeyspaceCql = String.format( + "CREATE KEYSPACE IF NOT EXISTS %s " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} " + + "AND durable_writes = false;", + keyspace); + this.cqlSession.execute(createKeyspaceCql); + } + + this.cqlSession.execute("USE " + keyspace); + } + + @Override + public void shutdown() throws IOException { + } + + private Map poolingOptions() { + Map result = new LinkedHashMap<>(); + result.put(CONNECTION_POOL_LOCAL_SIZE, config.getMaxConnections()); + result.put(CONNECTION_MAX_REQUESTS, 40960 / 
config.getMaxConnections()); + return result; + } + + @FunctionalInterface + public interface ResultHandler { + T handle(Row resultSet); + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraConfig.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraConfig.java new file mode 100644 index 00000000000..b24a8355063 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraConfig.java @@ -0,0 +1,109 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package zipkin.server.storage.cassandra; + +import org.apache.skywalking.oap.server.library.module.ModuleConfig; + +public class CassandraConfig extends ModuleConfig { + + private String keyspace = "zipkin3"; + private String contactPoints = "localhost"; + private String localDc = "datacenter1"; + private int maxConnections = 8; + private boolean useSsl = false; + private String username; + private String password; + + /** + * The maximum size of batch size of CQL execution + */ + protected int maxSizeOfBatchCql = 2000; + /** + * async batch execute pool size + */ + protected int asyncBatchPersistentPoolSize = 4; + + public String getKeyspace() { + return keyspace; + } + + public void setKeyspace(String keyspace) { + this.keyspace = keyspace; + } + + public String getContactPoints() { + return contactPoints; + } + + public void setContactPoints(String contactPoints) { + this.contactPoints = contactPoints; + } + + public String getLocalDc() { + return localDc; + } + + public void setLocalDc(String localDc) { + this.localDc = localDc; + } + + public int getMaxConnections() { + return maxConnections; + } + + public void setMaxConnections(int maxConnections) { + this.maxConnections = maxConnections; + } + + public boolean getUseSsl() { + return useSsl; + } + + public void setUseSsl(boolean useSsl) { + this.useSsl = useSsl; + } + + public String getUsername() { + return username; + } + + public void setUsername(String username) { + this.username = username; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public int getMaxSizeOfBatchCql() { + return maxSizeOfBatchCql; + } + + public void setMaxSizeOfBatchCql(int maxSizeOfBatchCql) { + this.maxSizeOfBatchCql = maxSizeOfBatchCql; + } + + public int getAsyncBatchPersistentPoolSize() { + return asyncBatchPersistentPoolSize; + } + + public void setAsyncBatchPersistentPoolSize(int asyncBatchPersistentPoolSize) { + 
this.asyncBatchPersistentPoolSize = asyncBatchPersistentPoolSize; + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraProvider.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraProvider.java new file mode 100644 index 00000000000..a1bb42aa33f --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraProvider.java @@ -0,0 +1,182 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package zipkin.server.storage.cassandra; + +import org.apache.skywalking.oap.server.core.CoreModule; +import org.apache.skywalking.oap.server.core.storage.IBatchDAO; +import org.apache.skywalking.oap.server.core.storage.IHistoryDeleteDAO; +import org.apache.skywalking.oap.server.core.storage.StorageBuilderFactory; +import org.apache.skywalking.oap.server.core.storage.StorageDAO; +import org.apache.skywalking.oap.server.core.storage.StorageModule; +import org.apache.skywalking.oap.server.core.storage.cache.INetworkAddressAliasDAO; +import org.apache.skywalking.oap.server.core.storage.management.UIMenuManagementDAO; +import org.apache.skywalking.oap.server.core.storage.management.UITemplateManagementDAO; +import org.apache.skywalking.oap.server.core.storage.model.ModelCreator; +import org.apache.skywalking.oap.server.core.storage.profiling.continuous.IContinuousProfilingPolicyDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.ebpf.IEBPFProfilingDataDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.ebpf.IEBPFProfilingScheduleDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.ebpf.IEBPFProfilingTaskDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.ebpf.IServiceLabelDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.trace.IProfileTaskLogQueryDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.trace.IProfileTaskQueryDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.trace.IProfileThreadSnapshotQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IAggregationQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IAlarmQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IBrowserLogQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IEventQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.ILogQueryDAO; +import 
org.apache.skywalking.oap.server.core.storage.query.IMetadataQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IMetricsQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IRecordsQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.ISpanAttachedEventQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.ITagAutoCompleteQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.ITopologyQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.ITraceQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IZipkinQueryDAO; +import org.apache.skywalking.oap.server.library.module.ModuleConfig; +import org.apache.skywalking.oap.server.library.module.ModuleDefine; +import org.apache.skywalking.oap.server.library.module.ModuleProvider; +import org.apache.skywalking.oap.server.library.module.ModuleStartException; +import org.apache.skywalking.oap.server.library.module.ServiceNotProvidedException; +import org.apache.skywalking.oap.server.telemetry.TelemetryModule; +import org.apache.skywalking.oap.server.telemetry.api.HealthCheckMetrics; +import org.apache.skywalking.oap.server.telemetry.api.MetricsCreator; +import org.apache.skywalking.oap.server.telemetry.api.MetricsTag; +import zipkin.server.storage.cassandra.dao.CassandraBatchDAO; +import zipkin.server.storage.cassandra.dao.CassandraHistoryDeleteDAO; +import zipkin.server.storage.cassandra.dao.CassandraStorageDAO; +import zipkin.server.storage.cassandra.dao.CassandraTagAutocompleteDAO; +import zipkin.server.storage.cassandra.dao.CassandraZipkinQueryDAO; +import zipkin.server.storage.cassandra.dao.EmptyDAO; + +import java.time.Clock; + +public class CassandraProvider extends ModuleProvider { + private CassandraConfig moduleConfig; + private CassandraClient client; + private CassandraTableInstaller modelInstaller; + private CassandraTableHelper tableHelper; + + @Override + public String name() { + return "cassandra"; 
+ } + + @Override + public Class module() { + return StorageModule.class; + } + + @Override + public ConfigCreator newConfigCreator() { + return new ConfigCreator() { + @Override + public Class type() { + return CassandraConfig.class; + } + + @Override + public void onInitialized(CassandraConfig initialized) { + moduleConfig = initialized; + } + }; + } + + @Override + public void prepare() throws ServiceNotProvidedException, ModuleStartException { + client = new CassandraClient(moduleConfig); + modelInstaller = new CassandraTableInstaller(client, getManager()); + tableHelper = new CassandraTableHelper(getManager(), client); + + this.registerServiceImplementation( + StorageBuilderFactory.class, + new StorageBuilderFactory.Default()); + this.registerServiceImplementation( + IBatchDAO.class, + new CassandraBatchDAO(client, moduleConfig.getMaxSizeOfBatchCql(), moduleConfig.getAsyncBatchPersistentPoolSize()) + ); + this.registerServiceImplementation( + StorageDAO.class, + new CassandraStorageDAO(client) + ); + + final EmptyDAO emptyDAO = new EmptyDAO(); + this.registerServiceImplementation(INetworkAddressAliasDAO.class, emptyDAO); + this.registerServiceImplementation(ITopologyQueryDAO.class, emptyDAO); + this.registerServiceImplementation(ITraceQueryDAO.class, emptyDAO); + this.registerServiceImplementation(IMetricsQueryDAO.class, emptyDAO); + this.registerServiceImplementation(ILogQueryDAO.class, emptyDAO); + this.registerServiceImplementation(IMetadataQueryDAO.class, emptyDAO); + this.registerServiceImplementation(IAggregationQueryDAO.class, emptyDAO); + this.registerServiceImplementation(IAlarmQueryDAO.class, emptyDAO); + this.registerServiceImplementation(IRecordsQueryDAO.class, emptyDAO); + this.registerServiceImplementation(IBrowserLogQueryDAO.class, emptyDAO); + this.registerServiceImplementation(IProfileTaskQueryDAO.class, emptyDAO); + this.registerServiceImplementation(IProfileTaskLogQueryDAO.class, emptyDAO); + 
this.registerServiceImplementation(IProfileThreadSnapshotQueryDAO.class, emptyDAO); + this.registerServiceImplementation(UITemplateManagementDAO.class, emptyDAO); + this.registerServiceImplementation(UIMenuManagementDAO.class, emptyDAO); + this.registerServiceImplementation(IEventQueryDAO.class, emptyDAO); + this.registerServiceImplementation(IEBPFProfilingTaskDAO.class, emptyDAO); + this.registerServiceImplementation(IEBPFProfilingScheduleDAO.class, emptyDAO); + this.registerServiceImplementation(IEBPFProfilingDataDAO.class, emptyDAO); + this.registerServiceImplementation(IContinuousProfilingPolicyDAO.class, emptyDAO); + this.registerServiceImplementation(IServiceLabelDAO.class, emptyDAO); + this.registerServiceImplementation(ITagAutoCompleteQueryDAO.class, emptyDAO); + this.registerServiceImplementation(ISpanAttachedEventQueryDAO.class, emptyDAO); + + this.registerServiceImplementation(IHistoryDeleteDAO.class, new CassandraHistoryDeleteDAO(client, tableHelper, modelInstaller, Clock.systemDefaultZone())); + this.registerServiceImplementation(IZipkinQueryDAO.class, new CassandraZipkinQueryDAO(client, tableHelper)); + this.registerServiceImplementation(ITagAutoCompleteQueryDAO.class, new CassandraTagAutocompleteDAO(client, tableHelper)); + + } + + @Override + public void start() throws ServiceNotProvidedException, ModuleStartException { + MetricsCreator metricCreator = + getManager() + .find(TelemetryModule.NAME) + .provider() + .getService(MetricsCreator.class); + HealthCheckMetrics healthChecker = + metricCreator.createHealthCheckerGauge( + "storage_" + name(), + MetricsTag.EMPTY_KEY, + MetricsTag.EMPTY_VALUE); + client.registerChecker(healthChecker); + try { + client.connect(); + modelInstaller.start(); + + getManager() + .find(CoreModule.NAME) + .provider() + .getService(ModelCreator.class) + .addModelListener(modelInstaller); + } catch (Exception e) { + throw new ModuleStartException(e.getMessage(), e); + } + } + + @Override + public void notifyAfterCompleted() 
throws ServiceNotProvidedException, ModuleStartException { + + } + + @Override + public String[] requiredModules() { + return new String[] {CoreModule.NAME}; + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraTableHelper.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraTableHelper.java new file mode 100644 index 00000000000..e12aa8b4fe0 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraTableHelper.java @@ -0,0 +1,117 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package zipkin.server.storage.cassandra; + +import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import org.apache.skywalking.oap.server.core.CoreModule; +import org.apache.skywalking.oap.server.core.analysis.DownSampling; +import org.apache.skywalking.oap.server.core.analysis.TimeBucket; +import org.apache.skywalking.oap.server.core.config.ConfigService; +import org.apache.skywalking.oap.server.core.storage.model.Model; +import org.apache.skywalking.oap.server.library.module.ModuleManager; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.TableMetaInfo; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.common.TableHelper; + +import java.time.Duration; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.stream.LongStream; + +import static java.util.stream.Collectors.toList; + +public class CassandraTableHelper extends TableHelper { + private ModuleManager moduleManager; + private final CassandraClient client; + + private final LoadingCache tableExistence = + CacheBuilder.newBuilder() + .expireAfterWrite(Duration.ofMinutes(10)) + .build(new CacheLoader() { + @Override + public Boolean load(String tableName) throws Exception { + final KeyspaceMetadata metadata = client.getMetadata(); + return metadata != null && metadata.getTable(tableName).isPresent(); + } + }); + + public CassandraTableHelper(ModuleManager moduleManager, CassandraClient client) { + super(moduleManager, null); + this.moduleManager = moduleManager; + this.client = client; + } + + public List getTablesForRead(String modelName, long timeBucketStart, long timeBucketEnd) { + final Model model = TableMetaInfo.get(modelName); + final String rawTableName = getTableName(model); + + if (!model.isTimeSeries()) { + return Collections.singletonList(rawTableName); + } 
+ + final List ttlTables = getTablesWithinTTL(modelName); + return getTablesInTimeBucketRange(modelName, timeBucketStart, timeBucketEnd) + .stream() + .filter(ttlTables::contains) + .filter(table -> { + try { + return tableExistence.get(table); + } catch (Exception e) { + throw new RuntimeException(e); + } + }) + .collect(toList()); + } + + public List getTablesWithinTTL(String modelName) { + final Model model = TableMetaInfo.get(modelName); + final String rawTableName = getTableName(model); + + if (!model.isTimeSeries()) { + return Collections.singletonList(rawTableName); + } + + final List ttlTimeBuckets = getTTLTimeBuckets(model); + return ttlTimeBuckets + .stream() + .map(it -> getTable(rawTableName, it)) + .filter(table -> { + try { + return tableExistence.get(table); + } catch (Exception e) { + throw new RuntimeException(e); + } + }) + .collect(toList()); + } + + List getTTLTimeBuckets(Model model) { + final int ttl = model.isRecord() ? + getConfigService().getRecordDataTTL() : + getConfigService().getMetricsDataTTL(); + return LongStream + .rangeClosed(0, ttl) + .mapToObj(it -> TimeBucket.getTimeBucket(System.currentTimeMillis() - TimeUnit.DAYS.toMillis(it), DownSampling.Day)) + .distinct() + .collect(toList()); + } + + ConfigService getConfigService() { + return moduleManager.find(CoreModule.NAME).provider().getService(ConfigService.class); + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraTableInstaller.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraTableInstaller.java new file mode 100644 index 00000000000..f88ebac4700 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/CassandraTableInstaller.java @@ -0,0 +1,261 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ + +package zipkin.server.storage.cassandra; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; +import com.google.common.base.Joiner; +import com.google.gson.JsonObject; +import org.apache.skywalking.oap.server.core.analysis.Layer; +import org.apache.skywalking.oap.server.core.storage.StorageData; +import org.apache.skywalking.oap.server.core.storage.model.ColumnName; +import org.apache.skywalking.oap.server.core.storage.model.Model; +import org.apache.skywalking.oap.server.core.storage.model.ModelColumn; +import org.apache.skywalking.oap.server.core.storage.model.SQLDatabaseModelExtension; +import org.apache.skywalking.oap.server.core.storage.type.StorageDataComplexObject; +import org.apache.skywalking.oap.server.library.client.Client; +import org.apache.skywalking.oap.server.library.module.ModuleManager; +import org.apache.skywalking.oap.server.library.util.CollectionUtils; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.SQLBuilder; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.TableMetaInfo; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.common.JDBCTableInstaller; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.common.TableHelper; + +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; 
+import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; + +import static java.util.stream.Collectors.joining; +import static java.util.stream.Collectors.toList; + +public class CassandraTableInstaller extends JDBCTableInstaller { + public CassandraTableInstaller(Client client, ModuleManager moduleManager) { + super(client, moduleManager); + } + + @Override + public boolean isExists(Model model) { + TableMetaInfo.addModel(model); + + final String table = TableHelper.getLatestTableForWrite(model); + + final Optional tableMetadata = ((CassandraClient) client).getMetadata().getTable(table); + if (!tableMetadata.isPresent()) { + return false; + } + + final Set databaseColumns = getDatabaseColumns(table); + final boolean isAnyColumnNotCreated = + model + .getColumns().stream() + .map(ModelColumn::getColumnName) + .map(ColumnName::getStorageName) + .anyMatch(c -> !databaseColumns.contains(c)); + + return !isAnyColumnNotCreated; + } + + public void createTable(Model model, long timeBucket) { + try { + final String table = TableHelper.getTable(model, timeBucket); + createOrUpdateTable(model, table, model.getColumns(), false); + createOrUpdateTableIndexes(model, table, model.getColumns(), false); + createAdditionalTable(model, timeBucket); + } catch (Exception e) { + throw new IllegalStateException(e); + } + } + + public void createAdditionalTable(Model model, long timeBucket) throws SQLException { + final Map additionalTables = model.getSqlDBModelExtension().getAdditionalTables(); + for (final SQLDatabaseModelExtension.AdditionalTable table : additionalTables.values()) { + final String tableName = TableHelper.getTable(table.getName(), timeBucket); + createOrUpdateTable(model, tableName, table.getColumns(), true); + createOrUpdateTableIndexes(model, tableName, table.getColumns(), true); + } + } + + public void createOrUpdateTable(Model model, String table, List columns, boolean isAdditionalTable) { + try { + final List columnsToBeAdded = new 
ArrayList<>(columns); + final Set existingColumns = getDatabaseColumns(table); + + columnsToBeAdded.removeIf(it -> existingColumns.contains(it.getColumnName().getStorageName())); + + final KeyspaceMetadata metadata = ((CassandraClient) this.client).getMetadata(); + if (!metadata.getTable(table).isPresent()) { + createTable(model, table, columnsToBeAdded, isAdditionalTable); + } else { + updateTable(table, columnsToBeAdded); + } + } catch (Exception e) { + throw new IllegalStateException(e); + } + } + + public void createOrUpdateTableIndexes(Model model, String table, List columns, boolean isAdditionalTable) throws SQLException { + final CassandraClient cassandraClient = (CassandraClient) this.client; + + final List columnsMissingIndex = + columns + .stream() + .filter(ModelColumn::shouldIndex) + .filter(it -> it.getLength() < 256) + .filter(c -> !model.isRecord() || c.getBanyanDBExtension().getShardingKeyIdx() > 0) + .map(ModelColumn::getColumnName) + .map(ColumnName::getStorageName) + .collect(toList()); + + // adding the time_bucket as an index column when querying zipkin_query + columnsMissingIndex.add("time_bucket"); + for (String column : columnsMissingIndex) { + final String index = "idx_" + table + "_" + column; + if (!indexExists(cassandraClient, table, index)) { + executeSQL( + new SQLBuilder("CREATE INDEX ") + .append(index) + .append(" ON ").append(table).append("(") + .append(column) + .append(")") + ); + } + } + } + + private boolean indexExists(CassandraClient client, String tableName, String indexName) { + final TableMetadata tableMetadata = client.getMetadata().getTable(tableName).orElse(null); + if (tableMetadata == null) { + return false; + } + return tableMetadata.getIndex(indexName).isPresent(); + } + + private void createTable(Model model, String table, List columns, boolean isAdditionalTable) throws SQLException { + final List columnDefinitions = new ArrayList<>(); + columnDefinitions.add(ID_COLUMN + " text"); + if (!isAdditionalTable) { + 
columnDefinitions.add(JDBCTableInstaller.TABLE_COLUMN + " text"); + } + + columns + .stream() + .map(this::getColumnDefinition) + .forEach(columnDefinitions::add); + + List shardKeys = columns.stream() + .filter(column -> column.getBanyanDBExtension() != null && column.getBanyanDBExtension().getShardingKeyIdx() >= 0) + .map(t -> t.getColumnName().getStorageName()) + .collect(Collectors.toList()); + + // if existing time bucket field, then add it to the primary key for filtering + if (columns.stream().anyMatch(s -> s.getColumnName().getStorageName().equals(StorageData.TIME_BUCKET))) { + shardKeys.add(StorageData.TIME_BUCKET); + } + + // make sure all the record can be inserted(ignore primary check) + if (model.isRecord() && !isAdditionalTable) { + columnDefinitions.add(CassandraClient.RECORD_UNIQUE_UUID_COLUMN + " text"); + shardKeys.add(CassandraClient.RECORD_UNIQUE_UUID_COLUMN); + } + + // record don't need to add the ID Column as partition key(for query performance) + if (model.isRecord()) { + columnDefinitions.add("PRIMARY KEY (" + (CollectionUtils.isEmpty(shardKeys) ? ID_COLUMN : Joiner.on(", ").join(shardKeys)) + ")"); + } else { + columnDefinitions.add("PRIMARY KEY (" + ID_COLUMN + (CollectionUtils.isEmpty(shardKeys) ? 
"" : "," + Joiner.on(", ").join(shardKeys)) + ")"); + } + + final SQLBuilder sql = new SQLBuilder("CREATE TABLE IF NOT EXISTS " + table) + .append(columnDefinitions.stream().collect(joining(", ", " (", ")"))) + .append(" WITH "); + + if (CollectionUtils.isNotEmpty(shardKeys)) { + if (model.isRecord()) shardKeys.remove(0); + sql.append(" CLUSTERING ORDER BY (") + .append(shardKeys.stream().map(s -> s + " DESC").collect(joining(", "))) + .append(") AND "); + } + + sql + .append(" compaction = {'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy'} ") + .append(" AND gc_grace_seconds = 3600") + .append(" AND speculative_retry = '95percentile';"); + + executeSQL(sql); + } + + private void updateTable(String table, List columns) throws SQLException { + final List alterSqls = columns + .stream() + .map(this::getColumnDefinition) + .map(definition -> "ALTER TABLE " + table + " ADD COLUMN " + definition + "; ") + .collect(toList()); + + for (String alterSql : alterSqls) { + executeSQL(new SQLBuilder(alterSql)); + } + } + + @Override + public void executeSQL(SQLBuilder sql) throws SQLException { + ((CassandraClient) this.client).execute(sql.toString()); + } + + @Override + protected String getColumnDefinition(ModelColumn column, Class type, Type genericType) { + final String storageName = column.getColumnName().getStorageName(); + if (Integer.class.equals(type) || int.class.equals(type) || Layer.class.equals(type)) { + return storageName + " int"; + } else if (Long.class.equals(type) || long.class.equals(type)) { + return storageName + " bigint"; + } else if (Double.class.equals(type) || double.class.equals(type)) { + return storageName + " DOUBLE"; + } else if (String.class.equals(type)) { + return storageName + " text"; + } else if (StorageDataComplexObject.class.isAssignableFrom(type)) { + return storageName + " text"; + } else if (byte[].class.equals(type)) { + return storageName + " blob"; + } else if (JsonObject.class.equals(type)) { + return 
storageName + " text"; + } else if (List.class.isAssignableFrom(type)) { + final Type elementType = ((ParameterizedType) genericType).getActualTypeArguments()[0]; + return getColumnDefinition(column, (Class) elementType, elementType); + } else { + throw new IllegalArgumentException("Unsupported data type: " + type.getName()); + } + } + + protected Set getDatabaseColumns(String table) { + final KeyspaceMetadata metadata = ((CassandraClient) this.client).getMetadata(); + if (metadata == null) { + return Collections.emptySet(); + } + final TableMetadata tableMetadata = metadata.getTable(table).orElse(null); + if (tableMetadata == null) { + return Collections.emptySet(); + } + return tableMetadata.getColumns().keySet().stream().map(CqlIdentifier::asInternal).collect(Collectors.toSet()); + } + +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/BatchCQLExecutor.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/BatchCQLExecutor.java new file mode 100644 index 00000000000..23d31194f15 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/BatchCQLExecutor.java @@ -0,0 +1,112 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package zipkin.server.storage.cassandra.dao; + +import com.datastax.oss.driver.api.core.cql.BatchStatement; +import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; +import com.datastax.oss.driver.api.core.cql.BatchType; +import org.apache.skywalking.oap.server.core.UnexpectedException; +import org.apache.skywalking.oap.server.library.client.request.InsertRequest; +import org.apache.skywalking.oap.server.library.client.request.PrepareRequest; +import org.apache.skywalking.oap.server.library.client.request.UpdateRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import zipkin.server.storage.cassandra.CQLExecutor; +import zipkin.server.storage.cassandra.CassandraClient; + +import java.util.ArrayList; +import java.util.List; + +public class BatchCQLExecutor implements InsertRequest, UpdateRequest { + static final Logger LOG = LoggerFactory.getLogger(BatchCQLExecutor.class); + + private final CassandraClient client; + private final List requests; + + public BatchCQLExecutor(CassandraClient client, List requests) { + this.client = client; + this.requests = requests; + } + + public void invoke(int maxBatchCqlSize) throws Exception { + if (LOG.isDebugEnabled()) { + LOG.debug("execute cql batch. 
sql by key size: {}", requests.size()); + } + if (requests.size() == 0) { + return; + } + final String sql = requests.get(0).toString(); + final BatchStatementBuilder batchBuilder = BatchStatement.builder(BatchType.LOGGED); + int pendingCount = 0; + final ArrayList executors = new ArrayList<>(); + for (PrepareRequest request : requests) { + final CQLExecutor executor = (CQLExecutor) request; + if (LOG.isDebugEnabled()) { + LOG.debug("Executing CQL: {}", executor.getCql()); + LOG.debug("CQL parameters: {}", executor.getParams()); + } + executors.add(executor); + batchBuilder.addStatement(client.getSession().prepare(executor.getCql()).bind(executor.getParams().toArray())); + if (batchBuilder.getStatementsCount() == maxBatchCqlSize) { + executeBatch(maxBatchCqlSize, batchBuilder.build(), executors, sql); + client.getSession().execute(batchBuilder.build()); + batchBuilder.clearStatements(); + executors.clear(); + pendingCount = 0; + } else { + pendingCount++; + } + } + + if (pendingCount > 0) { + executeBatch(pendingCount, batchBuilder.build(), executors, sql); + batchBuilder.clearStatements(); + } + } + + private void executeBatch(int pendingCount, BatchStatement stmt, List bulkExecutors, String sql) { + final long start = System.currentTimeMillis(); + boolean success = true; + try { + client.getSession().execute(stmt); + } catch (Exception e) { + success = false; + LOG.warn("execute batch cql failure", e); + } + final boolean isInsert = bulkExecutors.get(0) instanceof InsertRequest; + for (CQLExecutor executor : bulkExecutors) { + if (isInsert) { + ((InsertRequest) executor).onInsertCompleted(); + } else if (!success) { + ((UpdateRequest) executor).onUpdateFailure(); + } + } + if (LOG.isDebugEnabled()) { + long end = System.currentTimeMillis(); + long cost = end - start; + LOG.debug("execute batch cql, batch size: {}, cost:{}ms, sql: {}", pendingCount, cost, sql); + } + } + + @Override + public void onInsertCompleted() { + throw new 
UnexpectedException("BatchCQLExecutor.onInsertCompleted should not be called"); + } + + @Override + public void onUpdateFailure() { + throw new UnexpectedException("BatchCQLExecutor.onUpdateFailure should not be called"); + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraBatchDAO.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraBatchDAO.java new file mode 100644 index 00000000000..6d4527892e6 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraBatchDAO.java @@ -0,0 +1,105 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ +package zipkin.server.storage.cassandra.dao; + +import org.apache.skywalking.oap.server.core.storage.IBatchDAO; +import org.apache.skywalking.oap.server.library.client.request.InsertRequest; +import org.apache.skywalking.oap.server.library.client.request.PrepareRequest; +import org.apache.skywalking.oap.server.library.datacarrier.DataCarrier; +import org.apache.skywalking.oap.server.library.datacarrier.consumer.IConsumer; +import org.apache.skywalking.oap.server.library.util.CollectionUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import zipkin.server.storage.cassandra.CQLExecutor; +import zipkin.server.storage.cassandra.CassandraClient; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; + +public class CassandraBatchDAO implements IBatchDAO { + static final Logger LOG = LoggerFactory.getLogger(CassandraBatchDAO.class); + + private CassandraClient client; + private final DataCarrier dataCarrier; + private final int maxBatchSqlSize; + + public CassandraBatchDAO(CassandraClient client, int maxBatchCqlSize, int asyncBatchPersistentPoolSize) { + this.client = client; + String name = "CASSANDRA_ASYNCHRONOUS_BATCH_PERSISTENT"; + if (LOG.isDebugEnabled()) { + LOG.debug("CASSANDRA_ASYNCHRONOUS_BATCH_PERSISTENT poolSize: {}, maxBatchCqlSize:{}", asyncBatchPersistentPoolSize, maxBatchCqlSize); + } + this.maxBatchSqlSize = maxBatchCqlSize; + this.dataCarrier = new DataCarrier<>(name, asyncBatchPersistentPoolSize, 10000); + this.dataCarrier.consume(new CassandraBatchConsumer(this), asyncBatchPersistentPoolSize, 20); + } + + @Override + public void insert(InsertRequest insertRequest) { + this.dataCarrier.produce(insertRequest); + } + + @Override + public CompletableFuture flush(List prepareRequests) { + if (CollectionUtils.isEmpty(prepareRequests)) { + return CompletableFuture.completedFuture(null); + } + + List sqls = new ArrayList<>(); + prepareRequests.forEach(prepareRequest -> { + 
sqls.add(prepareRequest); + CQLExecutor cqlExecutor = (CQLExecutor) prepareRequest; + if (!CollectionUtils.isEmpty(cqlExecutor.getAdditionalCQLs())) { + sqls.addAll(cqlExecutor.getAdditionalCQLs()); + } + }); + + if (LOG.isDebugEnabled()) { + LOG.debug("to execute sql statements execute, data size: {}, maxBatchSqlSize: {}", sqls.size(), maxBatchSqlSize); + } + + try { + final BatchCQLExecutor batchSQLExecutor = new BatchCQLExecutor(client, sqls); + batchSQLExecutor.invoke(maxBatchSqlSize); + } catch (Exception e) { + // Just to avoid one execution failure makes the rest of batch failure. + LOG.error(e.getMessage(), e); + } + if (LOG.isDebugEnabled()) { + LOG.debug("execute sql statements done, data size: {}, maxBatchSqlSize: {}", prepareRequests.size(), maxBatchSqlSize); + } + return CompletableFuture.completedFuture(null); + } + + + private static class CassandraBatchConsumer implements IConsumer { + + private final CassandraBatchDAO batchDAO; + + private CassandraBatchConsumer(CassandraBatchDAO batchDAO) { + this.batchDAO = batchDAO; + } + + @Override + public void consume(List prepareRequests) { + batchDAO.flush(prepareRequests); + } + + @Override + public void onError(List prepareRequests, Throwable t) { + LOG.error(t.getMessage(), t); + } + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraCqlExecutor.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraCqlExecutor.java new file mode 100644 index 00000000000..87df6b02f3f --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraCqlExecutor.java @@ -0,0 +1,251 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ + +package zipkin.server.storage.cassandra.dao; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.cql.Row; +import org.apache.skywalking.oap.server.core.storage.SessionCacheCallback; +import org.apache.skywalking.oap.server.core.storage.StorageData; +import org.apache.skywalking.oap.server.core.storage.model.ColumnName; +import org.apache.skywalking.oap.server.core.storage.model.Model; +import org.apache.skywalking.oap.server.core.storage.model.ModelColumn; +import org.apache.skywalking.oap.server.core.storage.model.SQLDatabaseModelExtension; +import org.apache.skywalking.oap.server.core.storage.type.Convert2Storage; +import org.apache.skywalking.oap.server.core.storage.type.HashMapConverter; +import org.apache.skywalking.oap.server.core.storage.type.StorageBuilder; +import org.apache.skywalking.oap.server.core.storage.type.StorageDataComplexObject; +import org.apache.skywalking.oap.server.library.util.CollectionUtils; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.SQLBuilder; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.TableMetaInfo; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.common.JDBCTableInstaller; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.common.TableHelper; +import zipkin.server.storage.cassandra.CQLExecutor; +import zipkin.server.storage.cassandra.CassandraClient; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.stream.Collectors; 
+import java.util.stream.Stream; + +import static org.apache.skywalking.oap.server.storage.plugin.jdbc.common.JDBCTableInstaller.ID_COLUMN; + +public class CassandraCqlExecutor { + + protected List getByIDs(CassandraClient client, + String modelName, + List ids, + StorageBuilder storageBuilder) { + final List modelTables = getModelTables(client, modelName); + List storageDataList = new ArrayList<>(); + + for (String table : modelTables) { + final SQLBuilder sql = new SQLBuilder("SELECT * FROM " + table + " WHERE id in ") + .append(ids.stream().map(it -> "?").collect(Collectors.joining(",", "(", ")"))); + storageDataList.addAll(client.executeQuery(sql.toString(), new CassandraClient.ResultHandler() { + @Override + public StorageData handle(Row resultSet) { + return toStorageData(resultSet, modelName, storageBuilder); + } + }, ids.toArray())); + } + + return storageDataList; + } + + protected CQLExecutor getInsertExecutor(Model model, T metrics, long timeBucket, + StorageBuilder storageBuilder, + Convert2Storage> converter, + SessionCacheCallback callback) { + storageBuilder.entity2Storage(metrics, converter); + // adding the uuid column + Map objectMap = converter.obtain(); + //build main table cql + Map mainEntity = new HashMap<>(); + model.getColumns().forEach(column -> { + mainEntity.put(column.getColumnName().getName(), objectMap.get(column.getColumnName().getName())); + }); + CQLExecutor sqlExecutor = buildInsertExecutor( + model, metrics, timeBucket, mainEntity, callback); + //build additional table cql + for (final SQLDatabaseModelExtension.AdditionalTable additionalTable : model.getSqlDBModelExtension().getAdditionalTables().values()) { + Map additionalEntity = new HashMap<>(); + additionalTable.getColumns().forEach(column -> { + additionalEntity.put(column.getColumnName().getName(), objectMap.get(column.getColumnName().getName())); + }); + + List additionalSQLExecutors = buildAdditionalInsertExecutor( + model, additionalTable.getName(), 
additionalTable.getColumns(), metrics, + timeBucket, additionalEntity, callback + ); + sqlExecutor.appendAdditionalCQLs(additionalSQLExecutors); + } + return sqlExecutor; + } + + protected CQLExecutor getUpdateExecutor(Model model, T metrics, + long timeBucket, + StorageBuilder storageBuilder, + SessionCacheCallback callback) { + final Convert2Storage> toStorage = new HashMapConverter.ToStorage(); + storageBuilder.entity2Storage(metrics, toStorage); + final Map objectMap = toStorage.obtain(); + final String table = TableHelper.getTable(model, timeBucket); + final StringBuilder sqlBuilder = new StringBuilder("UPDATE " + table + " SET "); + final List columns = model.getColumns(); + final List queries = new ArrayList<>(); + final List param = new ArrayList<>(); + for (final ModelColumn column : columns) { + final String columnName = column.getColumnName().getName(); + queries.add(column.getColumnName().getStorageName() + " = ?"); + + final Object value = objectMap.get(columnName); + if (value instanceof StorageDataComplexObject) { + param.add(((StorageDataComplexObject) value).toStorageData()); + } else { + param.add(value); + } + } + sqlBuilder.append(queries.stream().collect(Collectors.joining(", "))); + sqlBuilder.append(" WHERE id = ?"); + param.add(TableHelper.generateId(model, metrics.id().build())); + + return new CQLExecutor(sqlBuilder.toString(), param, callback, null); + } + + private List buildAdditionalInsertExecutor(Model model, String tableName, + List columns, + T metrics, + long timeBucket, + Map objectMap, + SessionCacheCallback callback) { + + List sqlExecutors = new ArrayList<>(); + List columnNames = new ArrayList<>(); + List values = new ArrayList<>(); + List param = new ArrayList<>(); + final SQLBuilder sqlBuilder = new SQLBuilder("INSERT INTO ") + .append(TableHelper.getTable(tableName, timeBucket)); + + columnNames.add(ID_COLUMN); + values.add("?"); + param.add(TableHelper.generateId(model, metrics.id().build())); + + int position = 0; + List 
valueList = new ArrayList(); + for (int i = 0; i < columns.size(); i++) { + ModelColumn column = columns.get(i); + if (List.class.isAssignableFrom(column.getType())) { + valueList = (List) objectMap.get(column.getColumnName().getName()); + + columnNames.add(column.getColumnName().getStorageName()); + values.add("?"); + param.add(null); + + position = i + 1; + } else { + columnNames.add(column.getColumnName().getStorageName()); + values.add("?"); + + Object value = objectMap.get(column.getColumnName().getName()); + if (value instanceof StorageDataComplexObject) { + param.add(((StorageDataComplexObject) value).toStorageData()); + } else { + param.add(value); + } + } + } + + sqlBuilder.append("(").append(columnNames.stream().collect(Collectors.joining(", "))).append(")") + .append(" VALUES (").append(values.stream().collect(Collectors.joining(", "))).append(")"); + String sql = sqlBuilder.toString(); + if (!CollectionUtils.isEmpty(valueList)) { + for (Object object : valueList) { + List paramCopy = new ArrayList<>(param); + paramCopy.set(position, object); + sqlExecutors.add(new CQLExecutor(sql, paramCopy, callback, null)); + } + } else { + sqlExecutors.add(new CQLExecutor(sql, param, callback, null)); + } + + return sqlExecutors; + } + + private CQLExecutor buildInsertExecutor(Model model, + T metrics, + long timeBucket, + Map objectMap, + SessionCacheCallback onCompleteCallback) { + final String table = TableHelper.getTable(model, timeBucket); + final SQLBuilder sqlBuilder = new SQLBuilder("INSERT INTO " + table); + final List columns = model.getColumns(); + final List columnNames = + Stream.concat( + Stream.of(ID_COLUMN, JDBCTableInstaller.TABLE_COLUMN), + columns + .stream() + .map(ModelColumn::getColumnName) + .map(ColumnName::getStorageName)) + .collect(Collectors.toList()); + if (model.isRecord()) { + columnNames.add(CassandraClient.RECORD_UNIQUE_UUID_COLUMN); + } + sqlBuilder.append(columnNames.stream().collect(Collectors.joining(",", "(", ")"))); + 
sqlBuilder.append(" VALUES "); + sqlBuilder.append(columnNames.stream().map(it -> "?").collect(Collectors.joining(",", "(", ")"))); + + final List params = Stream.concat( + Stream.of(TableHelper.generateId(model, metrics.id().build()), model.getName()), + columns + .stream() + .map(ModelColumn::getColumnName) + .map(ColumnName::getName) + .map(objectMap::get) + .map(it -> { + if (it instanceof StorageDataComplexObject) { + return ((StorageDataComplexObject) it).toStorageData(); + } + return it; + })) + .collect(Collectors.toList()); + if (model.isRecord()) { + params.add(UUID.randomUUID().toString()); + } + + return new CQLExecutor(sqlBuilder.toString(), params, onCompleteCallback, null); + } + + + protected StorageData toStorageData(Row row, String modelName, + StorageBuilder storageBuilder) { + Map data = new HashMap<>(); + List columns = TableMetaInfo.get(modelName).getColumns(); + for (ModelColumn column : columns) { + data.put(column.getColumnName().getName(), row.getObject(column.getColumnName().getStorageName())); + } + return storageBuilder.storage2Entity(new HashMapConverter.ToEntity(data)); + } + + private static List getModelTables(CassandraClient client, String modelName) { + final Model model = TableMetaInfo.get(modelName); + final String tableName = TableHelper.getTableName(model); + return client.getMetadata().getTables().keySet().stream() + .filter(t -> t.asInternal().startsWith(tableName)) + .map(CqlIdentifier::asInternal).collect(Collectors.toList()); + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraHistoryDeleteDAO.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraHistoryDeleteDAO.java new file mode 100644 index 00000000000..6916fcf05db --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraHistoryDeleteDAO.java @@ -0,0 +1,113 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ + +package zipkin.server.storage.cassandra.dao; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import org.apache.skywalking.oap.server.core.analysis.DownSampling; +import org.apache.skywalking.oap.server.core.analysis.TimeBucket; +import org.apache.skywalking.oap.server.core.storage.IHistoryDeleteDAO; +import org.apache.skywalking.oap.server.core.storage.model.Model; +import org.apache.skywalking.oap.server.core.storage.model.SQLDatabaseModelExtension; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.SQLBuilder; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.common.TableHelper; +import org.joda.time.DateTime; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import zipkin.server.storage.cassandra.CassandraClient; +import zipkin.server.storage.cassandra.CassandraTableInstaller; + +import java.io.IOException; +import java.time.Clock; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; + +public class CassandraHistoryDeleteDAO implements IHistoryDeleteDAO { + static final Logger LOG = LoggerFactory.getLogger(CassandraHistoryDeleteDAO.class); + + private final CassandraClient client; + private final TableHelper tableHelper; + private final CassandraTableInstaller modelInstaller; + private final Clock clock; + + private final Map lastDeletedTimeBucket = new 
ConcurrentHashMap<>(); + + public CassandraHistoryDeleteDAO(CassandraClient client, TableHelper tableHelper, CassandraTableInstaller modelInstaller, Clock clock) { + this.client = client; + this.tableHelper = tableHelper; + this.modelInstaller = modelInstaller; + this.clock = clock; + } + + @Override + public void deleteHistory(Model model, String timeBucketColumnName, int ttl) throws IOException { + final long endTimeBucket = TimeBucket.getTimeBucket(clock.millis() + TimeUnit.DAYS.toMillis(1), DownSampling.Day); + final long startTimeBucket = TimeBucket.getTimeBucket(clock.millis() - TimeUnit.DAYS.toMillis(ttl), DownSampling.Day); + LOG.info( + "Deleting history data, ttl: {}, now: {}. Keep [{}, {}]", + ttl, + clock.millis(), + startTimeBucket, + endTimeBucket + ); + + final long deadline = Long.parseLong(new DateTime().minusDays(ttl).toString("yyyyMMdd")); + final long lastSuccessDeadline = lastDeletedTimeBucket.getOrDefault(model.getName(), 0L); + if (deadline <= lastSuccessDeadline) { + if (LOG.isDebugEnabled()) { + LOG.debug( + "The deadline {} is less than the last success deadline {}, skip deleting history data", + deadline, + lastSuccessDeadline + ); + } + return; + } + + final List ttlTables = tableHelper.getTablesInTimeBucketRange(model.getName(), startTimeBucket, endTimeBucket); + final HashSet tablesToDrop = new HashSet(); + final String tableName = TableHelper.getTableName(model); + + client.getMetadata().getTables().keySet().stream() + .map(CqlIdentifier::asInternal) + .filter(s -> s.startsWith(tableName)) + .forEach(tablesToDrop::add); + + ttlTables.forEach(tablesToDrop::remove); + tablesToDrop.removeIf(it -> !it.matches(tableName + "_\\d{8}$")); + for (final String table : tablesToDrop) { + final SQLBuilder dropSql = new SQLBuilder("drop table if exists ").append(table); + client.execute(dropSql.toString()); + } + + // Drop additional tables + for (final String table : tablesToDrop) { + final long timeBucket = TableHelper.getTimeBucket(table); + for 
(final SQLDatabaseModelExtension.AdditionalTable additionalTable : model.getSqlDBModelExtension().getAdditionalTables().values()) { + final String additionalTableToDrop = TableHelper.getTable(additionalTable.getName(), timeBucket); + final SQLBuilder dropSql = new SQLBuilder("drop table if exists ").append(additionalTableToDrop); + client.execute(dropSql.toString()); + } + } + + // Create tables for the next day. + final long nextTimeBucket = TimeBucket.getTimeBucket(clock.millis() + TimeUnit.DAYS.toMillis(1), DownSampling.Day); + modelInstaller.createTable(model, nextTimeBucket); + + lastDeletedTimeBucket.put(model.getName(), deadline); + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraMetricsDAO.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraMetricsDAO.java new file mode 100644 index 00000000000..3a8b9761740 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraMetricsDAO.java @@ -0,0 +1,64 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package zipkin.server.storage.cassandra.dao; + +import org.apache.skywalking.oap.server.core.analysis.metrics.Metrics; +import org.apache.skywalking.oap.server.core.storage.IMetricsDAO; +import org.apache.skywalking.oap.server.core.storage.SessionCacheCallback; +import org.apache.skywalking.oap.server.core.storage.StorageData; +import org.apache.skywalking.oap.server.core.storage.model.Model; +import org.apache.skywalking.oap.server.core.storage.type.HashMapConverter; +import org.apache.skywalking.oap.server.core.storage.type.StorageBuilder; +import org.apache.skywalking.oap.server.library.client.request.InsertRequest; +import org.apache.skywalking.oap.server.library.client.request.UpdateRequest; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.common.TableHelper; +import zipkin.server.storage.cassandra.CassandraClient; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static java.util.stream.Collectors.toList; + +public class CassandraMetricsDAO extends CassandraCqlExecutor implements IMetricsDAO { + private final CassandraClient client; + private final StorageBuilder storageBuilder; + + public CassandraMetricsDAO(CassandraClient client, StorageBuilder storageBuilder) { + this.client = client; + this.storageBuilder = storageBuilder; + } + + @Override + public List multiGet(Model model, List metrics) throws Exception { + final List ids = metrics.stream().map(m -> TableHelper.generateId(model, m.id().build())).collect(toList()); + final List storageDataList = getByIDs(client, model.getName(), ids, storageBuilder); + final List result = new ArrayList<>(storageDataList.size()); + for (StorageData storageData : storageDataList) { + result.add((Metrics) storageData); + } + return result; + } + + @Override + public InsertRequest prepareBatchInsert(Model model, Metrics metrics, SessionCacheCallback callback) throws IOException { + return getInsertExecutor(model, metrics, metrics.getTimeBucket(), 
storageBuilder, new HashMapConverter.ToStorage(), callback); + } + + @Override + public UpdateRequest prepareBatchUpdate(Model model, Metrics metrics, SessionCacheCallback callback) throws IOException { + return getUpdateExecutor(model, metrics, metrics.getTimeBucket(), storageBuilder, callback); + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraRecordDAO.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraRecordDAO.java new file mode 100644 index 00000000000..b4c90d56833 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraRecordDAO.java @@ -0,0 +1,37 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package zipkin.server.storage.cassandra.dao; + +import org.apache.skywalking.oap.server.core.analysis.record.Record; +import org.apache.skywalking.oap.server.core.storage.IRecordDAO; +import org.apache.skywalking.oap.server.core.storage.model.Model; +import org.apache.skywalking.oap.server.core.storage.type.HashMapConverter; +import org.apache.skywalking.oap.server.core.storage.type.StorageBuilder; +import org.apache.skywalking.oap.server.library.client.request.InsertRequest; + +import java.io.IOException; + +public class CassandraRecordDAO extends CassandraCqlExecutor implements IRecordDAO { + private final StorageBuilder storageBuilder; + + public CassandraRecordDAO(StorageBuilder storageBuilder) { + this.storageBuilder = storageBuilder; + } + + @Override + public InsertRequest prepareBatchInsert(Model model, Record record) throws IOException { + return getInsertExecutor(model, record, record.getTimeBucket(), storageBuilder, new HashMapConverter.ToStorage(), null); + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraStorageDAO.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraStorageDAO.java new file mode 100644 index 00000000000..8b1e2d46c10 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraStorageDAO.java @@ -0,0 +1,51 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. 
See the License for the specific language governing permissions and limitations under + * the License. + */ + +package zipkin.server.storage.cassandra.dao; + +import org.apache.skywalking.oap.server.core.storage.IManagementDAO; +import org.apache.skywalking.oap.server.core.storage.IMetricsDAO; +import org.apache.skywalking.oap.server.core.storage.INoneStreamDAO; +import org.apache.skywalking.oap.server.core.storage.IRecordDAO; +import org.apache.skywalking.oap.server.core.storage.StorageDAO; +import org.apache.skywalking.oap.server.core.storage.type.StorageBuilder; +import zipkin.server.storage.cassandra.CassandraClient; + +public class CassandraStorageDAO implements StorageDAO { + private final CassandraClient client; + + public CassandraStorageDAO(CassandraClient client) { + this.client = client; + } + + @Override + public IMetricsDAO newMetricsDao(StorageBuilder storageBuilder) { + return new CassandraMetricsDAO(client, storageBuilder); + } + + @Override + public IRecordDAO newRecordDao(StorageBuilder storageBuilder) { + return new CassandraRecordDAO(storageBuilder); + } + + @Override + public INoneStreamDAO newNoneStreamDao(StorageBuilder storageBuilder) { + throw new IllegalStateException("Cassandra does not support NoneStreamDAO"); + } + + @Override + public IManagementDAO newManagementDao(StorageBuilder storageBuilder) { + throw new IllegalStateException("Cassandra does not support ManagementDAO"); + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraTagAutocompleteDAO.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraTagAutocompleteDAO.java new file mode 100644 index 00000000000..e250551585f --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraTagAutocompleteDAO.java @@ -0,0 +1,70 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ + +package zipkin.server.storage.cassandra.dao; + +import org.apache.skywalking.oap.server.core.analysis.manual.searchtag.TagAutocompleteData; +import org.apache.skywalking.oap.server.core.analysis.manual.searchtag.TagType; +import org.apache.skywalking.oap.server.core.query.input.Duration; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.common.SQLAndParameters; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.common.TableHelper; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.common.dao.JDBCTagAutoCompleteQueryDAO; +import zipkin.server.storage.cassandra.CassandraClient; + +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; + +public class CassandraTagAutocompleteDAO extends JDBCTagAutoCompleteQueryDAO { + private final CassandraClient client; + private final TableHelper tableHelper; + + public CassandraTagAutocompleteDAO(CassandraClient client, TableHelper tableHelper) { + super(null, tableHelper); + this.client = client; + this.tableHelper = tableHelper; + } + + @Override + public Set queryTagAutocompleteKeys(TagType tagType, int limit, Duration duration) { + final Set results = new HashSet<>(); + + for (String table : tableHelper.getTablesForRead( + TagAutocompleteData.INDEX_NAME, + duration.getStartTimeBucket(), + duration.getEndTimeBucket() + )) { + final SQLAndParameters sqlAndParameters = buildSQLForQueryKeys(tagType, Integer.MAX_VALUE, duration, table); + 
results.addAll(client.executeQuery(sqlAndParameters.sql().replaceAll("(1=1\\s+and)|(distinct)", "") + " ALLOW FILTERING", + row -> row.getString(TagAutocompleteData.TAG_KEY), sqlAndParameters.parameters())); + } + return results.stream().distinct().limit(limit).collect(Collectors.toSet()); + } + + @Override + public Set queryTagAutocompleteValues(TagType tagType, String tagKey, int limit, Duration duration) { + final Set results = new HashSet<>(); + + for (String table : tableHelper.getTablesForRead( + TagAutocompleteData.INDEX_NAME, + duration.getStartTimeBucket(), + duration.getEndTimeBucket() + )) { + final SQLAndParameters sqlAndParameters = buildSQLForQueryValues(tagType, tagKey, limit, duration, table); + results.addAll(client.executeQuery(sqlAndParameters.sql() + " ALLOW FILTERING", + row -> row.getString(TagAutocompleteData.TAG_VALUE), sqlAndParameters.parameters())); + } + return results; + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraZipkinQueryDAO.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraZipkinQueryDAO.java new file mode 100644 index 00000000000..428a20b4946 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/CassandraZipkinQueryDAO.java @@ -0,0 +1,297 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package zipkin.server.storage.cassandra.dao; + +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.google.gson.Gson; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import org.apache.skywalking.oap.server.core.query.input.Duration; +import org.apache.skywalking.oap.server.core.storage.query.IZipkinQueryDAO; +import org.apache.skywalking.oap.server.core.zipkin.ZipkinServiceRelationTraffic; +import org.apache.skywalking.oap.server.core.zipkin.ZipkinServiceSpanTraffic; +import org.apache.skywalking.oap.server.core.zipkin.ZipkinServiceTraffic; +import org.apache.skywalking.oap.server.core.zipkin.ZipkinSpanRecord; +import org.apache.skywalking.oap.server.library.util.CollectionUtils; +import org.apache.skywalking.oap.server.library.util.StringUtil; +import org.apache.skywalking.oap.server.storage.plugin.jdbc.common.TableHelper; +import zipkin.server.storage.cassandra.CassandraClient; +import zipkin.server.storage.cassandra.CassandraTableHelper; +import zipkin2.Endpoint; +import zipkin2.Span; +import zipkin2.storage.QueryRequest; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CompletionStage; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +import static java.util.stream.Collectors.toList; + +public class CassandraZipkinQueryDAO implements IZipkinQueryDAO { + private final static int NAME_QUERY_MAX_SIZE = Integer.MAX_VALUE; + private static final Gson GSON = new Gson(); + + private final CassandraClient client; + private final CassandraTableHelper tableHelper; + + public CassandraZipkinQueryDAO(CassandraClient client, CassandraTableHelper tableHelper) { + this.client = client; + 
this.tableHelper = tableHelper; + } + + @Override + public List getServiceNames() throws IOException { + final List services = new ArrayList<>(); + + for (String table : tableHelper.getTablesWithinTTL(ZipkinServiceTraffic.INDEX_NAME)) { + services.addAll(client.executeQuery("select " + ZipkinServiceTraffic.SERVICE_NAME + " from " + table + " limit " + NAME_QUERY_MAX_SIZE, + row -> row.getString(ZipkinServiceTraffic.SERVICE_NAME))); + } + + return services + .stream() + .limit(NAME_QUERY_MAX_SIZE) + .collect(toList()); + } + + @Override + public List getRemoteServiceNames(String serviceName) throws IOException { + final Set services = new HashSet<>(); + + for (String table : tableHelper.getTablesWithinTTL(ZipkinServiceRelationTraffic.INDEX_NAME)) { + services.addAll(client.executeQuery("select " + ZipkinServiceRelationTraffic.REMOTE_SERVICE_NAME + + " from " + table + + " where " + ZipkinServiceRelationTraffic.SERVICE_NAME + " = ?" + + " limit " + NAME_QUERY_MAX_SIZE, + row -> row.getString(ZipkinServiceRelationTraffic.REMOTE_SERVICE_NAME), + serviceName)); + } + + return services + .stream() + .limit(NAME_QUERY_MAX_SIZE) + .collect(toList()); + } + + @Override + public List getSpanNames(String serviceName) throws IOException { + final Set names = new HashSet<>(); + + for (String table : tableHelper.getTablesWithinTTL(ZipkinServiceSpanTraffic.INDEX_NAME)) { + names.addAll(client.executeQuery("select " + ZipkinServiceSpanTraffic.SPAN_NAME + + " from " + table + + " where " + ZipkinServiceSpanTraffic.SERVICE_NAME + " = ?" 
+ + " limit " + NAME_QUERY_MAX_SIZE, + row -> row.getString(ZipkinServiceSpanTraffic.SPAN_NAME), + serviceName)); + } + + return names + .stream() + .limit(NAME_QUERY_MAX_SIZE) + .collect(toList()); + } + + @Override + public List getTrace(String traceId) { + final List spans = new ArrayList<>(); + + for (String table : tableHelper.getTablesWithinTTL(ZipkinSpanRecord.INDEX_NAME)) { + spans.addAll(client.executeQuery("select * from " + table + + " where " + ZipkinSpanRecord.TRACE_ID + " = ?" + + " limit " + NAME_QUERY_MAX_SIZE, + this::buildSpan, traceId)); + } + + return spans; + } + + @Override + public List> getTraces(QueryRequest request, Duration duration) throws IOException { + Set traceIdSet = new HashSet<>(); + for (String table : tableHelper.getTablesForRead( + ZipkinSpanRecord.INDEX_NAME, + duration.getStartTimeBucket(), + duration.getEndTimeBucket() + )) { + List>> completionTraceIds = new ArrayList<>(); + if (CollectionUtils.isNotEmpty(request.annotationQuery())) { + final long timeBucket = TableHelper.getTimeBucket(table); + final String tagTable = TableHelper.getTable(ZipkinSpanRecord.ADDITIONAL_QUERY_TABLE, timeBucket); + for (Map.Entry entry : request.annotationQuery().entrySet()) { + completionTraceIds.add(client.executeAsyncQuery("select " + ZipkinSpanRecord.TRACE_ID + " from " + tagTable + + " where " + ZipkinSpanRecord.QUERY + " = ?" + + " and " + ZipkinSpanRecord.TIME_BUCKET + " >= ?" + + " and " + ZipkinSpanRecord.TIME_BUCKET + " <= ? ALLOW FILTERING", + row -> row.getString(ZipkinSpanRecord.TRACE_ID), + entry.getValue().isEmpty() ? entry.getKey() : entry.getKey() + "=" + entry.getValue(), + duration.getStartTimeBucket(), duration.getEndTimeBucket())); + } + } + if (request.minDuration() != null) { + completionTraceIds.add(client.executeAsyncQuery("select " + ZipkinSpanRecord.TRACE_ID + " from " + table + + " where " + ZipkinSpanRecord.DURATION + " >= ?" + + " and " + ZipkinSpanRecord.TIME_BUCKET + " >= ?" 
+ + " and " + ZipkinSpanRecord.TIME_BUCKET + " <= ? ALLOW FILTERING", + row -> row.getString(ZipkinSpanRecord.TRACE_ID), + request.minDuration(), duration.getStartTimeBucket(), duration.getEndTimeBucket() + )); + } + if (request.maxDuration() != null) { + completionTraceIds.add(client.executeAsyncQuery("select " + ZipkinSpanRecord.TRACE_ID + " from " + table + + " where " + ZipkinSpanRecord.DURATION + " <= ?" + + " and " + ZipkinSpanRecord.TIME_BUCKET + " >= ?" + + " and " + ZipkinSpanRecord.TIME_BUCKET + " <= ? ALLOW FILTERING", + row -> row.getString(ZipkinSpanRecord.TRACE_ID), + request.maxDuration(), duration.getStartTimeBucket(), duration.getEndTimeBucket() + )); + } + if (StringUtil.isNotEmpty(request.serviceName())) { + completionTraceIds.add(client.executeAsyncQuery("select " + ZipkinSpanRecord.TRACE_ID + " from " + table + + " where " + ZipkinSpanRecord.LOCAL_ENDPOINT_SERVICE_NAME + " = ?" + + " and " + ZipkinSpanRecord.TIME_BUCKET + " >= ?" + + " and " + ZipkinSpanRecord.TIME_BUCKET + " <= ? ALLOW FILTERING", + row -> row.getString(ZipkinSpanRecord.TRACE_ID), + request.serviceName(), duration.getStartTimeBucket(), duration.getEndTimeBucket() + )); + } + if (StringUtil.isNotEmpty(request.remoteServiceName())) { + completionTraceIds.add(client.executeAsyncQuery("select " + ZipkinSpanRecord.TRACE_ID + " from " + table + + " where " + ZipkinSpanRecord.REMOTE_ENDPOINT_SERVICE_NAME + " = ?" + + " and " + ZipkinSpanRecord.TIME_BUCKET + " >= ?" + + " and " + ZipkinSpanRecord.TIME_BUCKET + " <= ? ALLOW FILTERING", + row -> row.getString(ZipkinSpanRecord.TRACE_ID), + request.remoteServiceName(), duration.getStartTimeBucket(), duration.getEndTimeBucket() + )); + } + if (StringUtil.isNotEmpty(request.spanName())) { + completionTraceIds.add(client.executeAsyncQuery("select " + ZipkinSpanRecord.TRACE_ID + " from " + table + + " where " + ZipkinSpanRecord.NAME + " = ?" + + " and " + ZipkinSpanRecord.TIME_BUCKET + " >= ?" 
+ + " and " + ZipkinSpanRecord.TIME_BUCKET + " <= ? ALLOW FILTERING", + row -> row.getString(ZipkinSpanRecord.TRACE_ID), + request.spanName(), duration.getStartTimeBucket(), duration.getEndTimeBucket() + )); + } + + traceIdSet.addAll(retainTraceIdList(completionTraceIds)); + } + + return getTraces(request.limit() > 0 ? traceIdSet.stream().limit(request.limit()).collect(Collectors.toSet()) : traceIdSet); + } + + private Set retainTraceIdList(List>> completionStages) { + Iterator>> iterator = completionStages.iterator(); + if (!iterator.hasNext()) return Collections.emptySet(); + Set result = new HashSet<>(iterator.next().toCompletableFuture().join()); + while (iterator.hasNext() && result.size() > 0) { + Set nextSet = new HashSet<>(iterator.next().toCompletableFuture().join()); + result.retainAll(nextSet); + } + + return result; + } + + @Override + public List> getTraces(Set traceIds) throws IOException { + if (CollectionUtils.isEmpty(traceIds)) { + return Collections.emptyList(); + } + + final List> result = new ArrayList<>(); + for (String table : tableHelper.getTablesWithinTTL(ZipkinSpanRecord.INDEX_NAME)) { + final PreparedStatement stmt = client.getSession().prepare("select * from " + table + " where " + + ZipkinSpanRecord.TRACE_ID + " in ?"); + final ResultSet execute = client.getSession().execute(stmt.boundStatementBuilder() + .setList(0, new ArrayList<>(traceIds), String.class).build()); + + result.addAll(StreamSupport.stream(execute.spliterator(), false) + .map(this::buildSpan).collect(Collectors.toMap(Span::traceId, s -> new ArrayList<>(Collections.singleton(s)), (s1, s2) -> { + s1.addAll(s2); + return s1; + })).values()); + } + return result; + } + + private Span buildSpan(Row row) { + Span.Builder span = Span.newBuilder(); + span.traceId(row.getString(ZipkinSpanRecord.TRACE_ID)); + span.id(row.getString(ZipkinSpanRecord.SPAN_ID)); + span.parentId(row.getString(ZipkinSpanRecord.PARENT_ID)); + String kind = row.getString(ZipkinSpanRecord.KIND); + if 
(!StringUtil.isEmpty(kind)) { + span.kind(Span.Kind.valueOf(kind)); + } + span.timestamp(row.getLong(ZipkinSpanRecord.TIMESTAMP)); + span.duration(row.getLong(ZipkinSpanRecord.DURATION)); + span.name(row.getString(ZipkinSpanRecord.NAME)); + + if (row.getInt(ZipkinSpanRecord.DEBUG) > 0) { + span.debug(Boolean.TRUE); + } + if (row.getInt(ZipkinSpanRecord.SHARED) > 0) { + span.shared(Boolean.TRUE); + } + //Build localEndpoint + Endpoint.Builder localEndpoint = Endpoint.newBuilder(); + localEndpoint.serviceName(row.getString(ZipkinSpanRecord.LOCAL_ENDPOINT_SERVICE_NAME)); + if (!StringUtil.isEmpty(row.getString(ZipkinSpanRecord.LOCAL_ENDPOINT_IPV4))) { + localEndpoint.parseIp(row.getString(ZipkinSpanRecord.LOCAL_ENDPOINT_IPV4)); + } else { + localEndpoint.parseIp(row.getString(ZipkinSpanRecord.LOCAL_ENDPOINT_IPV6)); + } + localEndpoint.port(row.getInt(ZipkinSpanRecord.LOCAL_ENDPOINT_PORT)); + span.localEndpoint(localEndpoint.build()); + //Build remoteEndpoint + Endpoint.Builder remoteEndpoint = Endpoint.newBuilder(); + remoteEndpoint.serviceName(row.getString(ZipkinSpanRecord.REMOTE_ENDPOINT_SERVICE_NAME)); + if (!StringUtil.isEmpty(row.getString(ZipkinSpanRecord.REMOTE_ENDPOINT_IPV4))) { + remoteEndpoint.parseIp(row.getString(ZipkinSpanRecord.REMOTE_ENDPOINT_IPV4)); + } else { + remoteEndpoint.parseIp(row.getString(ZipkinSpanRecord.REMOTE_ENDPOINT_IPV6)); + } + remoteEndpoint.port(row.getInt(ZipkinSpanRecord.REMOTE_ENDPOINT_PORT)); + span.remoteEndpoint(remoteEndpoint.build()); + + //Build tags + String tagsString = row.getString(ZipkinSpanRecord.TAGS); + if (!StringUtil.isEmpty(tagsString)) { + JsonObject tagsJson = GSON.fromJson(tagsString, JsonObject.class); + for (Map.Entry tag : tagsJson.entrySet()) { + span.putTag(tag.getKey(), tag.getValue().getAsString()); + } + } + //Build annotation + String annotationString = row.getString(ZipkinSpanRecord.ANNOTATIONS); + if (!StringUtil.isEmpty(annotationString)) { + JsonObject annotationJson = 
GSON.fromJson(annotationString, JsonObject.class); + for (Map.Entry annotation : annotationJson.entrySet()) { + span.addAnnotation(Long.parseLong(annotation.getKey()), annotation.getValue().getAsString()); + } + } + return span.build(); + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/EmptyDAO.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/EmptyDAO.java new file mode 100644 index 00000000000..f3ce8e824c0 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/dao/EmptyDAO.java @@ -0,0 +1,489 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package zipkin.server.storage.cassandra.dao; + +import org.apache.skywalking.oap.server.core.analysis.manual.networkalias.NetworkAddressAlias; +import org.apache.skywalking.oap.server.core.analysis.manual.searchtag.Tag; +import org.apache.skywalking.oap.server.core.analysis.manual.searchtag.TagType; +import org.apache.skywalking.oap.server.core.analysis.manual.segment.SegmentRecord; +import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventRecord; +import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventTraceType; +import org.apache.skywalking.oap.server.core.analysis.metrics.Metrics; +import org.apache.skywalking.oap.server.core.analysis.record.Record; +import org.apache.skywalking.oap.server.core.browser.source.BrowserErrorCategory; +import org.apache.skywalking.oap.server.core.management.ui.menu.UIMenu; +import org.apache.skywalking.oap.server.core.profiling.continuous.storage.ContinuousProfilingPolicy; +import org.apache.skywalking.oap.server.core.profiling.ebpf.storage.EBPFProfilingDataRecord; +import org.apache.skywalking.oap.server.core.profiling.ebpf.storage.EBPFProfilingTargetType; +import org.apache.skywalking.oap.server.core.profiling.ebpf.storage.EBPFProfilingTaskRecord; +import org.apache.skywalking.oap.server.core.profiling.ebpf.storage.EBPFProfilingTriggerType; +import org.apache.skywalking.oap.server.core.profiling.trace.ProfileThreadSnapshotRecord; +import org.apache.skywalking.oap.server.core.query.enumeration.Order; +import org.apache.skywalking.oap.server.core.query.enumeration.ProfilingSupportStatus; +import org.apache.skywalking.oap.server.core.query.input.DashboardSetting; +import org.apache.skywalking.oap.server.core.query.input.Duration; +import org.apache.skywalking.oap.server.core.query.input.MetricsCondition; +import org.apache.skywalking.oap.server.core.query.input.RecordCondition; +import org.apache.skywalking.oap.server.core.query.input.TopNCondition; +import 
org.apache.skywalking.oap.server.core.query.input.TraceScopeCondition; +import org.apache.skywalking.oap.server.core.query.type.Alarms; +import org.apache.skywalking.oap.server.core.query.type.BrowserErrorLog; +import org.apache.skywalking.oap.server.core.query.type.BrowserErrorLogs; +import org.apache.skywalking.oap.server.core.query.type.Call; +import org.apache.skywalking.oap.server.core.query.type.DashboardConfiguration; +import org.apache.skywalking.oap.server.core.query.type.EBPFProfilingSchedule; +import org.apache.skywalking.oap.server.core.query.type.Endpoint; +import org.apache.skywalking.oap.server.core.query.type.HeatMap; +import org.apache.skywalking.oap.server.core.query.type.KeyValue; +import org.apache.skywalking.oap.server.core.query.type.Logs; +import org.apache.skywalking.oap.server.core.query.type.MetricsValues; +import org.apache.skywalking.oap.server.core.query.type.NullableValue; +import org.apache.skywalking.oap.server.core.query.type.Process; +import org.apache.skywalking.oap.server.core.query.type.ProfileTask; +import org.apache.skywalking.oap.server.core.query.type.ProfileTaskLog; +import org.apache.skywalking.oap.server.core.query.type.QueryOrder; +import org.apache.skywalking.oap.server.core.query.type.SelectedRecord; +import org.apache.skywalking.oap.server.core.query.type.Service; +import org.apache.skywalking.oap.server.core.query.type.ServiceInstance; +import org.apache.skywalking.oap.server.core.query.type.Span; +import org.apache.skywalking.oap.server.core.query.type.TemplateChangeStatus; +import org.apache.skywalking.oap.server.core.query.type.TraceBrief; +import org.apache.skywalking.oap.server.core.query.type.TraceState; +import org.apache.skywalking.oap.server.core.query.type.event.EventQueryCondition; +import org.apache.skywalking.oap.server.core.query.type.event.Events; +import org.apache.skywalking.oap.server.core.storage.IMetricsDAO; +import org.apache.skywalking.oap.server.core.storage.IRecordDAO; +import 
org.apache.skywalking.oap.server.core.storage.SessionCacheCallback; +import org.apache.skywalking.oap.server.core.storage.cache.INetworkAddressAliasDAO; +import org.apache.skywalking.oap.server.core.storage.management.UIMenuManagementDAO; +import org.apache.skywalking.oap.server.core.storage.management.UITemplateManagementDAO; +import org.apache.skywalking.oap.server.core.storage.model.Model; +import org.apache.skywalking.oap.server.core.storage.profiling.continuous.IContinuousProfilingPolicyDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.ebpf.IEBPFProfilingDataDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.ebpf.IEBPFProfilingScheduleDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.ebpf.IEBPFProfilingTaskDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.ebpf.IServiceLabelDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.trace.IProfileTaskLogQueryDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.trace.IProfileTaskQueryDAO; +import org.apache.skywalking.oap.server.core.storage.profiling.trace.IProfileThreadSnapshotQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IAggregationQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IAlarmQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IBrowserLogQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IEventQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.ILogQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IMetadataQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IMetricsQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IRecordsQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.ISpanAttachedEventQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.ITagAutoCompleteQueryDAO; +import 
org.apache.skywalking.oap.server.core.storage.query.ITopologyQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.ITraceQueryDAO; +import org.apache.skywalking.oap.server.library.client.request.InsertRequest; +import org.apache.skywalking.oap.server.library.client.request.UpdateRequest; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +public class EmptyDAO implements + INetworkAddressAliasDAO, + ITopologyQueryDAO, + IMetricsDAO, + ILogQueryDAO, + ITraceQueryDAO, + IMetricsQueryDAO, + IAggregationQueryDAO, + IAlarmQueryDAO, + IRecordsQueryDAO, + IRecordDAO, + IBrowserLogQueryDAO, + IMetadataQueryDAO, + IProfileTaskQueryDAO, + IProfileTaskLogQueryDAO, + IProfileThreadSnapshotQueryDAO, + UITemplateManagementDAO, + UIMenuManagementDAO, + IEventQueryDAO, + IEBPFProfilingTaskDAO, + IEBPFProfilingScheduleDAO, + IEBPFProfilingDataDAO, + IContinuousProfilingPolicyDAO, + IServiceLabelDAO, + ITagAutoCompleteQueryDAO, + ISpanAttachedEventQueryDAO +{ + @Override + public List loadLastUpdate(long timeBucket) { + return null; + } + + @Override + public InsertRequest prepareBatchInsert(Model model, Record record) throws IOException { + return null; + } + + @Override + public UIMenu getMenu(String id) throws IOException { + return null; + } + + @Override + public void saveMenu(UIMenu menu) throws IOException { + + } + + @Override + public DashboardConfiguration getTemplate(String id) throws IOException { + return null; + } + + @Override + public List getAllTemplates(Boolean includingDisabled) throws IOException { + return null; + } + + @Override + public TemplateChangeStatus addTemplate(DashboardSetting setting) throws IOException { + return null; + } + + @Override + public TemplateChangeStatus changeTemplate(DashboardSetting setting) throws IOException { + return null; + } + + @Override + public TemplateChangeStatus disableTemplate(String id) throws IOException { + return null; + } + + @Override + 
public void savePolicy(ContinuousProfilingPolicy policy) throws IOException { + + } + + @Override + public List queryPolicies(List serviceIdList) throws IOException { + return null; + } + + @Override + public List queryData(List scheduleIdList, long beginTime, long endTime) throws IOException { + return null; + } + + @Override + public List querySchedules(String taskId) throws IOException { + return null; + } + + @Override + public List queryTasksByServices(List serviceIdList, EBPFProfilingTriggerType triggerType, long taskStartTime, long latestUpdateTime) throws IOException { + return null; + } + + @Override + public List queryTasksByTargets(String serviceId, String serviceInstanceId, List targetTypes, EBPFProfilingTriggerType triggerType, long taskStartTime, long latestUpdateTime) throws IOException { + return null; + } + + @Override + public List getTaskRecord(String id) throws IOException { + return null; + } + + @Override + public List queryAllLabels(String serviceId) throws IOException { + return null; + } + + @Override + public List getTaskLogList() throws IOException { + return null; + } + + @Override + public List queryProfiledSegmentIdList(String taskId) throws IOException { + return null; + } + + @Override + public int queryMinSequence(String segmentId, long start, long end) throws IOException { + return 0; + } + + @Override + public int queryMaxSequence(String segmentId, long start, long end) throws IOException { + return 0; + } + + @Override + public List queryRecords(String segmentId, int minSequence, int maxSequence) throws IOException { + return null; + } + + @Override + public List sortMetrics(TopNCondition condition, String valueColumnName, Duration duration, List additionalConditions) throws IOException { + return null; + } + + @Override + public Alarms getAlarm(Integer scopeId, String keyword, int limit, int from, Duration duration, List tags) throws IOException { + return null; + } + + @Override + public void parserDataBinaryBase64(String 
dataBinaryBase64, List tags) { + } + + @Override + public BrowserErrorLogs queryBrowserErrorLogs(String serviceId, String serviceVersionId, String pagePathId, BrowserErrorCategory category, Duration duration, int limit, int from) throws IOException { + return null; + } + + @Override + public BrowserErrorLog parserDataBinary(String dataBinaryBase64) { + return null; + } + + @Override + public BrowserErrorLog parserDataBinary(byte[] dataBinary) { + return null; + } + + @Override + public Events queryEvents(EventQueryCondition condition) throws Exception { + return null; + } + + @Override + public Events queryEvents(List conditionList) throws Exception { + return null; + } + + @Override + public List querySpanAttachedEvents(SpanAttachedEventTraceType type, List traceIds) throws IOException { + return Collections.emptyList(); + } + + @Override + public Set queryTagAutocompleteKeys(TagType tagType, int limit, Duration duration) throws IOException { + return null; + } + + @Override + public Set queryTagAutocompleteValues(TagType tagType, String tagKey, int limit, Duration duration) throws IOException { + return null; + } + + @Override + public List loadServiceRelationsDetectedAtServerSide(Duration duration, List serviceIds) throws IOException { + return null; + } + + @Override + public List loadServiceRelationDetectedAtClientSide(Duration duration, List serviceIds) throws IOException { + return null; + } + + @Override + public List loadServiceRelationsDetectedAtServerSide(Duration duration) throws IOException { + return null; + } + + @Override + public List loadServiceRelationDetectedAtClientSide(Duration duration) throws IOException { + return null; + } + + @Override + public List loadInstanceRelationDetectedAtServerSide(String clientServiceId, String serverServiceId, Duration duration) throws IOException { + return null; + } + + @Override + public List loadInstanceRelationDetectedAtClientSide(String clientServiceId, String serverServiceId, Duration duration) throws 
IOException { + return null; + } + + @Override + public List loadEndpointRelation(Duration duration, String destEndpointId) throws IOException { + return null; + } + + @Override + public List loadProcessRelationDetectedAtClientSide(String serviceInstanceId, Duration duration) throws IOException { + return null; + } + + @Override + public List loadProcessRelationDetectedAtServerSide(String serviceInstanceId, Duration duration) throws IOException { + return null; + } + + @Override + public TraceBrief queryBasicTraces(Duration duration, long minDuration, long maxDuration, String serviceId, String serviceInstanceId, String endpointId, String traceId, int limit, int from, TraceState traceState, QueryOrder queryOrder, List tags) throws IOException { + return null; + } + + @Override + public List queryByTraceId(String traceId) throws IOException { + return null; + } + + @Override + public List queryBySegmentIdList(List segmentIdList) throws IOException { + return null; + } + + @Override + public List queryByTraceIdWithInstanceId(List traceIdList, List instanceIdList) throws IOException { + return null; + } + + @Override + public List doFlexibleTraceQuery(String traceId) throws IOException { + return null; + } + + @Override + public List getTaskList(String serviceId, String endpointName, Long startTimeBucket, Long endTimeBucket, Integer limit) throws IOException { + return null; + } + + @Override + public ProfileTask getById(String id) throws IOException { + return null; + } + + @Override + public List multiGet(Model model, List metrics) throws Exception { + return null; + } + + @Override + public InsertRequest prepareBatchInsert(Model model, Metrics metrics, SessionCacheCallback callback) throws IOException { + return null; + } + + @Override + public UpdateRequest prepareBatchUpdate(Model model, Metrics metrics, SessionCacheCallback callback) throws IOException { + return null; + } + + @Override + public boolean isExpiredCache(Model model, Metrics cachedValue, long 
currentTimeMillis, int ttl) { + return IMetricsDAO.super.isExpiredCache(model, cachedValue, currentTimeMillis, ttl); + } + + @Override + public List listServices() throws IOException { + return null; + } + + @Override + public List listInstances(Duration duration, String serviceId) throws IOException { + return null; + } + + @Override + public ServiceInstance getInstance(String instanceId) throws IOException { + return null; + } + + @Override + public List getInstances(List instanceIds) throws IOException { + return null; + } + + @Override + public List findEndpoint(String keyword, String serviceId, int limit) throws IOException { + return null; + } + + @Override + public List listProcesses(String serviceId, ProfilingSupportStatus supportStatus, long lastPingStartTimeBucket, long lastPingEndTimeBucket) throws IOException { + return null; + } + + @Override + public List listProcesses(String serviceInstanceId, Duration duration, boolean includeVirtual) throws IOException { + return null; + } + + @Override + public List listProcesses(String agentId) throws IOException { + return null; + } + + @Override + public long getProcessCount(String serviceId, ProfilingSupportStatus profilingSupportStatus, long lastPingStartTimeBucket, long lastPingEndTimeBucket) throws IOException { + return 0; + } + + @Override + public long getProcessCount(String instanceId) throws IOException { + return 0; + } + + @Override + public Process getProcess(String processId) throws IOException { + return null; + } + + @Override + public List readRecords(RecordCondition condition, String valueColumnName, Duration duration) throws IOException { + return null; + } + + @Override + public NullableValue readMetricsValue(MetricsCondition condition, String valueColumnName, Duration duration) throws IOException { + return null; + } + + @Override + public MetricsValues readMetricsValues(MetricsCondition condition, String valueColumnName, Duration duration) throws IOException { + return null; + } + + 
@Override + public List readLabeledMetricsValues(MetricsCondition condition, String valueColumnName, List labels, Duration duration) throws IOException { + return null; + } + + @Override + public HeatMap readHeatMap(MetricsCondition condition, String valueColumnName, Duration duration) throws IOException { + return null; + } + + @Override + public boolean supportQueryLogsByKeywords() { + return false; + } + + @Override + public Logs queryLogs(String serviceId, String serviceInstanceId, String endpointId, TraceScopeCondition relatedTrace, Order queryOrder, int from, int limit, Duration duration, List tags, List keywordsOfContent, List excludingKeywordsOfContent) throws IOException { + return null; + } + + @Override + public void parserDataBinary(String dataBinaryBase64, List tags) { + } + + @Override + public void parserDataBinary(byte[] dataBinary, List tags) { + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/internal/HostAndPort.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/internal/HostAndPort.java new file mode 100644 index 00000000000..593621cb3b7 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/internal/HostAndPort.java @@ -0,0 +1,102 @@ +/* + * Copyright 2015-2019 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ +package zipkin.server.storage.cassandra.internal; + +import zipkin2.Endpoint; + +// Similar to com.google.common.net.HostAndPort, but no guava dep +public final class HostAndPort { + final String host; + final int port; + + HostAndPort(String host, int port) { + this.host = host; + this.port = port; + } + + /** Returns the unvalidated hostname or IP literal */ + public String getHost() { + return host; + } + + /** Returns the port */ + public int getPort() { + return port; + } + + @Override public boolean equals(Object o) { + if (o == this) return true; + if (!(o instanceof HostAndPort)) return false; + HostAndPort that = (HostAndPort) o; + return host.equals(that.host) && port == that.port; + } + + @Override public int hashCode() { + int h = 1; + h *= 1000003; + h ^= (host == null) ? 0 : host.hashCode(); + h *= 1000003; + h ^= port; + return h; + } + + @Override public String toString() { + return "HostAndPort{host=" + host + ", port=" + port + "}"; + } + + /** + * Constructs a host-port pair from the given string, defaulting to the indicated port if absent + */ + public static HostAndPort fromString(String hostPort, int defaultPort) { + if (hostPort == null) throw new NullPointerException("hostPort == null"); + + String host = hostPort; + int endHostIndex = hostPort.length(); + if (hostPort.startsWith("[")) { // Bracketed IPv6 + endHostIndex = hostPort.lastIndexOf(']') + 1; + host = hostPort.substring(1, endHostIndex == 0 ? 
1 : endHostIndex - 1); + if (!Endpoint.newBuilder().parseIp(host)) { // reuse our IPv6 validator + throw new IllegalArgumentException(hostPort + " contains an invalid IPv6 literal"); + } + } else { + int colonIndex = hostPort.indexOf(':'), nextColonIndex = hostPort.lastIndexOf(':'); + if (colonIndex >= 0) { + if (colonIndex == nextColonIndex) { // only 1 colon + host = hostPort.substring(0, colonIndex); + endHostIndex = colonIndex; + } else if (!Endpoint.newBuilder().parseIp(hostPort)) { // reuse our IPv6 validator + throw new IllegalArgumentException(hostPort + " is an invalid IPv6 literal"); + } + } + } + if (host.isEmpty()) throw new IllegalArgumentException(hostPort + " has an empty host"); + if (endHostIndex + 1 < hostPort.length() && hostPort.charAt(endHostIndex) == ':') { + return new HostAndPort(host, validatePort(hostPort.substring(endHostIndex + 1), hostPort)); + } + return new HostAndPort(host, defaultPort); + } + + static int validatePort(String portString, String hostPort) { + for (int i = 0, length = portString.length(); i < length; i++) { + char c = portString.charAt(i); + if (c >= '0' && c <= '9') continue; // isDigit + throw new IllegalArgumentException(hostPort + " has an invalid port"); + } + int result = Integer.parseInt(portString); + if (result == 0 || result > 0xffff) { + throw new IllegalArgumentException(hostPort + " has an invalid port"); + } + return result; + } +} diff --git a/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/internal/SessionBuilder.java b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/internal/SessionBuilder.java new file mode 100644 index 00000000000..f050f28333f --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/java/zipkin/server/storage/cassandra/internal/SessionBuilder.java @@ -0,0 +1,105 @@ +/* + * Copyright 2015-2020 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + 
* in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package zipkin.server.storage.cassandra.internal; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.CqlSessionBuilder; +import com.datastax.oss.driver.api.core.auth.AuthProvider; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.DriverOption; +import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; +import com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory; +import com.datastax.oss.driver.internal.core.tracker.RequestLogger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import zipkin2.internal.Nullable; + +import java.net.InetSocketAddress; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_CONSISTENCY; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_LOGGER_VALUES; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_TIMEOUT; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_TRACKER_CLASS; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_WARN_IF_SET_KEYSPACE; +import static 
com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS; + +public final class SessionBuilder { + /** Returns a connected session. Closes the cluster if any exception occurred. */ + public static CqlSession buildSession( + String contactPoints, + String localDc, + Map poolingOptions, + @Nullable AuthProvider authProvider, + boolean useSsl + ) { + // Some options aren't supported by builder methods. In these cases, we use driver config + // See https://groups.google.com/a/lists.datastax.com/forum/#!topic/java-driver-user/Z8HrCDX47Q0 + ProgrammaticDriverConfigLoaderBuilder config = + // We aren't reading any resources from the classpath, but this prevents errors running in the + // server, where Thread.currentThread().getContextClassLoader() returns null + DriverConfigLoader.programmaticBuilder(SessionBuilder.class.getClassLoader()); + + // Ported from java-driver v3 PoolingOptions.setPoolTimeoutMillis as request timeout includes that + config.withDuration(REQUEST_TIMEOUT, Duration.ofMinutes(1)); + + CqlSessionBuilder builder = CqlSession.builder(); + builder.addContactPoints(parseContactPoints(contactPoints)); + if (authProvider != null) builder.withAuthProvider(authProvider); + + // In java-driver v3, we used LatencyAwarePolicy(DCAwareRoundRobinPolicy|RoundRobinPolicy) + // where DCAwareRoundRobinPolicy was used if localDc != null + // + // In java-driver v4, the default policy is token-aware and localDc is required. Hence, we + // use the default load balancing policy + // * https://github.com/datastax/java-driver/blob/master/manual/core/load_balancing/README.md + builder.withLocalDatacenter(localDc); + config = config.withString(REQUEST_CONSISTENCY, "LOCAL_ONE"); + // Pooling options changed dramatically from v3->v4. This is a close match. 
+ poolingOptions.forEach(config::withInt); + + // All Zipkin CQL writes are idempotent + config = config.withBoolean(REQUEST_DEFAULT_IDEMPOTENCE, true); + + if (useSsl) config = config.withClass(SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class); + + // Log categories can enable query logging + Logger requestLogger = LoggerFactory.getLogger(RequestLogger.class); + if (requestLogger.isDebugEnabled()) { + config = config.withClass(REQUEST_TRACKER_CLASS, RequestLogger.class); + config = config.withBoolean(REQUEST_LOGGER_SUCCESS_ENABLED, true); + // Only show bodies when TRACE is enabled + config = config.withBoolean(REQUEST_LOGGER_VALUES, requestLogger.isTraceEnabled()); + } + + // Don't warn: ensureSchema creates the keyspace. Hence, we need to "use" it later. + config = config.withBoolean(REQUEST_WARN_IF_SET_KEYSPACE, false); + + return builder.withConfigLoader(config.build()).build(); + } + + static List parseContactPoints(String contactPoints) { + List result = new ArrayList<>(); + for (String contactPoint : contactPoints.split(",", 100)) { + HostAndPort parsed = HostAndPort.fromString(contactPoint, 9042); + result.add(new InetSocketAddress(parsed.getHost(), parsed.getPort())); + } + return result; + } +} diff --git a/zipkin-server/storage-cassandra/src/main/resources/META-INF/services/org.apache.skywalking.oap.server.library.module.ModuleProvider b/zipkin-server/storage-cassandra/src/main/resources/META-INF/services/org.apache.skywalking.oap.server.library.module.ModuleProvider new file mode 100755 index 00000000000..38538a46d12 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/main/resources/META-INF/services/org.apache.skywalking.oap.server.library.module.ModuleProvider @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +zipkin.server.storage.cassandra.CassandraProvider \ No newline at end of file diff --git a/zipkin-server/storage-cassandra/src/test/java/zipkin/server/storage/cassandra/ApplicationConfigLoader.java b/zipkin-server/storage-cassandra/src/test/java/zipkin/server/storage/cassandra/ApplicationConfigLoader.java new file mode 100644 index 00000000000..fa536c3c099 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/test/java/zipkin/server/storage/cassandra/ApplicationConfigLoader.java @@ -0,0 +1,215 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ +package zipkin.server.storage.cassandra; + +import org.apache.skywalking.oap.server.library.module.ApplicationConfiguration; +import org.apache.skywalking.oap.server.library.module.ProviderNotFoundException; +import org.apache.skywalking.oap.server.library.util.CollectionUtils; +import org.apache.skywalking.oap.server.library.util.PropertyPlaceholderHelper; +import org.apache.skywalking.oap.server.library.util.ResourceUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.Yaml; + +import java.io.FileNotFoundException; +import java.io.Reader; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Map; +import java.util.Properties; + +/** + * Initialize collector settings with following sources. Use application.yml as primary setting, and fix missing setting + * by default settings in application-default.yml. + *

+ At last, override setting by system.properties and system.envs if the key matches moduleName.provideName.settingKey. + */ +public class ApplicationConfigLoader { + static final Logger log = LoggerFactory.getLogger(ApplicationConfigLoader.class.getName()); + + private static final String DISABLE_SELECTOR = "-"; + private static final String SELECTOR = "selector"; + + private final Yaml yaml = new Yaml(); + + public ApplicationConfiguration load() { + ApplicationConfiguration configuration = new ApplicationConfiguration(); + this.loadConfig(configuration); + this.overrideConfigBySystemEnv(configuration); + return configuration; + } + + @SuppressWarnings("unchecked") + private void loadConfig(ApplicationConfiguration configuration) { + try { + Reader applicationReader = ResourceUtils.read("application.yml"); + Map<String, Map<String, Object>> moduleConfig = yaml.loadAs(applicationReader, Map.class); + if (CollectionUtils.isNotEmpty(moduleConfig)) { + selectConfig(moduleConfig); + moduleConfig.forEach((moduleName, providerConfig) -> { + if (providerConfig.size() > 0) { + log.info("Get a module define from application.yml, module name: {}", moduleName); + ApplicationConfiguration.ModuleConfiguration moduleConfiguration = configuration.addModule( + moduleName); + providerConfig.forEach((providerName, config) -> { + log.info( + "Get a provider define belong to {} module, provider name: {}", moduleName, + providerName + ); + final Map<String, ?> propertiesConfig = (Map<String, ?>) config; + final Properties properties = new Properties(); + if (propertiesConfig != null) { + propertiesConfig.forEach((propertyName, propertyValue) -> { + if (propertyValue instanceof Map) { + Properties subProperties = new Properties(); + ((Map<String, ?>) propertyValue).forEach((key, value) -> { + subProperties.put(key, value); + replacePropertyAndLog(key, value, subProperties, providerName); + }); + properties.put(propertyName, subProperties); + } else { + properties.put(propertyName, propertyValue); + replacePropertyAndLog(propertyName, 
propertyValue, properties, providerName); + } + }); + } + moduleConfiguration.addProviderConfiguration(providerName, properties); + }); + } else { + log.warn( + "Get a module define from application.yml, but no provider define, use default, module name: {}", + moduleName + ); + } + }); + } + } catch (FileNotFoundException e) { + throw new IllegalStateException(e); + } + } + + private void replacePropertyAndLog(final String propertyName, final Object propertyValue, final Properties target, + final Object providerName) { + final String valueString = PropertyPlaceholderHelper.INSTANCE + .replacePlaceholders(String.valueOf(propertyValue), target); + if (valueString.trim().length() == 0) { + target.replace(propertyName, valueString); + log.info("Provider={} config={} has been set as an empty string", providerName, propertyName); + } else { + // Use YAML to do data type conversion. + final Object replaceValue = convertValueString(valueString); + if (replaceValue != null) { + target.replace(propertyName, replaceValue); + } + } + } + + private Object convertValueString(String valueString) { + try { + Object replaceValue = yaml.load(valueString); + if (replaceValue instanceof String || replaceValue instanceof Integer || replaceValue instanceof Long || replaceValue instanceof Boolean || replaceValue instanceof ArrayList) { + return replaceValue; + } else { + return valueString; + } + } catch (Exception e) { + log.warn("yaml convert value type error, use origin values string. 
valueString={}", valueString, e); + return valueString; + } + } + + private void overrideConfigBySystemEnv(ApplicationConfiguration configuration) { + for (Map.Entry<Object, Object> prop : System.getProperties().entrySet()) { + overrideModuleSettings(configuration, prop.getKey().toString(), prop.getValue().toString()); + } + } + + private void selectConfig(final Map<String, Map<String, Object>> moduleConfiguration) { + Iterator<Map.Entry<String, Map<String, Object>>> moduleIterator = moduleConfiguration.entrySet().iterator(); + while (moduleIterator.hasNext()) { + Map.Entry<String, Map<String, Object>> entry = moduleIterator.next(); + final String moduleName = entry.getKey(); + final Map<String, Object> providerConfig = entry.getValue(); + if (!providerConfig.containsKey(SELECTOR)) { + continue; + } + final String selector = (String) providerConfig.get(SELECTOR); + final String resolvedSelector = PropertyPlaceholderHelper.INSTANCE.replacePlaceholders( + selector, System.getProperties() + ); + providerConfig.entrySet().removeIf(e -> !resolvedSelector.equals(e.getKey())); + + if (!providerConfig.isEmpty()) { + continue; + } + + if (!DISABLE_SELECTOR.equals(resolvedSelector)) { + throw new ProviderNotFoundException( + "no provider found for module " + moduleName + ", " + + "if you're sure it's not required module and want to remove it, " + + "set the selector to -" + ); + } + + // now the module can be safely removed + moduleIterator.remove(); + log.info("Remove module {} without any provider", moduleName); + } + } + + private void overrideModuleSettings(ApplicationConfiguration configuration, String key, String value) { + int moduleAndConfigSeparator = key.indexOf('.'); + if (moduleAndConfigSeparator <= 0) { + return; + } + String moduleName = key.substring(0, moduleAndConfigSeparator); + String providerSettingSubKey = key.substring(moduleAndConfigSeparator + 1); + ApplicationConfiguration.ModuleConfiguration moduleConfiguration = configuration.getModuleConfiguration( + moduleName); + if (moduleConfiguration == null) { + return; + } + int providerAndConfigSeparator = 
providerSettingSubKey.indexOf('.'); + if (providerAndConfigSeparator <= 0) { + return; + } + String providerName = providerSettingSubKey.substring(0, providerAndConfigSeparator); + String settingKey = providerSettingSubKey.substring(providerAndConfigSeparator + 1); + if (!moduleConfiguration.has(providerName)) { + return; + } + Properties providerSettings = moduleConfiguration.getProviderConfiguration(providerName); + if (!providerSettings.containsKey(settingKey)) { + return; + } + Object originValue = providerSettings.get(settingKey); + Class<?> type = originValue.getClass(); + if (type.equals(int.class) || type.equals(Integer.class)) + providerSettings.put(settingKey, Integer.valueOf(value)); + else if (type.equals(String.class)) + providerSettings.put(settingKey, value); + else if (type.equals(long.class) || type.equals(Long.class)) + providerSettings.put(settingKey, Long.valueOf(value)); + else if (type.equals(boolean.class) || type.equals(Boolean.class)) { + providerSettings.put(settingKey, Boolean.valueOf(value)); + } else { + return; + } + + log.info( + "The setting has been override by key: {}, value: {}, in {} provider of {} module through {}", settingKey, + value, providerName, moduleName, "System.properties" + ); + } +} diff --git a/zipkin-server/storage-cassandra/src/test/java/zipkin/server/storage/cassandra/CassandraExtension.java b/zipkin-server/storage-cassandra/src/test/java/zipkin/server/storage/cassandra/CassandraExtension.java new file mode 100644 index 00000000000..3209158d350 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/test/java/zipkin/server/storage/cassandra/CassandraExtension.java @@ -0,0 +1,69 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ + +package zipkin.server.storage.cassandra; + +import org.junit.jupiter.api.extension.AfterAllCallback; +import org.junit.jupiter.api.extension.BeforeAllCallback; +import org.junit.jupiter.api.extension.ExtensionContext; +import org.opentest4j.TestAbortedException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.InternetProtocol; +import org.testcontainers.containers.output.Slf4jLogConsumer; +import org.testcontainers.containers.wait.strategy.Wait; + +import java.time.Duration; + +import static org.testcontainers.utility.DockerImageName.parse; + +public class CassandraExtension implements BeforeAllCallback, AfterAllCallback { + static final Logger LOGGER = LoggerFactory.getLogger(CassandraExtension.class); + static final int CASSANDRA_PORT = 9042; + + final CassandraContainer container = new CassandraContainer(); + + @Override + public void afterAll(ExtensionContext context) throws Exception { + container.stop(); + } + + @Override + public void beforeAll(ExtensionContext context) throws Exception { + if (context.getRequiredTestClass().getEnclosingClass() != null) { + // Only run once in outermost scope. 
+ return; + } + + container.start(); + LOGGER.info("Using bootstrapServer " + bootstrapServer()); + } + + String bootstrapServer() { + return container.getHost() + ":" + container.getMappedPort(CASSANDRA_PORT); + } + + static final class CassandraContainer extends GenericContainer<CassandraContainer> { + CassandraContainer() { + super(parse("cassandra:4.1.3")); + if ("true".equals(System.getProperty("docker.skip"))) { + throw new TestAbortedException("${docker.skip} == true"); + } + waitStrategy = Wait.forSuccessfulCommand("cqlsh -e \"describe keyspaces\"").withStartupTimeout(Duration.ofMinutes(5)); + addFixedExposedPort(CASSANDRA_PORT, CASSANDRA_PORT, InternetProtocol.TCP); + withLogConsumer(new Slf4jLogConsumer(LOGGER)); + } + } +} diff --git a/zipkin-server/storage-cassandra/src/test/java/zipkin/server/storage/cassandra/ITCassandraStorage.java b/zipkin-server/storage-cassandra/src/test/java/zipkin/server/storage/cassandra/ITCassandraStorage.java new file mode 100644 index 00000000000..04b4bdc628b --- /dev/null +++ b/zipkin-server/storage-cassandra/src/test/java/zipkin/server/storage/cassandra/ITCassandraStorage.java @@ -0,0 +1,129 @@ +/* + * Copyright 2015-2023 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package zipkin.server.storage.cassandra; + +import org.apache.skywalking.oap.server.core.analysis.manual.searchtag.TagType; +import org.apache.skywalking.oap.server.core.query.enumeration.Step; +import org.apache.skywalking.oap.server.core.query.input.Duration; +import org.apache.skywalking.oap.server.core.storage.StorageModule; +import org.apache.skywalking.oap.server.core.storage.query.ITagAutoCompleteQueryDAO; +import org.apache.skywalking.oap.server.core.storage.query.IZipkinQueryDAO; +import org.apache.skywalking.oap.server.library.module.ModuleConfigException; +import org.apache.skywalking.oap.server.library.module.ModuleManager; +import org.apache.skywalking.oap.server.library.module.ModuleNotFoundException; +import org.apache.skywalking.oap.server.library.module.ModuleStartException; +import org.apache.skywalking.oap.server.receiver.zipkin.ZipkinReceiverConfig; +import org.apache.skywalking.oap.server.receiver.zipkin.trace.SpanForward; +import org.joda.time.DateTime; +import org.junit.Assert; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.mockito.junit.jupiter.MockitoExtension; +import zipkin2.Span; +import zipkin2.TestObjects; +import zipkin2.storage.QueryRequest; + +import java.io.IOException; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@Timeout(120) +@ExtendWith(MockitoExtension.class) +public class ITCassandraStorage { + @RegisterExtension + CassandraExtension cassandra = new CassandraExtension(); + + private final ModuleManager moduleManager = new ModuleManager(); + private SpanForward forward; + private ITagAutoCompleteQueryDAO tagAutoCompleteQueryDAO; + private IZipkinQueryDAO 
zipkinQueryDAO; + @BeforeAll + public void setup() throws ModuleNotFoundException, ModuleConfigException, ModuleStartException { + final ApplicationConfigLoader loader = new ApplicationConfigLoader(); + moduleManager.init(loader.load()); + final ZipkinReceiverConfig config = new ZipkinReceiverConfig(); + config.setSearchableTracesTags("http.path"); + this.forward = new SpanForward(config, moduleManager); + this.tagAutoCompleteQueryDAO = moduleManager.find(StorageModule.NAME).provider().getService(ITagAutoCompleteQueryDAO.class); + this.zipkinQueryDAO = moduleManager.find(StorageModule.NAME).provider().getService(IZipkinQueryDAO.class); + } + + @Test + public void test() throws InterruptedException, IOException { + final List<Span> traceSpans = TestObjects.newTrace(""); + forward.send(traceSpans); + Thread.sleep(TimeUnit.SECONDS.toMillis(5)); + + // service names + final List<String> serviceNames = zipkinQueryDAO.getServiceNames(); + Assert.assertEquals(2, serviceNames.size()); + Assert.assertTrue(serviceNames.contains(TestObjects.FRONTEND.serviceName())); + Assert.assertTrue(serviceNames.contains(TestObjects.BACKEND.serviceName())); + + // remote service names + final List<String> remoteServiceNames = zipkinQueryDAO.getRemoteServiceNames(TestObjects.FRONTEND.serviceName()); + Assert.assertEquals(1, remoteServiceNames.size()); + Assert.assertEquals(TestObjects.BACKEND.serviceName(), remoteServiceNames.get(0)); + + // span names + final List<String> spanNames = zipkinQueryDAO.getSpanNames(TestObjects.BACKEND.serviceName()); + Assert.assertEquals(2, spanNames.size()); + Assert.assertTrue(spanNames.contains("query")); + Assert.assertTrue(spanNames.contains("get")); + + // search traces + final QueryRequest query = QueryRequest.newBuilder() + .lookback(86400000L) + .endTs(System.currentTimeMillis()) + .minDuration(1000L) + .limit(10).build(); + Duration duration = new Duration(); + duration.setStep(Step.SECOND); + DateTime endTime = new DateTime(query.endTs()); + DateTime startTime = 
endTime.minus(org.joda.time.Duration.millis(query.lookback())); + duration.setStart(startTime.toString("yyyy-MM-dd HHmmss")); + duration.setEnd(endTime.toString("yyyy-MM-dd HHmmss")); + final List<List<Span>> traces = zipkinQueryDAO.getTraces(query, duration); + Assert.assertEquals(1, traces.size()); + final List<Span> needsSpans = traceSpans.stream().filter(s -> s.duration() > 1000L).collect(Collectors.toList()); + Assert.assertEquals(needsSpans.size(), traces.get(0).size()); + for (Span needSpan : needsSpans) { + Assert.assertTrue(traces.get(0).stream().anyMatch(needSpan::equals)); + } + + // get trace + final List<Span> trace = zipkinQueryDAO.getTrace(traceSpans.get(0).traceId()); + Assert.assertEquals(traceSpans.size(), trace.size()); + for (Span span : traceSpans) { + Assert.assertTrue(trace.stream().anyMatch(span::equals)); + } + + final Set<String> keys = tagAutoCompleteQueryDAO.queryTagAutocompleteKeys(TagType.ZIPKIN, 10, duration); + Assert.assertEquals(1, keys.size()); + Assert.assertTrue(keys.contains("http.path")); + + final Set<String> values = tagAutoCompleteQueryDAO.queryTagAutocompleteValues(TagType.ZIPKIN, "http.path", 10, duration); + Assert.assertEquals(1, values.size()); + Assert.assertTrue(values.contains("/api")); + } + +} diff --git a/zipkin-server/storage-cassandra/src/test/resources/application.yml b/zipkin-server/storage-cassandra/src/test/resources/application.yml new file mode 100644 index 00000000000..92458620a68 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/test/resources/application.yml @@ -0,0 +1,66 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +core: + selector: zipkin + zipkin: + # The max length of service + instance names should be less than 200 + serviceNameMaxLength: ${ZIPKIN_SERVICE_NAME_MAX_LENGTH:70} + # The period(in seconds) of refreshing the service cache. Default value is 10s. + serviceCacheRefreshInterval: ${ZIPKIN_SERVICE_CACHE_REFRESH_INTERVAL:10} + instanceNameMaxLength: ${ZIPKIN_INSTANCE_NAME_MAX_LENGTH:70} + # The max length of service + endpoint names should be less than 240 + endpointNameMaxLength: ${ZIPKIN_ENDPOINT_NAME_MAX_LENGTH:150} + recordDataTTL: ${ZIPKIN_CORE_RECORD_DATA_TTL:3} # Unit is day + metricsDataTTL: ${ZIPKIN_CORE_METRICS_DATA_TTL:7} # Unit is day + # The period of L1 aggregation flush to L2 aggregation. Unit is ms. + l1FlushPeriod: ${ZIPKIN_CORE_L1_AGGREGATION_FLUSH_PERIOD:500} + # The threshold of session time. Unit is ms. Default value is 70s. + storageSessionTimeout: ${ZIPKIN_CORE_STORAGE_SESSION_TIMEOUT:70000} + # Defines a set of span tag keys which are searchable. + # The max length of key=value should be less than 256 or will be dropped. + searchableTracesTags: ${ZIPKIN_SEARCHABLE_TAG_KEYS:http.method} + # The trace sample rate precision is 1/10000, should be between 0 and 10000 + traceSampleRate: ${ZIPKIN_SAMPLE_RATE:10000} + # The number of threads used to prepare metrics data to the storage. + prepareThreads: ${ZIPKIN_PREPARE_THREADS:2} + # The period of doing data persistence. 
Unit is second.Default value is 25s + persistentPeriod: ${ZIPKIN_PERSISTENT_PERIOD:1} + +storage: + selector: ${ZIPKIN_STORAGE:cassandra} + cassandra: + keyspace: ${ZIPKIN_STORAGE_CASSANDRA_KEYSPACE:zipkin} + # Comma separated list of host addresses part of Cassandra cluster. Ports default to 9042 but you can also specify a custom port with 'host:port'. + contactPoints: ${ZIPKIN_STORAGE_CASSANDRA_CONTACT_POINTS:localhost} + # Name of the datacenter that will be considered "local" for load balancing. + localDc: ${ZIPKIN_STORAGE_CASSANDRA_LOCAL_DC:datacenter1} + # Will throw an exception on startup if authentication fails. + username: ${ZIPKIN_STORAGE_CASSANDRA_USERNAME:} + password: ${ZIPKIN_STORAGE_CASSANDRA_PASSWORD:} + # Max pooled connections per datacenter-local host. + maxConnections: ${ZIPKIN_STORAGE_CASSANDRA_MAX_CONNECTIONS:8} + # Using ssl for connection, rely on Keystore + use-ssl: ${ZIPKIN_STORAGE_CASSANDRA_USE_SSL:false} + maxSizeOfBatchCql: ${ZIPKIN_STORAGE_CASSANDRA_MAX_SIZE_OF_BATCH_CQL:2000} + asyncBatchPersistentPoolSize: ${ZIPKIN_STORAGE_CASSANDRA_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4} + +telemetry: + selector: ${ZIPKIN_TELEMETRY:none} + none: + +cluster: + selector: standalone + standalone: \ No newline at end of file diff --git a/zipkin-server/storage-cassandra/src/test/resources/log4j2.xml b/zipkin-server/storage-cassandra/src/test/resources/log4j2.xml new file mode 100644 index 00000000000..9340cc127e9 --- /dev/null +++ b/zipkin-server/storage-cassandra/src/test/resources/log4j2.xml @@ -0,0 +1,36 @@ + + + + + + + + + + + + + + + + + + + +