Skip to content

Commit cff6742

Browse files
committed
Rerun with JDK17
1 parent 8146601 commit cff6742

File tree

2 files changed

+1
-189
lines changed

2 files changed

+1
-189
lines changed

Jenkinsfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ pipeline {
6767
}
6868
stage('Python bwc-upgrade tests') {
6969
agent { label 'medium && x64' }
70-
tools { jdk 'jdk11' }
70+
tools { jdk 'jdk17' }
7171
steps {
7272
checkout scm
7373
sh '''

tests/bwc/test_upgrade.py

Lines changed: 0 additions & 188 deletions
Original file line numberDiff line numberDiff line change
@@ -432,191 +432,3 @@ def assert_meta_data(self, version_def, nodes, data_paths=None):
432432
cursor.fetchall())
433433

434434
self._process_on_stop()
435-
436-
437-
class DefaultTemplateMetaDataCompatibilityTest(NodeProvider, unittest.TestCase):
    """Upgrade test for the persisted default index template.

    Starts a cluster on an old CrateDB version, then restarts the same data
    directories on newer versions and verifies that dynamic string column
    detection still works (i.e. the default template stored in the cluster
    state was upgraded correctly).
    """

    # Random id used both as the cluster name and as the on-disk
    # cluster-id folder name (see _move_nodes_folder_if_needed).
    CLUSTER_ID = gen_id()

    CLUSTER_SETTINGS = {
        'cluster.name': CLUSTER_ID,
    }

    # Oldest version first; each later entry is started on top of the
    # data directories produced by the previous one.
    # NOTE(review): the second VersionDef field appears to be java_home
    # (see prepare_env(version_def.java_home) below) — confirm.
    SUPPORTED_VERSIONS = (
        VersionDef('3.0.x', []),
        VersionDef('latest-nightly', [])
    )

    def test_metadata_compatibility(self):
        """Create a cluster on the oldest version, then upgrade through
        every later version in SUPPORTED_VERSIONS, re-checking dynamic
        string detection after each upgrade."""
        nodes = 3

        cluster = self._new_cluster(self.SUPPORTED_VERSIONS[0].version,
                                    nodes,
                                    settings=self.CLUSTER_SETTINGS)
        cluster.start()
        with connect(cluster.node().http_url, error_trace=True) as conn:
            cursor = conn.cursor()
            # Smoke query: only proves the old cluster is up and answers SQL.
            cursor.execute("select 1")
        # Stop all node processes but keep the data directories on disk.
        self._process_on_stop()

        # Reuse the old cluster's data paths when starting newer versions.
        paths = [node._settings['path.data'] for node in cluster.nodes()]
        for version_def in self.SUPPORTED_VERSIONS[1:]:
            self.assert_dynamic_string_detection(version_def, nodes, paths)

    def assert_dynamic_string_detection(self, version_def, nodes, data_paths):
        """ Test that a dynamic string column detection works as expected.

        If the cluster was initially created/started with a lower CrateDB
        version, we must ensure that our default template is also upgraded, if
        needed, because it is persisted in the cluster state. That's why
        re-creating tables would not help.
        """
        self._move_nodes_folder_if_needed(data_paths)
        cluster = self._new_cluster(
            version_def.version,
            nodes,
            data_paths,
            self.CLUSTER_SETTINGS,
            prepare_env(version_def.java_home),
        )
        cluster.start()
        with connect(cluster.node().http_url, error_trace=True) as conn:
            cursor = conn.cursor()
            cursor.execute('CREATE TABLE t1 (o object)')
            # Inserting an unknown key into the object column exercises
            # dynamic (string) column detection via the default template.
            cursor.execute('''INSERT INTO t1 (o) VALUES ({"name" = 'foo'})''')
            self.assertEqual(cursor.rowcount, 1)
            cursor.execute('REFRESH TABLE t1')
            # GROUP BY on the dynamically created column only works if the
            # column was detected as a string (keyword) type.
            cursor.execute("SELECT o['name'], count(*) FROM t1 GROUP BY 1")
            rs = cursor.fetchall()
            self.assertEqual(['foo', 1], rs[0])
            # Clean up so the next version iteration starts fresh.
            cursor.execute('DROP TABLE t1')
        self._process_on_stop()

    def _move_nodes_folder_if_needed(self, data_paths):
        """Eliminates the cluster-id folder inside the data directory."""
        # Older layouts nest 'nodes' under a <cluster-id> directory; newer
        # versions expect 'nodes' directly under the data path, so move it
        # up and drop the now-empty cluster-id folder.
        for path in data_paths:
            data_path_incl_cluster_id = os.path.join(path, self.CLUSTER_ID)
            if os.path.exists(data_path_incl_cluster_id):
                src_path_nodes = os.path.join(data_path_incl_cluster_id, 'nodes')
                target_path_nodes = os.path.join(self._path_data, 'nodes')
                shutil.move(src_path_nodes, target_path_nodes)
                shutil.rmtree(data_path_incl_cluster_id)
503-
504-
505-
class SnapshotCompatibilityTest(NodeProvider, unittest.TestCase):
    """Verify snapshots taken on an older CrateDB can be restored on a
    newer one, using a local Minio server as the S3-compatible backend."""

    # Credentials/endpoint match the MinioServer defaults used below.
    CREATE_REPOSITORY = '''
CREATE REPOSITORY r1 TYPE S3
WITH (access_key = 'minio',
secret_key = 'miniostorage',
bucket='backups',
endpoint = '127.0.0.1:9000',
protocol = 'http')
'''

    # Templates parameterized by a running snapshot counter.
    CREATE_SNAPSHOT_TPT = "CREATE SNAPSHOT r1.s{} ALL WITH (wait_for_completion = true)"

    RESTORE_SNAPSHOT_TPT = "RESTORE SNAPSHOT r1.s{} ALL WITH (wait_for_completion = true)"

    DROP_DOC_TABLE = 'DROP TABLE t1'

    # Upgrade path exercised by the test, oldest first.
    VERSION = ('5.0.x', 'latest-nightly')

    def test_snapshot_compatibility(self):
        """Test snapshot compatibility when upgrading 5.0.x -> latest-nightly

        Using Minio as a S3 repository, the first cluster that runs
        creates the repo, a table and inserts/selects some data, which
        then is snapshotted and deleted. The next cluster recovers the
        data from the last snapshot, performs further inserts/selects,
        to then snapshot the data and delete it.
        """
        with MinioServer() as minio:
            # Run Minio in a daemon thread so it dies with the test process.
            t = threading.Thread(target=minio.run)
            t.daemon = True
            t.start()
            wait_until(lambda: _is_up('127.0.0.1', 9000))

            num_nodes = 3
            num_docs = 30
            prev_version = None
            num_snapshot = 1

            cluster_settings = {
                'cluster.name': gen_id(),
            }

            # Data paths are carried over from one version to the next.
            paths = None
            for version in self.VERSION:
                cluster = self._new_cluster(version, num_nodes, paths, settings=cluster_settings)
                paths = [node._settings['path.data'] for node in cluster.nodes()]
                cluster.start()
                with connect(cluster.node().http_url, error_trace=True) as conn:
                    c = conn.cursor()
                    if not prev_version:
                        # First iteration: create repo, schema and seed data.
                        c.execute(self.CREATE_REPOSITORY)
                        c.execute(CREATE_ANALYZER)
                        c.execute(CREATE_DOC_TABLE)
                        insert_data(conn, 'doc', 't1', num_docs)
                    else:
                        # Later iterations: restore the previous version's
                        # snapshot and check the data survived the upgrade.
                        c.execute(self.RESTORE_SNAPSHOT_TPT.format(num_snapshot - 1))
                    c.execute('SELECT COUNT(*) FROM t1')
                    rowcount = c.fetchone()[0]
                    self.assertEqual(rowcount, num_docs)
                    run_selects(c, version)
                    # Snapshot then drop, so the next version must restore.
                    c.execute(self.CREATE_SNAPSHOT_TPT.format(num_snapshot))
                    c.execute(self.DROP_DOC_TABLE)
                self._process_on_stop()
                prev_version = version
                num_snapshot += 1
571-
572-
573-
class PreOidsFetchValueTest(NodeProvider, unittest.TestCase):
    """Regression test for fetching values of columns created before
    column OIDs existed, across a rolling-upgrade chain of versions."""

    def test_pre_oid_references(self):
        """Create a partitioned table on 5.4.x (pre-OID columns), then
        roll the cluster through 5.8.5 -> 5.9.x -> 5.10, adding columns
        and partitions at each step, and finally verify a _doc-based
        fetch returns non-null values for the pre-OID column."""
        cluster = self._new_cluster('5.4.x', 3)
        cluster.start()

        with connect(cluster.node().http_url, error_trace=True) as conn:
            c = conn.cursor()
            # Columns a/b are created without OIDs on 5.4.x.
            c.execute("create table tbl (a text, b text) partitioned by (a)")
            c.execute("insert into tbl (a, b) values ('foo1', 'bar1')")

        # Rolling upgrade: replace each node in place with a 5.8.5 binary.
        for idx, node in enumerate(cluster):
            new_node = self.upgrade_node(node, '5.8.5')
            cluster[idx] = new_node

        with connect(cluster.node().http_url, error_trace=True) as conn:
            c = conn.cursor()
            # Column c is added post-upgrade, so it has an OID while a/b do not.
            c.execute("alter table tbl add column c text")
            c.execute("insert into tbl (a, b, c) values ('foo1', 'bar2', 'baz2')")
            c.execute("insert into tbl (a, b, c) values ('foo2', 'bar1', 'baz1')")

        for idx, node in enumerate(cluster):
            new_node = self.upgrade_node(node, '5.9.x')
            cluster[idx] = new_node

        with connect(cluster.node().http_url, error_trace=True) as conn:
            c = conn.cursor()
            # New partitions ('foo3') are created on 5.9.x.
            c.execute("insert into tbl (a, b, c) values ('foo1', 'bar3', 'baz3')")
            c.execute("insert into tbl (a, b, c) values ('foo2', 'bar2', 'baz2')")
            c.execute("insert into tbl (a, b, c) values ('foo3', 'bar1', 'baz1')")

        for idx, node in enumerate(cluster):
            new_node = self.upgrade_node(node, '5.10')
            cluster[idx] = new_node

        with connect(cluster.node().http_url, error_trace=True) as conn:
            c = conn.cursor()
            c.execute("insert into tbl (a, b, c) values ('foo1', 'bar4', 'baz4')")
            c.execute("insert into tbl (a, b, c) values ('foo2', 'bar3', 'baz3')")
            c.execute("insert into tbl (a, b, c) values ('foo3', 'bar2', 'baz2')")
            c.execute("insert into tbl (a, b, c) values ('foo4', 'bar1', 'baz1')")

            c.execute("refresh table tbl")

            # LIMIT 10 forces the engine to go via _doc, which triggers the bug
            # fixed by https://github.com/crate/crate/pull/17819
            c.execute("select b from tbl limit 10")
            result = c.fetchall()
            for row in result:
                # Pre-OID column b must still be readable via _doc.
                self.assertIsNotNone(row[0])

0 commit comments

Comments
 (0)