Commit a1fd555

Extend maximum expiration date

1 parent 5021057, commit a1fd555

File tree: 2 files changed (+19, -9 lines)


ttl_test.py

Lines changed: 15 additions & 6 deletions
@@ -1,5 +1,7 @@
 import os
 import time
+import datetime
+
 import pytest
 import logging
 
@@ -365,8 +367,11 @@ def test_expiration_overflow_policy_reject_default_ttl(self):
     def _base_expiration_overflow_policy_test(self, default_ttl, policy):
         """
         Checks that expiration date overflow policy is correctly applied
-        @jira_ticket CASSANDRA-14092
+        @jira_ticket CASSANDRA-14092 and CASSANDRA-14227
         """
+        # Post 5.0, TTL may overflow in 2038 (legacy) or 2106 (CASSANDRA-14227)
+        overflow_policy_applies = "NONE" != self.cluster.nodelist()[0].get_conf_option("storage_compatibility_mode") \
+            or datetime.date.today().year >= 2086
         MAX_TTL = 20 * 365 * 24 * 60 * 60  # 20 years in seconds
         default_time_to_live = MAX_TTL if default_ttl else None
         self.prepare(default_time_to_live=default_time_to_live)
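
The new gate reads: the overflow policy is still expected to fire when the node runs with a storage compatibility mode other than NONE (legacy 2038 cap), or once the calendar gets close enough to the extended 2106 cap for a 20-year TTL to overflow again. A minimal standalone sketch of that decision, using a hypothetical storage_compatibility_mode argument in place of the dtest get_conf_option() lookup:

import datetime

def overflow_policy_applies(storage_compatibility_mode):
    """True if the TTL overflow policy is still expected to trigger.

    Legacy sstable formats cap expiration dates in 2038; without a
    compatibility mode (CASSANDRA-14227) the cap moves to 2106, so the
    20-year test TTL only overflows again from roughly 2086 onwards.
    """
    legacy_format = storage_compatibility_mode != "NONE"
    near_extended_cap = datetime.date.today().year >= 2086
    return legacy_format or near_extended_cap

print(overflow_policy_applies("CASSANDRA_4"))  # True: legacy format still capped at 2038
print(overflow_policy_applies("NONE"))         # False (until the year 2086)
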
@@ -384,9 +389,9 @@ def _base_expiration_overflow_policy_test(self, default_ttl, policy):
         try:
             result = self.session1.execute_async(query + ";")
             result.result()
-            if policy == 'REJECT':
+            if policy == 'REJECT' and overflow_policy_applies:
                 pytest.fail("should throw InvalidRequest")
-            if self.cluster.version() >= '3.0':  # client warn only on 3.0+
+            if self.cluster.version() >= '3.0' and overflow_policy_applies:  # client warn only on 3.0+
                 if policy == 'CAP':
                     logger.debug("Warning is {}".format(result.warnings[0]))
                     assert 'exceeds maximum supported expiration' in result.warnings[0], 'Warning not found'
@@ -399,10 +404,10 @@ def _base_expiration_overflow_policy_test(self, default_ttl, policy):
 
         self.cluster.flush()
         # Data should be present unless policy is reject
-        assert_row_count(self.session1, 'ttl_table', 0 if policy == 'REJECT' else 1)
+        assert_row_count(self.session1, 'ttl_table', 0 if (policy == 'REJECT' and overflow_policy_applies) else 1)
 
         # Check that warning is always logged, unless policy is REJECT
-        if policy != 'REJECT':
+        if policy != 'REJECT' and overflow_policy_applies:
             node1 = self.cluster.nodelist()[0]
             prefix = 'default ' if default_ttl else ''
             warning = node1.grep_log("Request on table {}.{} with {}ttl of {} seconds exceeds maximum supported expiration"
@@ -599,7 +604,11 @@ def test_recover_negative_expiration_date_sstables_with_scrub(self):
         node.watch_log_for('Loading new SSTables', timeout=10)
 
         logger.debug("Check that there are no rows present")
-        assert_row_count(session, 'ttl_table', 0)
+        # CASSANDRA-14227: from 5.0 upwards the long TTL lets us read back the overflowed rows
+        if self.cluster.version() >= '5.0':
+            assert_row_count(session, 'ttl_table', 1)
+        else:
+            assert_row_count(session, 'ttl_table', 0)
 
         logger.debug("Shutting down node")
         self.cluster.stop()
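
The commit title ("Extend maximum expiration date") refers to the cap that CASSANDRA-14227 moves from the signed 32-bit epoch limit (2038-01-19) to the unsigned one (2106-02-07), which is also why the scrub test above now expects the overflowed row to be readable on 5.0+. A back-of-the-envelope illustration of why the test's 20-year TTL overflows the old cap but not the new one (illustrative arithmetic only, not the server's validation code):

import datetime

UTC = datetime.timezone.utc
LEGACY_CAP = datetime.datetime.fromtimestamp(2**31 - 1, tz=UTC)    # 2038-01-19, signed 32-bit seconds
EXTENDED_CAP = datetime.datetime.fromtimestamp(2**32 - 1, tz=UTC)  # 2106-02-07, unsigned 32-bit seconds

def max_ttl_under(cap):
    """Largest TTL (seconds) whose expiration date still fits under the cap."""
    return int((cap - datetime.datetime.now(tz=UTC)).total_seconds())

MAX_TTL = 20 * 365 * 24 * 60 * 60  # the 20-year TTL used by the test
print(MAX_TTL > max_ttl_under(LEGACY_CAP))    # True today: 20 years overflows the 2038 cap
print(MAX_TTL > max_ttl_under(EXTENDED_CAP))  # False until roughly 2086
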

upgrade_tests/upgrade_through_versions_test.py

Lines changed: 4 additions & 3 deletions
@@ -432,6 +432,7 @@ def upgrade_scenario(self, populate=True, create_schema=True, rolling=False, aft
 
                 self.upgrade_to_version(version_meta, partial=True, nodes=(node,), internode_ssl=internode_ssl)
 
+                logger.debug(str(self.fixture_dtest_setup.subprocs))
                 self._check_on_subprocs(self.fixture_dtest_setup.subprocs)
                 logger.debug('Successfully upgraded %d of %d nodes to %s' %
                              (num + 1, len(self.cluster.nodelist()), version_meta.version))
@@ -488,7 +489,7 @@ def _check_on_subprocs(self, subprocs):
         if not all(subproc_statuses):
             message = "A subprocess has terminated early. Subprocess statuses: "
             for s in subprocs:
-                message += "{name} (is_alive: {aliveness}), ".format(name=s.name, aliveness=s.is_alive())
+                message += "{name} (is_alive: {aliveness}, exitCode: {exitCode}), ".format(name=s.name, aliveness=s.is_alive(), exitCode=s.exitcode)
             message += "attempting to terminate remaining subprocesses now."
             self._terminate_subprocs()
             raise RuntimeError(message)
@@ -654,7 +655,7 @@ def _start_continuous_write_and_verify(self, wait_for_rowcount=0, max_wait_s=600
         # queue of verified writes, which are update candidates
         verification_done_queue = Queue(maxsize=500)
 
-        writer = Process(target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
+        writer = Process(name="data_writer", target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
         # daemon subprocesses are killed automagically when the parent process exits
         writer.daemon = True
         self.fixture_dtest_setup.subprocs.append(writer)
@@ -663,7 +664,7 @@ def _start_continuous_write_and_verify(self, wait_for_rowcount=0, max_wait_s=600
         if wait_for_rowcount > 0:
             self._wait_until_queue_condition('rows written (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)
 
-        verifier = Process(target=data_checker, args=(self, to_verify_queue, verification_done_queue))
+        verifier = Process(name="data_checker", target=data_checker, args=(self, to_verify_queue, verification_done_queue))
         # daemon subprocesses are killed automagically when the parent process exits
         verifier.daemon = True
         self.fixture_dtest_setup.subprocs.append(verifier)
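
The changes to upgrade_through_versions_test.py are purely diagnostic: naming the writer/verifier subprocesses and adding their exitcode to the failure message makes it obvious which helper died during an upgrade. A small self-contained illustration of the same idea with multiprocessing (the flaky worker is hypothetical, not one of the dtest helpers):

import multiprocessing
import sys
import time

def flaky_worker():
    # Hypothetical stand-in for data_writer/data_checker that exits early with an error.
    time.sleep(0.1)
    sys.exit(3)

if __name__ == "__main__":
    proc = multiprocessing.Process(name="data_writer", target=flaky_worker, daemon=True)
    proc.start()
    proc.join(timeout=1)
    if not proc.is_alive():
        # Name plus exitcode pinpoints which subprocess terminated early and how,
        # mirroring the richer message now built in _check_on_subprocs().
        print("{name} (is_alive: {alive}, exitCode: {code})".format(
            name=proc.name, alive=proc.is_alive(), code=proc.exitcode))
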
