Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Fix #2513: Update logging effect in automation #3437

Merged
merged 1 commit into from
May 24, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions nose.cfg
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
[nosetests]
logging-filter=azure.cli.testsdk,vcr_test_base
10 changes: 5 additions & 5 deletions scripts/automation/tests/nose_helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,10 @@

from __future__ import print_function

import os.path
from ..utilities.path import get_repo_root


def get_nose_runner(report_folder, parallel=True, process_timeout=600, process_restart=True,
xunit_report=False, exclude_integration=True):
"""Create a nose execution method"""
Expand All @@ -24,7 +28,7 @@ def _run_nose(test_folders):
or not os.path.isdir(report_folder):
raise ValueError('Report folder {} does not exist'.format(report_folder))

arguments = [__file__, '-v', '--nologcapture']
arguments = [__file__, '-v', '-c', os.path.join(get_repo_root(), 'nose.cfg')]
if parallel:
arguments += ['--processes=-1', '--process-timeout={}'.format(process_timeout)]
if process_restart:
Expand All @@ -36,12 +40,8 @@ def _run_nose(test_folders):
else:
test_report = ''

if exclude_integration:
arguments += ['--ignore-files=integration*']

debug_file = os.path.join(report_folder, 'test-debug.log')
arguments += ['--debug-log={}'.format(debug_file)]
arguments += ['--nologcapture']
arguments.extend(test_folders)
result = nose.run(argv=arguments)

Expand Down
30 changes: 10 additions & 20 deletions src/azure-cli-testsdk/azure/cli/testsdk/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,6 @@
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

from __future__ import print_function
import datetime
import unittest
import os
import inspect
Expand All @@ -29,27 +27,17 @@
from .utilities import create_random_name
from .decorators import live_only

logger = logging.getLogger('azuer.cli.testsdk')
logger = logging.getLogger('azure.cli.testsdk')


class IntegrationTestBase(unittest.TestCase):
    def __init__(self, method_name):
        """Initialize the test case and read the diagnose flag from the environment.

        The ``diagnose`` attribute is True only when the ENV_TEST_DIAGNOSE
        environment variable is set to the exact string 'True'.
        """
        super(IntegrationTestBase, self).__init__(method_name)
        # os.environ.get defaults to None, so a missing variable yields False here.
        self.diagnose = (os.environ.get(ENV_TEST_DIAGNOSE) == 'True')

def cmd(self, command, checks=None, expect_failure=False):
if self.diagnose:
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this being removed? I use this extensively during local troubleshooting. Or is this now accomplished another way?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The command will be printed in the logging output whenever the test fails, so you don't need to enable diagnostics anymore.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Okay, let's give it a shot :)

begin = datetime.datetime.now()
print('\nExecuting command: {}'.format(command))

result = execute(command, expect_failure=expect_failure)

if self.diagnose:
duration = datetime.datetime.now() - begin
print('\nCommand accomplished in {} s. Exit code {}.\n{}'.format(
duration.total_seconds(), result.exit_code, result.output))

return result.assert_with_checks(checks)
@classmethod
def cmd(cls, command, checks=None, expect_failure=False):
    """Execute a CLI command and verify the execution result with the given checks."""
    execution = execute(command, expect_failure=expect_failure)
    return execution.assert_with_checks(checks)

def create_random_name(self, prefix, length):  # pylint: disable=no-self-use
    """Delegate to the module-level create_random_name utility."""
    return create_random_name(length=length, prefix=prefix)
Expand Down Expand Up @@ -238,17 +226,21 @@ def _custom_request_query_matcher(cls, r1, r2):

class ExecutionResult(object):  # pylint: disable=too-few-public-methods
    def __init__(self, command, expect_failure=False, in_process=True):
        """Execute a CLI command and record its exit code and output.

        :param command: the CLI command line to execute.
        :param expect_failure: when True, a zero exit code is treated as an error;
            when False, a non-zero exit code is treated as an error.
        :param in_process: run the command in-process (default) or out of process.
        :raises AssertionError: when the outcome contradicts ``expect_failure``.
        """
        logger.info('Execute command %s', command)
        if in_process:
            self._in_process_execute(command)
        else:
            self._out_of_process_execute(command)

        if expect_failure and self.exit_code == 0:
            # The command succeeded although the caller expected a failure;
            # log the full output so the failing test is diagnosable from the log.
            logger.error('Command "%s" => %d. (It did not fail as expected) Output: %s', command,
                         self.exit_code, self.output)
            raise AssertionError('The command did not fail as it was expected.')
        elif not expect_failure and self.exit_code != 0:
            logger.error('Command "%s" => %d. Output: %s', command, self.exit_code, self.output)
            raise AssertionError('The command failed. Exit code: {}'.format(self.exit_code))

        logger.info('Command "%s" => %d.', command, self.exit_code)

        self.json_value = None
        # assertions may be skipped globally via the ENV_SKIP_ASSERT environment variable
        self.skip_assert = os.environ.get(ENV_SKIP_ASSERT, None) == 'True'

Expand All @@ -260,8 +252,6 @@ def assert_with_checks(self, *args):
elif callable(each):
checks.append(each)

logger.info('Checkers to be executed %s', len(checks))

if not self.skip_assert:
for c in checks:
c(self)
Expand Down
8 changes: 6 additions & 2 deletions src/azure-cli-testsdk/azure/cli/testsdk/vcr_test_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
import sys
import tempfile
import traceback
import logging
from random import choice
from string import digits, ascii_lowercase

Expand Down Expand Up @@ -45,6 +46,7 @@
MOCKED_TENANT_ID = '00000000-0000-0000-0000-000000000000'
MOCKED_STORAGE_ACCOUNT = 'dummystorage'

logger = logging.getLogger('vcr_test_base')

# MOCK METHODS

Expand Down Expand Up @@ -109,7 +111,7 @@ def _mock_subscriptions(self): # pylint: disable=unused-argument


def _mock_user_access_token(_, _1, _2, _3): # pylint: disable=unused-argument
return ('Bearer', 'top-secret-token-for-you', None)
return 'Bearer', 'top-secret-token-for-you', None


def _mock_operation_delay(_):
Expand Down Expand Up @@ -284,7 +286,6 @@ def __init__(self, test_file, test_name, run_live=False, debug=False, debug_vcr=
self.exception = CLIError('No recorded result provided for {}.'.format(self.test_name))

if debug_vcr:
import logging
logging.basicConfig()
vcr_log = logging.getLogger('vcr')
vcr_log.setLevel(logging.INFO)
Expand Down Expand Up @@ -427,8 +428,11 @@ def cmd(self, command, checks=None, allowed_exceptions=None,
cli_main(command_list, file=output)
except Exception as ex: # pylint: disable=broad-except
ex_msg = str(ex)
logger.error('Command "%s" => %s. Output: %s', command, ex_msg, output.getvalue())
if not next((x for x in allowed_exceptions if x in ex_msg), None):
raise ex

logger.info('Command "%s" => %s.', command, 'success')
self._track_executed_commands(command_list)
result = output.getvalue().strip()
output.close()
Expand Down