commit 5ea0ed67c45e6f76cae40c28cbe361a62b66142c
Author: Bernard Cafarelli
Date:   Thu Sep 24 13:12:20 2020 +0200

    Fix lower-constraints and pep8 for Focal

    Bump lower-constraints to versions that support Python 3.8 and have
    wheels available, and sync requirements accordingly.
    Bump hacking to 3.2.0 in order to work with Focal, disabling W504 to
    align with neutron.
    Clean pep8 requirements out of lower-constraints.

    Change-Id: I743acf9f2b7d7000e3340fee4887c8a1f3647133
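A note on the W504 part of this change: pycodestyle treats W503 (line break
before a binary operator) and W504 (line break after) as a mutually exclusive
pair, and both sit in its default ignore list; writing an explicit "ignore ="
replaces those defaults, so ignoring only W504 leaves W503 active. A minimal
sketch of what each warning flags (variable names are illustrative only):

    rx_bytes, tx_bytes = 1500, 2300   # illustrative values

    # Accepted once W504 is ignored: the break comes after the operator.
    total_bytes = (rx_bytes +
                   tx_bytes)

    # Flagged as W503 (line break before binary operator) when the
    # explicit ignore list re-enables it.
    total_bytes = (rx_bytes
                   + tx_bytes)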
diff --git a/lower-constraints.txt b/lower-constraints.txt
index d7e0f9e..dac9027 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -3,36 +3,33 @@ alembic==0.8.10
 amqp==2.2.2
 appdirs==1.4.3
 asn1crypto==0.24.0
-astroid==1.6.5
 Babel==2.5.3
 bcrypt==3.1.4
 beautifulsoup4==4.6.0
 cachetools==2.0.1
 certifi==2018.1.18
-cffi==1.11.5
+cffi==1.14.0
 chardet==3.0.4
-cliff==2.11.0
+cliff==3.4.0
 cmd2==0.8.1
 contextlib2==0.5.5
 coverage==4.0
-cryptography==2.1.4
+cryptography==2.7
 debtcollector==1.19.0
-decorator==4.2.1
+decorator==4.4.0
 deprecation==2.0
 docutils==0.14
 dogpile.cache==0.6.5
 dulwich==0.19.0
 enum-compat==0.0.2
-eventlet==0.18.2
+eventlet==0.25.1
 extras==1.0.0
 fasteners==0.14.1
 fixtures==3.0.0
 flake8-import-order==0.12
-flake8==2.5.5
 future==0.16.0
 futurist==1.6.0
-greenlet==0.4.13
-hacking==0.12.0
+greenlet==0.4.15
 httplib2==0.10.3
 idna==2.6
 imagesize==1.0.0
@@ -45,13 +42,12 @@ jsonpointer==2.0
 jsonschema==2.6.0
 keystoneauth1==3.4.0
 keystonemiddleware==4.21.0
-kombu==4.1.0
+kombu==4.6.1
 linecache2==1.0.0
 logilab-common==1.4.1
 logutils==0.3.5
 Mako==1.0.7
-MarkupSafe==1.0
-mccabe==0.2.1
+MarkupSafe==1.1.1
 mock==2.0.0
 monotonic==1.4
 mox3==0.25.0
@@ -60,20 +56,20 @@ msgpack==0.5.6
 munch==2.2.0
 netaddr==0.7.18
 netifaces==0.10.6
-neutron-lib==1.18.0
+neutron-lib==2.2.0
 openstacksdk==0.12.0
 os-client-config==1.29.0
 os-service-types==1.2.0
 os-xenapi==0.3.1
-osc-lib==1.10.0
+osc-lib==1.12.0
 oslo.cache==1.29.0
 oslo.concurrency==3.26.0
 oslo.config==5.2.0
 oslo.context==2.20.0
-oslo.db==4.35.0
+oslo.db==4.44.0
 oslo.i18n==3.15.3
 oslo.log==3.36.0
-oslo.messaging==5.29.0
+oslo.messaging==12.4.0
 oslo.middleware==3.35.0
 oslo.policy==1.34.0
 oslo.privsep==1.28.0
@@ -84,34 +80,29 @@ oslo.service==1.30.0
 oslo.utils==3.33.0
 oslo.versionedobjects==1.32.0
 oslotest==3.2.0
-osprofiler==2.0.0
+osprofiler==2.4.0
 ovs==2.8.1
 ovsdbapp==0.10.0
 packaging==17.1
-paramiko==2.4.1
 Paste==2.0.3
 PasteDeploy==1.5.2
 pbr==2.0.0
 pecan==1.2.1
-pep8==1.5.7
 pika-pool==0.1.3
 pika==0.10.0
 prettytable==0.7.2
 psutil==5.4.3
-psycopg2==2.7.7
+psycopg2==2.8
 pyasn1==0.4.2
 pycadf==2.7.0
-pycodestyle==2.3.1
 pycparser==2.18
-pyflakes==0.8.1
 Pygments==2.2.0
 pyinotify==0.9.6
-pylint==1.9.2
 PyMySQL==0.7.6
 PyNaCl==1.2.1
 pyparsing==2.2.0
 pyperclip==1.6.0
-pyroute2==0.4.21
+pyroute2==0.5.13
 python-dateutil==2.7.0
 python-designateclient==2.9.0
 python-editor==1.0.3
@@ -121,7 +112,7 @@ python-neutronclient==6.7.0
 python-novaclient==10.1.0
 python-subunit==1.2.0
 pytz==2018.3
-PyYAML==3.12
+PyYAML==3.13
 repoze.lru==0.7
 requests-mock==1.2.0
 requests==2.18.4
@@ -130,7 +121,7 @@ rfc3986==1.1.0
 Routes==2.4.1
 ryu==4.23
 simplejson==3.13.2
-six==1.10.0
+six==1.11.0
 snowballstemmer==1.2.1
 sqlalchemy-migrate==0.11.0
 SQLAlchemy==1.2.0
@@ -139,7 +130,7 @@ statsd==3.2.2
 stestr==2.0.0
 stevedore==1.20.0
 Tempita==0.5.2
-tenacity==4.9.0
+tenacity==6.0.0
 testrepository==0.0.20
 testresources==2.0.0
 testscenarios==0.4
diff --git a/networking_sfc/cli/port_pair_group.py b/networking_sfc/cli/port_pair_group.py
index 99d9deb..056e0b0 100644
--- a/networking_sfc/cli/port_pair_group.py
+++ b/networking_sfc/cli/port_pair_group.py
@@ -103,11 +103,10 @@ class PortPairGroupCreate(extension.ClientExtensionCreate, PortPairGroup):
         ingress_n_tuple_dict = {}
         egress_n_tuple_dict = {}
         # Split input of ppg_n_tuple_mapping by & and =
-        raw_data = dict([
-            (content[0], content[1]) for content in
-            [sub_field.split('=') for sub_field in
-                [field for field in value.split('&') if field]]
-        ])
+        raw_data = {content[0]: content[1] for content in
+                    [sub_field.split('=') for sub_field in
+                     [field for field in value.split('&') if field]]
+                    }
         # Store ingress_n_tuple values and egress_n_tuple values
         # into corresponding dictionary, and expand
         # source_port_range and destination_port_range to
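The port_pair_group.py hunk above swaps a dict() call over a generated list of
pairs for a direct dict comprehension; behavior is unchanged. A standalone
sketch of the same parsing idiom, using a made-up sample value rather than real
CLI input:

    # Parse 'key=value' pairs separated by '&', skipping empty fields.
    value = 'source_ip_prefix=10.0.0.0/24&destination_port_range=80:90'
    raw_data = {content[0]: content[1] for content in
                [sub_field.split('=') for sub_field in
                 [field for field in value.split('&') if field]]}
    print(raw_data)
    # {'source_ip_prefix': '10.0.0.0/24', 'destination_port_range': '80:90'}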
diff --git a/networking_sfc/db/sfc_db.py b/networking_sfc/db/sfc_db.py
index 1cbb5b8..48adaac 100644
--- a/networking_sfc/db/sfc_db.py
+++ b/networking_sfc/db/sfc_db.py
@@ -252,8 +252,7 @@ class SfcDbPlugin(
             curr_pg_tap_enabled = pg['tap_enabled']
             if prev_pg_tap_enabled and curr_pg_tap_enabled:
                 raise ext_tap.ConsecutiveTapPPGNotSupported()
-            else:
-                prev_pg_tap_enabled = curr_pg_tap_enabled
+            prev_pg_tap_enabled = curr_pg_tap_enabled
         query = model_query.query_with_hooks(context, PortChain)
         for port_chain_db in query.all():
             if port_chain_db['id'] == pc_id:
@@ -803,88 +802,88 @@ class SfcDbPlugin(
     def _validate_port_chains_for_graph(self, context, port_chains,
                                         graph_id=None):
-            # create a list of all port-chains that will be associated
-            all_port_chains = set()
-            for src_chain in port_chains:
-                all_port_chains.add(src_chain)
-                for dst_chain in port_chains[src_chain]:
-                    all_port_chains.add(dst_chain)
-            # check if any of the port-chains are already in a graph
-            if self._any_port_chains_in_a_graph(
-                    context, all_port_chains, graph_id):
-                raise ext_sg.ServiceGraphInvalidPortChains(
-                    port_chains=port_chains)
-
-            # dict whose keys are PCs and values are lists of dependency-PCs
-            # (PCs incoming to the point where the key is a outgoing)
-            parenthood = {}
-            encapsulation = None
-            fc_cls = fc_db.FlowClassifierDbPlugin
-            for src_chain in port_chains:
-                src_pc = self._get_port_chain(context, src_chain)
-                curr_corr = src_pc.chain_parameters['correlation']['value']
-                # guarantee that branching PPG supports correlation
-                assocs = src_pc.chain_group_associations
-                src_ppg = max(assocs, key=(lambda ppg: ppg.position))
-                ppg_id = src_ppg['portpairgroup_id']
+        # create a list of all port-chains that will be associated
+        all_port_chains = set()
+        for src_chain in port_chains:
+            all_port_chains.add(src_chain)
+            for dst_chain in port_chains[src_chain]:
+                all_port_chains.add(dst_chain)
+        # check if any of the port-chains are already in a graph
+        if self._any_port_chains_in_a_graph(
+                context, all_port_chains, graph_id):
+            raise ext_sg.ServiceGraphInvalidPortChains(
+                port_chains=port_chains)
+
+        # dict whose keys are PCs and values are lists of dependency-PCs
+        # (PCs incoming to the point where the key is a outgoing)
+        parenthood = {}
+        encapsulation = None
+        fc_cls = fc_db.FlowClassifierDbPlugin
+        for src_chain in port_chains:
+            src_pc = self._get_port_chain(context, src_chain)
+            curr_corr = src_pc.chain_parameters['correlation']['value']
+            # guarantee that branching PPG supports correlation
+            assocs = src_pc.chain_group_associations
+            src_ppg = max(assocs, key=(lambda ppg: ppg.position))
+            ppg_id = src_ppg['portpairgroup_id']
+            ppg = self._get_port_pair_group(context, ppg_id)
+            for pp in ppg.port_pairs:
+                sfparams = pp['service_function_parameters']
+                if sfparams['correlation']['value'] != curr_corr:
+                    raise ext_sg.ServiceGraphImpossibleBranching()
+
+            # verify encapsulation consistency across all PCs (part 1)
+            if not encapsulation:
+                encapsulation = curr_corr
+            elif encapsulation != curr_corr:
+                raise ext_sg.ServiceGraphInconsistentEncapsulation()
+            # list of all port chains at this branching point:
+            branching_point = []
+            # list of every flow classifier at this branching point:
+            fcs_for_src_chain = []
+            for dst_chain in port_chains[src_chain]:
+                # check if the current destination PC was already added
+                if dst_chain in branching_point:
+                    raise ext_sg.ServiceGraphPortChainInConflict(
+                        pc_id=dst_chain)
+                branching_point.append(dst_chain)
+                dst_pc = self._get_port_chain(context, dst_chain)
+                curr_corr = dst_pc.chain_parameters['correlation']['value']
+                # guarantee that destination PPG supports correlation
+                assocs = dst_pc.chain_group_associations
+                dst_ppg = min(assocs, key=(lambda ppg: ppg.position))
+                ppg_id = dst_ppg['portpairgroup_id']
                 ppg = self._get_port_pair_group(context, ppg_id)
                 for pp in ppg.port_pairs:
                     sfparams = pp['service_function_parameters']
                     if sfparams['correlation']['value'] != curr_corr:
                         raise ext_sg.ServiceGraphImpossibleBranching()
-
-                # verify encapsulation consistency across all PCs (part 1)
-                if not encapsulation:
-                    encapsulation = curr_corr
-                elif encapsulation != curr_corr:
+                # verify encapsulation consistency across all PCs (part 2)
+                if encapsulation != curr_corr:
                     raise ext_sg.ServiceGraphInconsistentEncapsulation()
-                # list of all port chains at this branching point:
-                branching_point = []
-                # list of every flow classifier at this branching point:
-                fcs_for_src_chain = []
-                for dst_chain in port_chains[src_chain]:
-                    # check if the current destination PC was already added
-                    if dst_chain in branching_point:
-                        raise ext_sg.ServiceGraphPortChainInConflict(
-                            pc_id=dst_chain)
-                    branching_point.append(dst_chain)
-                    dst_pc = self._get_port_chain(context, dst_chain)
-                    curr_corr = dst_pc.chain_parameters['correlation']['value']
-                    # guarantee that destination PPG supports correlation
-                    assocs = dst_pc.chain_group_associations
-                    dst_ppg = min(assocs, key=(lambda ppg: ppg.position))
-                    ppg_id = dst_ppg['portpairgroup_id']
-                    ppg = self._get_port_pair_group(context, ppg_id)
-                    for pp in ppg.port_pairs:
-                        sfparams = pp['service_function_parameters']
-                        if sfparams['correlation']['value'] != curr_corr:
-                            raise ext_sg.ServiceGraphImpossibleBranching()
-                    # verify encapsulation consistency across all PCs (part 2)
-                    if encapsulation != curr_corr:
-                        raise ext_sg.ServiceGraphInconsistentEncapsulation()
-                    dst_pc_dict = self._make_port_chain_dict(dst_pc)
-                    # acquire associated flow classifiers
-                    fcs = dst_pc_dict['flow_classifiers']
-                    for fc_id in fcs:
-                        fc = self._get_flow_classifier(context, fc_id)
-                        fcs_for_src_chain.append(fc)  # update list of every FC
-                    # update the parenthood dict
-                    if dst_chain in parenthood:
-                        parenthood[dst_chain].append(src_chain)
-                    else:
-                        parenthood[dst_chain] = [src_chain]
-                # detect duplicate FCs, consequently branching ambiguity
-                for i, fc1 in enumerate(fcs_for_src_chain):
-                    for fc2 in fcs_for_src_chain[i + 1:]:
-                        if(fc_cls.flowclassifier_basic_conflict(fc1, fc2)):
-                            raise ext_sg.\
-                                ServiceGraphFlowClassifierInConflict(
-                                    fc1_id=fc1['id'], fc2_id=fc2['id'])
-
-            # check for circular paths within the graph via parenthood dict:
-            for port_chain in parenthood:
-                if self._is_there_a_loop(parenthood, port_chain, []):
-                    raise ext_sg.ServiceGraphLoopDetected()
+                dst_pc_dict = self._make_port_chain_dict(dst_pc)
+                # acquire associated flow classifiers
+                fcs = dst_pc_dict['flow_classifiers']
+                for fc_id in fcs:
+                    fc = self._get_flow_classifier(context, fc_id)
+                    fcs_for_src_chain.append(fc)  # update list of every FC
+                # update the parenthood dict
+                if dst_chain in parenthood:
+                    parenthood[dst_chain].append(src_chain)
+                else:
+                    parenthood[dst_chain] = [src_chain]
+            # detect duplicate FCs, consequently branching ambiguity
+            for i, fc1 in enumerate(fcs_for_src_chain):
+                for fc2 in fcs_for_src_chain[i + 1:]:
+                    if(fc_cls.flowclassifier_basic_conflict(fc1, fc2)):
+                        raise ext_sg.\
+                            ServiceGraphFlowClassifierInConflict(
+                                fc1_id=fc1['id'], fc2_id=fc2['id'])
+
+        # check for circular paths within the graph via parenthood dict:
+        for port_chain in parenthood:
+            if self._is_there_a_loop(parenthood, port_chain, []):
+                raise ext_sg.ServiceGraphLoopDetected()
 
     def _setup_graph_chain_associations(self, context, graph_db,
                                         port_chains):
         with db_api.CONTEXT_READER.using(context):
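Several hunks in this commit, starting with the tap-PPG check above, drop an
"else:" that follows a raise: control flow can never continue past the raise,
so the else branch only adds indentation. A minimal sketch of the pattern,
with hypothetical names:

    def next_tap_state(prev_enabled, curr_enabled):
        if prev_enabled and curr_enabled:
            raise ValueError('consecutive tap PPGs are not supported')
        # Reached only when nothing was raised; no "else" required.
        return curr_enabled

    print(next_tap_state(False, True))  # True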
diff --git a/networking_sfc/extensions/servicegraph.py b/networking_sfc/extensions/servicegraph.py
index 85d1bbf..1ca0c1c 100755
--- a/networking_sfc/extensions/servicegraph.py
+++ b/networking_sfc/extensions/servicegraph.py
@@ -117,6 +117,7 @@ def normalize_service_graph(port_chains):
                 'is invalid: %s.' % key)
     return port_chains
 
+
 RESOURCE_ATTRIBUTE_MAP = {
     SERVICE_GRAPHS: {
         'id': {
@@ -196,7 +197,7 @@ class Servicegraph(extensions.ExtensionDescriptor):
 
 
 @six.add_metaclass(ABCMeta)
-class ServiceGraphPluginBase(object):
+class ServiceGraphPluginBase():
 
     def get_plugin_type(self):
         return SG_EXT
diff --git a/networking_sfc/services/flowclassifier/common/context.py b/networking_sfc/services/flowclassifier/common/context.py
index d873077..a681d31 100644
--- a/networking_sfc/services/flowclassifier/common/context.py
+++ b/networking_sfc/services/flowclassifier/common/context.py
@@ -13,7 +13,7 @@
 # under the License.
 
 
-class FlowClassifierPluginContext(object):
+class FlowClassifierPluginContext():
     """Flow Classifier context base class."""
     def __init__(self, plugin, plugin_context):
         self._plugin = plugin
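The many "class Foo(object):" to "class Foo():" hunks are purely cosmetic on
Python 3, where every class is new-style and inherits from object implicitly.
A quick self-contained check (class names are hypothetical):

    class WithObject(object):   # explicit base, Python 2 compatible spelling
        pass

    class Bare:                 # implicit base, idiomatic Python 3 spelling
        pass

    # Both resolve to the same method resolution order rooted at object.
    assert WithObject.__mro__[-1] is object
    assert Bare.__mro__[-1] is object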
diff --git a/networking_sfc/services/flowclassifier/common/exceptions.py b/networking_sfc/services/flowclassifier/common/exceptions.py
index f7a685e..54b4519 100644
--- a/networking_sfc/services/flowclassifier/common/exceptions.py
+++ b/networking_sfc/services/flowclassifier/common/exceptions.py
@@ -25,7 +25,6 @@ class FlowClassifierDriverError(exceptions.NeutronException):
 
 class FlowClassifierException(exceptions.NeutronException):
     """Base for flow classifier driver exceptions returned to user."""
-    pass
 
 
 class FlowClassifierBadRequest(exceptions.BadRequest, FlowClassifierException):
diff --git a/networking_sfc/services/flowclassifier/driver_manager.py b/networking_sfc/services/flowclassifier/driver_manager.py
index 421d2c6..b773a65 100644
--- a/networking_sfc/services/flowclassifier/driver_manager.py
+++ b/networking_sfc/services/flowclassifier/driver_manager.py
@@ -111,10 +111,9 @@ class FlowClassifierDriverManager(NamedExtensionManager):
                 )
                 if raise_orig_exc:
                     raise
-                else:
-                    raise fc_exc.FlowClassifierDriverError(
-                        method=method_name
-                    )
+                raise fc_exc.FlowClassifierDriverError(
+                    method=method_name
+                )
 
     def create_flow_classifier_precommit(self, context):
         """Driver precommit before the db transaction committed."""
diff --git a/networking_sfc/services/flowclassifier/drivers/base.py b/networking_sfc/services/flowclassifier/drivers/base.py
index 0f28afe..cc24ac0 100644
--- a/networking_sfc/services/flowclassifier/drivers/base.py
+++ b/networking_sfc/services/flowclassifier/drivers/base.py
@@ -18,7 +18,7 @@ import six
 
 
 @six.add_metaclass(abc.ABCMeta)
-class FlowClassifierDriverBaseLegacy(object):
+class FlowClassifierDriverBaseLegacy():
     """Flow Classifier Driver Base Class for legacy driver interface"""
 
     @abc.abstractmethod
diff --git a/networking_sfc/services/sfc/agent/extensions/sfc.py b/networking_sfc/services/sfc/agent/extensions/sfc.py
index a945c3d..e6fd339 100644
--- a/networking_sfc/services/sfc/agent/extensions/sfc.py
+++ b/networking_sfc/services/sfc/agent/extensions/sfc.py
@@ -29,7 +29,7 @@ from networking_sfc.services.sfc.drivers.ovs import rpc_topics as sfc_topics
 LOG = logging.getLogger(__name__)
 
 
-class SfcPluginApi(object):
+class SfcPluginApi():
     def __init__(self, topic, host):
         self.host = host
         self.target = oslo_messaging.Target(topic=topic, version='1.0')
@@ -49,7 +49,7 @@ class SfcPluginApi(object):
 
 
 @six.add_metaclass(abc.ABCMeta)
-class SfcAgentDriver(object):
+class SfcAgentDriver():
     """Defines stable abstract interface for SFC Agent Driver."""
 
     @abc.abstractmethod
diff --git a/networking_sfc/services/sfc/common/context.py b/networking_sfc/services/sfc/common/context.py
index 2b36aeb..d999127 100644
--- a/networking_sfc/services/sfc/common/context.py
+++ b/networking_sfc/services/sfc/common/context.py
@@ -13,7 +13,7 @@
 # under the License.
 
 
-class SfcPluginContext(object):
+class SfcPluginContext():
     """SFC context base class."""
     def __init__(self, plugin, plugin_context):
         self._plugin = plugin
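The flowclassifier exceptions hunk above (and the matching sfc one below)
removes a "pass" that sat under a class docstring: the docstring is already a
complete class body, so the statement is redundant and newer pylint flags it
as unnecessary-pass (W0107). A sketch with a hypothetical exception type:

    class SfcExampleError(Exception):
        """Base for example SFC errors returned to the user."""
        # No "pass" needed: the docstring alone forms the class body.

    try:
        raise SfcExampleError('port chain is invalid')
    except SfcExampleError as exc:
        print(exc)  # port chain is invalid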
diff --git a/networking_sfc/services/sfc/common/exceptions.py b/networking_sfc/services/sfc/common/exceptions.py
index febd6fb..23c011a 100644
--- a/networking_sfc/services/sfc/common/exceptions.py
+++ b/networking_sfc/services/sfc/common/exceptions.py
@@ -25,7 +25,6 @@ class SfcDriverError(exceptions.NeutronException):
 
 class SfcException(exceptions.NeutronException):
     """Base for SFC driver exceptions returned to user."""
-    pass
 
 
 class SfcBadRequest(exceptions.BadRequest, SfcException):
diff --git a/networking_sfc/services/sfc/common/ovs_ext_lib.py b/networking_sfc/services/sfc/common/ovs_ext_lib.py
index 89c0d51..4b8d05f 100644
--- a/networking_sfc/services/sfc/common/ovs_ext_lib.py
+++ b/networking_sfc/services/sfc/common/ovs_ext_lib.py
@@ -45,7 +45,7 @@ def get_port_mask(min_port, max_port):
     return masks
 
 
-class SfcOVSBridgeExt(object):
+class SfcOVSBridgeExt():
     def __init__(self, ovs_bridge):
         self.bridge = ovs_bridge
 
@@ -60,7 +60,7 @@ class SfcOVSBridgeExt(object):
     def do_action_groups(self, action, kwargs_list):
         group_strs = [_build_group_expr_str(kw, action)
                       for kw in kwargs_list]
-        if action == 'add' or action == 'del':
+        if action in ('add', 'del'):
             cmd = '%s-groups' % action
         elif action == 'mod':
             cmd = '%s-group' % action
diff --git a/networking_sfc/services/sfc/driver_manager.py b/networking_sfc/services/sfc/driver_manager.py
index 9b00e58..1d27e78 100644
--- a/networking_sfc/services/sfc/driver_manager.py
+++ b/networking_sfc/services/sfc/driver_manager.py
@@ -108,10 +108,9 @@ class SfcDriverManager(NamedExtensionManager):
                 )
                 if raise_orig_exc:
                     raise
-                else:
-                    raise sfc_exc.SfcDriverError(
-                        method=method_name
-                    )
+                raise sfc_exc.SfcDriverError(
+                    method=method_name
+                )
 
     def create_port_chain_precommit(self, context):
         self._call_drivers("create_port_chain_precommit", context,
diff --git a/networking_sfc/services/sfc/drivers/base.py b/networking_sfc/services/sfc/drivers/base.py
index 9cf275d..09d7407 100644
--- a/networking_sfc/services/sfc/drivers/base.py
+++ b/networking_sfc/services/sfc/drivers/base.py
@@ -19,7 +19,7 @@ import six
 
 
 @six.add_metaclass(abc.ABCMeta)
-class SfcDriverBaseLegacy(object):
+class SfcDriverBaseLegacy():
     """SFC Driver Base Class for legacy interface."""
 
     @abc.abstractmethod
diff --git a/networking_sfc/services/sfc/drivers/ovs/db.py b/networking_sfc/services/sfc/drivers/ovs/db.py
index 057fa8c..a87abce 100644
--- a/networking_sfc/services/sfc/drivers/ovs/db.py
+++ b/networking_sfc/services/sfc/drivers/ovs/db.py
@@ -124,7 +124,7 @@ class PathNode(model_base.BASEV2, model_base.HasId, model_base.HasProject):
         sa.ForeignKey('sfc_path_nodes.id', ondelete='SET NULL'))
 
 
-class OVSSfcDriverDB(object):
+class OVSSfcDriverDB():
     def initialize(self):
         self.admin_context = n_context.get_admin_context()
 
diff --git a/networking_sfc/services/sfc/drivers/ovs/driver.py b/networking_sfc/services/sfc/drivers/ovs/driver.py
index 0ea739a..a3e617c 100644
--- a/networking_sfc/services/sfc/drivers/ovs/driver.py
+++ b/networking_sfc/services/sfc/drivers/ovs/driver.py
@@ -1368,7 +1368,6 @@ class OVSSfcDriver(driver_base.SfcDriverBase,
         DBConnectionError: (pymysql.err.OperationalError)
         (2014, 'Command Out of Sync'))
         """
-        pass
         # try:
         #     flowrule_status = dict(status=status)
         #     self.update_path_node(id, flowrule_status)
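The do_action_groups() change above replaces a chained equality test with a
membership test, which evaluates the candidate value once and reads more
clearly. The same pattern in isolation (action strings taken from the hunk,
function wrapper hypothetical):

    def group_command(action):
        # Equivalent to: action == 'add' or action == 'del'
        if action in ('add', 'del'):
            return '%s-groups' % action
        if action == 'mod':
            return '%s-group' % action
        raise ValueError('unknown action: %s' % action)

    print(group_command('add'))   # add-groups
    print(group_command('mod'))   # mod-group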
diff --git a/networking_sfc/services/sfc/drivers/ovs/rpc.py b/networking_sfc/services/sfc/drivers/ovs/rpc.py
index e42290b..123b9a0 100644
--- a/networking_sfc/services/sfc/drivers/ovs/rpc.py
+++ b/networking_sfc/services/sfc/drivers/ovs/rpc.py
@@ -23,7 +23,7 @@ from networking_sfc.services.sfc.drivers.ovs import rpc_topics as sfc_topics
 LOG = logging.getLogger(__name__)
 
 
-class SfcRpcCallback(object):
+class SfcRpcCallback():
     """Sfc RPC server."""
 
     def __init__(self, driver):
@@ -48,7 +48,7 @@ class SfcRpcCallback(object):
         flowrule_dict['status'])
 
 
-class SfcAgentRpcClient(object):
+class SfcAgentRpcClient():
     """RPC client for ovs sfc agent."""
 
     def __init__(self, topic=sfc_topics.SFC_AGENT):
diff --git a/requirements.txt b/requirements.txt
index 3545539..9807606 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,20 +3,20 @@
 # process, which may cause wedges in the gate later.
 
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
-eventlet!=0.18.3,!=0.20.1,>=0.18.2 # MIT
+eventlet>=0.25.1 # MIT
 netaddr>=0.7.18 # BSD
 python-neutronclient>=6.7.0 # Apache-2.0
 SQLAlchemy>=1.2.0 # MIT
 alembic>=0.8.10 # MIT
-six>=1.10.0 # MIT
+six>=1.11.0 # MIT
 stevedore>=1.20.0 # Apache-2.0
 oslo.config>=5.2.0 # Apache-2.0
 oslo.i18n>=3.15.3 # Apache-2.0
 oslo.log>=3.36.0 # Apache-2.0
-oslo.messaging>=5.29.0 # Apache-2.0
+oslo.messaging>=12.4.0 # Apache-2.0
 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
 oslo.utils>=3.33.0 # Apache-2.0
-neutron-lib>=1.18.0 # Apache-2.0
+neutron-lib>=2.2.0 # Apache-2.0
 neutron>=13.0.0.0b2 # Apache-2.0
 
 # The comment below indicates this project repo is current with neutron-lib
diff --git a/test-requirements.txt b/test-requirements.txt
index 47d84f4..81d2011 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,10 +1,11 @@
 # The order of packages is significant, because pip processes them in the order
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
-hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
+hacking>=3.2.0,<3.3.0 # Apache-2.0
 coverage!=4.4,>=4.0 # Apache-2.0
 fixtures>=3.0.0 # Apache-2.0/BSD
 flake8-import-order==0.12 # LGPLv3
+pylint==2.4.4 # GPLv2
 mock>=2.0.0 # BSD
 requests-mock>=1.2.0 # Apache-2.0
 testresources>=2.0.0 # Apache-2.0/BSD
@@ -14,8 +15,6 @@ WebOb>=1.7.1 # MIT
 WebTest>=2.0.27 # MIT
 oslotest>=3.2.0 # Apache-2.0
 stestr>=2.0.0 # Apache-2.0
-astroid==1.6.5 # LGPLv2.1
-pylint==1.9.2 # GPLv2
 isort==4.3.21 # MIT
-psycopg2>=2.7.7 # LGPL/ZPL
+psycopg2>=2.8 # LGPL/ZPL
 PyMySQL>=0.7.6 # MIT License
diff --git a/tox.ini b/tox.ini
index 337af77..b479945 100644
--- a/tox.ini
+++ b/tox.ini
@@ -83,7 +83,8 @@ commands =
 [flake8]
 # TODO(dougwig) -- uncomment this to test for remaining linkages
 # N530 direct neutron imports not allowed
-ignore = N530
+# W504 line break after binary operator
+ignore = N530,W504
 # From neutron-lib flake8
 # H904: Delay string interpolations at logging calls
 enable-extensions=H904
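With hacking now capped below 3.3.0 and pylint pinned to 2.4.4, a quick local
smoke test can confirm that the pinned linters actually run on Focal's
Python 3.8 before pushing. A hedged sketch, assuming the tools from
test-requirements.txt are installed in the current environment (this script is
not part of the commit):

    import subprocess
    import sys

    # Print each linter's version; a non-zero exit code means the pin
    # did not install a working tool for this interpreter.
    for tool in (('flake8', '--version'), ('pylint', '--version')):
        result = subprocess.run(tool, capture_output=True, text=True)
        if result.returncode != 0:
            sys.exit(result.returncode)
        print(tool[0], '->', result.stdout.strip().splitlines()[0])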