Bug 1344833 - Enable flake8 rule E128: "continuation line under-indented for visual indent". r=Dexter
author Federico Padua <federico_padua@yahoo.it>
Sat, 18 Mar 2017 21:18:27 +0100
changeset 348439 0c20d53a06dc3f19fb1904e8f2f839d61c1126a6
parent 348438 28dfd89c8e5d34ba11c8ea3b131c623e3f6cf2f4
child 348440 d9839e96fe102c376c350a81fb15ebb5b7bfc5c1
push id 39167
push user alessio.placitelli@gmail.com
push date Mon, 20 Mar 2017 17:00:42 +0000
treeherder autoland@0c20d53a06dc
reviewers Dexter
bugs 1344833
milestone 55.0a1
Bug 1344833 - Enable flake8 rule E128: "continuation line under-indented for visual indent". r=Dexter This patch enables flake8 rule E128: "continuation line under-indented for visual indent" in toolkit/components/telemetry by removing the E128 entry from toolkit/components/telemetry/.flake8 and fixing the files for which the E128 error was reported. Exactly 10 violations of rule E128 were found and fixed. MozReview-Commit-ID: 4sNXIxGFJQN
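
For illustration only (a hypothetical snippet, not taken from the Telemetry sources or this patch), this is the pattern E128 flags: once an opening bracket is followed by a token on the same line, pycodestyle expects continuation lines to line up with that token, and a continuation line sitting to the left of that column triggers E128.

    # Hypothetical example; names and strings are made up.
    print("%s and %s"
        % ("low",          # E128: under-indented for visual indent
           "high"))

    print("%s and %s"
          % ("low",        # aligned with the string above: no E128
             "high"))

The changes below follow the same shape: the continuation lines are re-indented to match the column of the first argument on the opening line, without altering behaviour.
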
toolkit/components/telemetry/.flake8
toolkit/components/telemetry/gen-histogram-data.py
toolkit/components/telemetry/histogram_tools.py
toolkit/components/telemetry/parse_scalars.py
--- a/toolkit/components/telemetry/.flake8
+++ b/toolkit/components/telemetry/.flake8
@@ -1,5 +1,5 @@
 [flake8]
 # See http://pep8.readthedocs.io/en/latest/intro.html#configuration
-ignore = E121, E123, E126, E129, E133, E226, E241, E242, E704, W503, E402, E128, E501, E202, W602, E127, W601
+ignore = E121, E123, E126, E129, E133, E226, E241, E242, E704, W503, E402, E501, E202, W602, E127, W601
 max-line-length = 99
 filename = *.py, +.lint
--- a/toolkit/components/telemetry/gen-histogram-data.py
+++ b/toolkit/components/telemetry/gen-histogram-data.py
@@ -16,26 +16,26 @@ banner = """/* This file is auto-generat
 """
 
 
 def print_array_entry(output, histogram, name_index, exp_index, label_index, label_count):
     cpp_guard = histogram.cpp_guard()
     if cpp_guard:
         print("#if defined(%s)" % cpp_guard, file=output)
     print("  { %s, %s, %s, %s, %d, %d, %s, %d, %d, %s },"
-        % (histogram.low(),
-           histogram.high(),
-           histogram.n_buckets(),
-           histogram.nsITelemetry_kind(),
-           name_index,
-           exp_index,
-           histogram.dataset(),
-           label_index,
-           label_count,
-           "true" if histogram.keyed() else "false"), file=output)
+          % (histogram.low(),
+             histogram.high(),
+             histogram.n_buckets(),
+             histogram.nsITelemetry_kind(),
+             name_index,
+             exp_index,
+             histogram.dataset(),
+             label_index,
+             label_count,
+             "true" if histogram.keyed() else "false"), file=output)
     if cpp_guard:
         print("#endif", file=output)
 
 
 def write_histogram_table(output, histograms):
     string_table = StringTable()
     label_table = []
     label_count = 0
--- a/toolkit/components/telemetry/histogram_tools.py
+++ b/toolkit/components/telemetry/histogram_tools.py
@@ -366,40 +366,40 @@ associated with the histogram.  Returns 
                 return "string"
             return t.__name__
 
         for key, key_type in type_checked_fields.iteritems():
             if key not in definition:
                 continue
             if not isinstance(definition[key], key_type):
                 raise ValueError, ('value for key "{0}" in Histogram "{1}" '
-                        'should be {2}').format(key, name, nice_type_name(key_type))
+                                   'should be {2}').format(key, name, nice_type_name(key_type))
 
         for key, key_type in type_checked_list_fields.iteritems():
             if key not in definition:
                 continue
             if not all(isinstance(x, key_type) for x in definition[key]):
                 raise ValueError, ('all values for list "{0}" in Histogram "{1}" '
-                        'should be {2}').format(key, name, nice_type_name(key_type))
+                                   'should be {2}').format(key, name, nice_type_name(key_type))
 
     @staticmethod
     def check_keys(name, definition, allowed_keys):
         for key in definition.iterkeys():
             if key not in allowed_keys:
                 raise KeyError, '%s not permitted for %s' % (key, name)
 
     def set_bucket_parameters(self, low, high, n_buckets):
         self._low = low
         self._high = high
         self._n_buckets = n_buckets
         if whitelists is not None and self._n_buckets > 100 and type(self._n_buckets) is int:
             if self._name not in whitelists['n_buckets']:
                 raise KeyError, ('New histogram "%s" is not permitted to have more than 100 buckets. '
-                                'Histograms with large numbers of buckets use disproportionately high amounts of resources. '
-                                'Contact the Telemetry team (e.g. in #telemetry) if you think an exception ought to be made.' % self._name)
+                                 'Histograms with large numbers of buckets use disproportionately high amounts of resources. '
+                                 'Contact the Telemetry team (e.g. in #telemetry) if you think an exception ought to be made.' % self._name)
 
     @staticmethod
     def boolean_flag_bucket_parameters(definition):
         return (1, 2, 3)
 
     @staticmethod
     def linear_bucket_parameters(definition):
         return (definition.get('low', 1),
--- a/toolkit/components/telemetry/parse_scalars.py
+++ b/toolkit/components/telemetry/parse_scalars.py
@@ -44,29 +44,29 @@ class ScalarType:
                 conform our name specification.
         """
 
         # Enforce a maximum length on group and probe names.
         MAX_NAME_LENGTH = 40
         for n in [group_name, probe_name]:
             if len(n) > MAX_NAME_LENGTH:
                 raise ValueError("Name '{}' exceeds maximum name length of {} characters."
-                                .format(n, MAX_NAME_LENGTH))
+                                 .format(n, MAX_NAME_LENGTH))
 
         def check_name(name, error_msg_prefix, allowed_char_regexp):
             # Check if we only have the allowed characters.
             chars_regxp = r'^[a-zA-Z0-9' + allowed_char_regexp + r']+$'
             if not re.search(chars_regxp, name):
                 raise ValueError(error_msg_prefix + " name must be alpha-numeric. Got: '{}'".format(name))
 
             # Don't allow leading/trailing digits, '.' or '_'.
             if re.search(r'(^[\d\._])|([\d\._])$', name):
                 raise ValueError(error_msg_prefix +
-                    " name must not have a leading/trailing digit, a dot or underscore. Got: '{}'"
-                    .format(name))
+                                 " name must not have a leading/trailing digit, a dot or underscore. Got: '{}'"
+                                 .format(name))
 
         check_name(group_name, 'Group', r'\.')
         check_name(probe_name, 'Probe', r'_')
 
     def validate_types(self, definition):
         """This function performs some basic sanity checks on the scalar definition:
             - Checks that all the required fields are available.
             - Checks that all the fields have the expected types.
@@ -110,17 +110,17 @@ class ScalarType:
 
         # Do we have any unknown field?
         unknown_fields = [f for f in definition.keys() if f not in ALL_FIELDS]
         if len(unknown_fields) > 0:
             raise KeyError(self._name + ' - unknown fields: ' + ', '.join(unknown_fields))
 
         # Checks the type for all the fields.
         wrong_type_names = ['{} must be {}'.format(f, ALL_FIELDS[f].__name__)
-            for f in definition.keys() if not isinstance(definition[f], ALL_FIELDS[f])]
+                            for f in definition.keys() if not isinstance(definition[f], ALL_FIELDS[f])]
         if len(wrong_type_names) > 0:
             raise TypeError(self._name + ' - ' + ', '.join(wrong_type_names))
 
         # Check that the lists are not empty and that data in the lists
         # have the correct types.
         list_fields = [f for f in definition if isinstance(definition[f], list)]
         for field in list_fields:
             # Check for empty lists.
@@ -232,17 +232,17 @@ class ScalarType:
         """Get the nsITelemetry constant equivalent to the chose release channel collection
         policy for the scalar.
         """
         # The collection policy is optional, but we still define a default
         # behaviour for it.
         release_channel_collection = \
             self._definition.get('release_channel_collection', 'opt-in')
         return 'nsITelemetry::' + ('DATASET_RELEASE_CHANNEL_OPTOUT'
-            if release_channel_collection == 'opt-out' else 'DATASET_RELEASE_CHANNEL_OPTIN')
+                                   if release_channel_collection == 'opt-out' else 'DATASET_RELEASE_CHANNEL_OPTIN')
 
     @property
     def cpp_guard(self):
         """Get the cpp guard for this scalar"""
         return self._definition.get('cpp_guard')
 
 
 def load_scalars(filename):