Fix invalid string escape sequences.

pull/33174/head
Matt Clay 7 years ago
parent 6ac9d05de6
commit 9735a70059
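
Every change below follows the same pattern: string literals that carry backslashes (mostly regular expressions) gain an r prefix so Python no longer interprets the escape sequences itself, and a few patterns also drop escapes that are unnecessary inside character classes. As a standalone sketch of why the prefix matters (illustrative only, not taken from any module in this diff):

    import re

    # Recognized escapes are translated before re ever sees the pattern:
    # '\b' becomes a backspace character, so this word-boundary search fails.
    assert re.search('\bok\b', 'that is ok') is None

    # A raw string hands the backslashes to the regex engine intact.
    assert re.search(r'\bok\b', 'that is ok') is not None

    # Unrecognized escapes such as '\d' happen to survive unchanged,
    # but since Python 3.6 they emit "invalid escape sequence" warnings
    # and are slated to become errors, hence the r prefixes below.
    assert '\\d' == r'\d'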

@@ -11,7 +11,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
'supported_by': 'community'}
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: ec2_ami_find
version_added: '2.0'

@@ -11,7 +11,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
'supported_by': 'community'}
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: cloudwatchevent_rule
short_description: Manage CloudWatch Event rules and targets

@@ -368,7 +368,7 @@ def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
group_id or a non-None ip range.
"""
-FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)'
+FOREIGN_SECURITY_GROUP_REGEX = r'^(\S+)/(sg-\S+)/(\S+)'
group_id = None
group_name = None
ip = None

@@ -169,9 +169,9 @@ class AnsibleSubnetSearchException(AnsibleRouteTableException):
pass
-CIDR_RE = re.compile('^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$')
-SUBNET_RE = re.compile('^subnet-[A-z0-9]+$')
-ROUTE_TABLE_RE = re.compile('^rtb-[A-z0-9]+$')
+CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$')
+SUBNET_RE = re.compile(r'^subnet-[A-z0-9]+$')
+ROUTE_TABLE_RE = re.compile(r'^rtb-[A-z0-9]+$')
def find_subnets(vpc_conn, vpc_id, identified_subnets):

@@ -238,7 +238,7 @@ def get_account_id(module, region=None, endpoint=None, **aws_connect_kwargs):
except ClientError as e:
if (e.response['Error']['Code'] == 'AccessDenied'):
except_msg = to_native(e.message)
-account_id = except_msg.search("arn:aws:iam::([0-9]{12,32}):\w+/").group(1)
+account_id = except_msg.search(r"arn:aws:iam::([0-9]{12,32}):\w+/").group(1)
if account_id is None:
module.fail_json_aws(e, msg="getting account information")
except Exception as e:

@@ -235,7 +235,7 @@ def validate_params(module, aws):
function_name = module.params['function_name']
# validate function name
-if not re.search('^[\w\-:]+$', function_name):
+if not re.search(r'^[\w\-:]+$', function_name):
module.fail_json(
msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
)

@@ -222,7 +222,7 @@ def validate_params(module, aws):
function_name = module.params['lambda_function_arn']
# validate function name
-if not re.search('^[\w\-:]+$', function_name):
+if not re.search(r'^[\w\-:]+$', function_name):
module.fail_json(
msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
)

@@ -361,7 +361,7 @@ def main():
# validate function_name if present
function_name = module.params['function_name']
if function_name:
-if not re.search("^[\w\-:]+$", function_name):
+if not re.search(r"^[\w\-:]+$", function_name):
module.fail_json(
msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
)

@@ -188,7 +188,7 @@ def validate_params(module):
# validate function name
if function_name.startswith('arn:'):
-if not re.search('^[\w\-]+$', function_name):
+if not re.search(r'^[\w\-]+$', function_name):
module.fail_json(
msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(
function_name)
@@ -197,7 +197,7 @@ def validate_params(module):
module.fail_json(
msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
else:
-if not re.search('^[\w\-:]+$', function_name):
+if not re.search(r'^[\w\-:]+$', function_name):
module.fail_json(
msg='ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.'.format(function_name)
)

@@ -613,8 +613,8 @@ AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
def extract_names_from_blob_uri(blob_uri, storage_suffix):
# HACK: ditch this once python SDK supports get by URI
-m = re.match('^https://(?P<accountname>[^\.]+)\.blob\.{0}/'
-'(?P<containername>[^/]+)/(?P<blobname>.+)$'.format(storage_suffix), blob_uri)
+m = re.match(r'^https://(?P<accountname>[^.]+)\.blob\.{0}/'
+r'(?P<containername>[^/]+)/(?P<blobname>.+)$'.format(storage_suffix), blob_uri)
if not m:
raise Exception("unable to parse blob uri '%s'" % blob_uri)
extracted_names = m.groupdict()

@@ -13,7 +13,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
'supported_by': 'community'}
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: proxmox_kvm
short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster.

@@ -271,8 +271,8 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
-NAME_RE = '({0}|{0}{1}*{0})'.format('[a-zA-Z0-9]', '[a-zA-Z0-9\-]')
-HOSTNAME_RE = '({0}\.)*{0}$'.format(NAME_RE)
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
MAX_DEVICES = 100
PACKET_DEVICE_STATES = (
@@ -403,7 +403,7 @@ def get_hostname_list(module):
if (len(hostnames) == 1) and (count > 0):
hostname_spec = hostnames[0]
count_range = range(count_offset, count_offset + count)
-if re.search("%\d{0,2}d", hostname_spec):
+if re.search(r"%\d{0,2}d", hostname_spec):
hostnames = [hostname_spec % i for i in count_range]
elif count > 1:
hostname_spec = '%s%%02d' % hostname_spec

@@ -215,7 +215,7 @@ LOCATIONS = ['us/las',
'de/fkb']
uuid_match = re.compile(
-'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):

@@ -95,7 +95,7 @@ LOCATIONS = ['us/las',
'de/fkb']
uuid_match = re.compile(
-'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):

@@ -98,7 +98,7 @@ from ansible.module_utils.basic import AnsibleModule
uuid_match = re.compile(
-'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):

@@ -147,7 +147,7 @@ from ansible.module_utils._text import to_native
uuid_match = re.compile(
-'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):

@@ -95,7 +95,7 @@ from ansible.module_utils.basic import AnsibleModule
uuid_match = re.compile(
-'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):

@@ -145,7 +145,7 @@ class Imgadm(object):
# Helper method to massage stderr
def errmsg(self, stderr):
-match = re.match('^imgadm .*?: error \(\w+\): (.*): .*', stderr)
+match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr)
if match:
return match.groups()[0]
else:
@@ -236,7 +236,7 @@ class Imgadm(object):
if rc != 0:
self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr)))
-regex = 'Image {0} \(.*\) is already installed, skipping'.format(self.uuid)
+regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid)
if re.match(regex, stdout):
self.changed = False

@@ -181,7 +181,7 @@ def alter_retention_policy(module, client, retention_policy):
duration = module.params['duration']
replication = module.params['replication']
default = module.params['default']
-duration_regexp = re.compile('(\d+)([hdw]{1})|(^INF$){1}')
+duration_regexp = re.compile(r'(\d+)([hdw]{1})|(^INF$){1}')
changed = False
duration_lookup = duration_regexp.search(duration)

@@ -126,7 +126,7 @@ from ansible.module_utils._text import to_native
def db_exists(cursor, db):
-res = cursor.execute("SHOW DATABASES LIKE %s", (db.replace("_", "\_"),))
+res = cursor.execute("SHOW DATABASES LIKE %s", (db.replace("_", r"\_"),))
return bool(res)

@@ -503,7 +503,7 @@ def get_database_privileges(cursor, user, db):
datacl = cursor.fetchone()[0]
if datacl is None:
return set()
-r = re.search('%s\\\\?\"?=(C?T?c?)/[^,]+\,?' % user, datacl)
+r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
if r is None:
return set()
o = set()

@@ -287,7 +287,7 @@ def main():
age = None
else:
# convert age to seconds:
-m = re.match("^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
+m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
if m:
age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
@@ -298,7 +298,7 @@ def main():
size = None
else:
# convert size to bytes:
-m = re.match("^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
+m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
if m:
size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)

@@ -179,7 +179,7 @@ def main():
module.fail_json(msg="%s needs a key parameter" % state)
# All xattr must begin in user namespace
-if key is not None and not re.match('^user\.', key):
+if key is not None and not re.match(r'^user\.', key):
key = 'user.%s' % key
if (state == 'present' or value is not None):

@@ -274,7 +274,7 @@ from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils._text import to_bytes, to_native
-_IDENT = "[a-zA-Z-][a-zA-Z0-9_\-\.]*"
+_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
# Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate
# strings wrapped by the other delimiter' XPath trick, especially as simple XPath.

@@ -12,7 +12,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
'supported_by': 'core'}
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: add_host
short_description: add a host (and alternatively a group) to the ansible-playbook in-memory inventory

@@ -75,7 +75,7 @@ def main():
def monit_version():
rc, out, err = module.run_command('%s -V' % MONIT, check_rc=True)
version_line = out.split('\n')[0]
-version = re.search("[0-9]+\.[0-9]+", version_line).group().split('.')
+version = re.search(r"[0-9]+\.[0-9]+", version_line).group().split('.')
# Use only major and minor even if there are more these should be enough
return int(version[0]), int(version[1])

@@ -206,7 +206,7 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k
motd += irc.recv(1024)
# The server might send back a shorter nick than we specified (due to NICKLEN),
# so grab that and use it from now on (assuming we find the 00[1-4] response).
-match = re.search('^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
+match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
if match:
nick = match.group('nick')
break
@@ -223,7 +223,7 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k
start = time.time()
while 1:
join += irc.recv(1024)
-if re.search('^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M):
+if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M):
break
elif time.time() - start > timeout:
raise Exception('Timeout waiting for IRC JOIN response')

@@ -137,7 +137,7 @@ from ansible.module_utils.basic import AnsibleModule
def parse_out(string):
-return re.sub("\s+", " ", string).strip()
+return re.sub(r"\s+", " ", string).strip()
def has_changed(string):

@@ -220,7 +220,7 @@ class Npm(object):
if dep:
# node.js v0.10.22 changed the `npm outdated` module separator
# from "@" to " ". Split on both for backwards compatibility.
-pkg, other = re.split('\s|@', dep, 1)
+pkg, other = re.split(r'\s|@', dep, 1)
outdated.append(pkg)
return outdated

@@ -150,7 +150,7 @@ from ansible.module_utils.basic import AnsibleModule
def parse_for_packages(stdout):
packages = []
data = stdout.split('\n')
-regex = re.compile('^\(\d+/\d+\)\s+\S+\s+(\S+)')
+regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
for l in data:
p = regex.search(l)
if p:

@@ -208,8 +208,8 @@ class SourcesList(object):
return s
# Drop options and protocols.
-line = re.sub('\[[^\]]+\]', '', line)
-line = re.sub('\w+://', '', line)
+line = re.sub(r'\[[^\]]+\]', '', line)
+line = re.sub(r'\w+://', '', line)
# split line into valid keywords
parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES]

@@ -235,7 +235,7 @@ def package_present(names, pkg_spec, module):
# "file:/local/package/directory/ is empty" message on stderr
# while still installing the package, so we need to look for
# for a message like "packagename-1.0: ok" just in case.
-match = re.search("\W%s-[^:]+: ok\W" % pkg_spec[name]['stem'], pkg_spec[name]['stdout'])
+match = re.search(r"\W%s-[^:]+: ok\W" % pkg_spec[name]['stem'], pkg_spec[name]['stdout'])
if match:
# It turns out we were able to install the package.
@@ -286,7 +286,7 @@ def package_latest(names, pkg_spec, module):
pkg_spec[name]['changed'] = False
for installed_name in pkg_spec[name]['installed_names']:
module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
-match = re.search("\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout'])
+match = re.search(r"\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout'])
if match:
module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
@@ -502,7 +502,7 @@ def upgrade_packages(pkg_spec, module):
# Try to find any occurrence of a package changing version like:
# "bzip2-1.0.6->1.0.6p0: ok".
-match = re.search("\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
+match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
if match:
pkg_spec['*']['changed'] = True

@@ -194,7 +194,7 @@ def upgrade(module, pacman_path):
}
if rc == 0:
-regex = re.compile('([\w-]+) ((?:\S+)-(?:\S+)) -> ((?:\S+)-(?:\S+))')
+regex = re.compile(r'([\w-]+) ((?:\S+)-(?:\S+)) -> ((?:\S+)-(?:\S+))')
for p in data:
m = regex.search(p)
packages.append(m.group(1))
@@ -419,11 +419,11 @@ def main():
for i, pkg in enumerate(pkgs):
if not pkg: # avoid empty strings
continue
-elif re.match(".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z))?$", pkg):
+elif re.match(r".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z))?$", pkg):
# The package given is a filename, extract the raw pkg name from
# it and store the filename
pkg_files.append(pkg)
-pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1])
+pkgs[i] = re.sub(r'-[0-9].*$', '', pkgs[i].split('/')[-1])
else:
pkg_files.append(None)

@@ -95,8 +95,8 @@ def main():
# Try to spot where this has happened and fix it.
for fragment in params['name']:
if (
-re.search('^\d+(?:\.\d+)*', fragment)
-and packages and re.search('@[^,]*$', packages[-1])
+re.search(r'^\d+(?:\.\d+)*', fragment)
+and packages and re.search(r'@[^,]*$', packages[-1])
):
packages[-1] += ',' + fragment
else:

@@ -231,7 +231,7 @@ def codex_list(module):
if rc != 0:
module.fail_json(msg="unable to list grimoire collection, fix your Codex")
-rex = re.compile("^\s*\[\d+\] : (?P<grim>[\w\-\+\.]+) : [\w\-\+\./]+(?: : (?P<ver>[\w\-\+\.]+))?\s*$")
+rex = re.compile(r"^\s*\[\d+\] : (?P<grim>[\w\-+.]+) : [\w\-+./]+(?: : (?P<ver>[\w\-+.]+))?\s*$")
# drop 4-line header and empty trailing line
for line in stdout.splitlines()[4:-1]:

@@ -102,7 +102,7 @@ def query_package(module, name, depot=None):
else:
rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
-version = re.sub("\s\s+|\t" , " ", stdout).strip().split()[1]
+version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1]
else:
version = None

@@ -231,7 +231,7 @@ def main():
for cpu, details in memory_details_summary.items():
cpu_total_memory_size = details.get('total_memory_size')
if cpu_total_memory_size:
-ram = re.search('(\d+)\s+(\w+)', cpu_total_memory_size)
+ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
if ram:
if ram.group(2) == 'GB':
facts['hw_memory_total'] = facts['hw_memory_total'] + int(ram.group(1))

@@ -144,20 +144,20 @@ def parse_lv(data):
name = None
for line in data.splitlines():
-match = re.search("LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
+match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
if match is not None:
name = match.group(1)
vg = match.group(2)
continue
-match = re.search("LPs:\s+(\d+).*PPs", line)
+match = re.search(r"LPs:\s+(\d+).*PPs", line)
if match is not None:
lps = int(match.group(1))
continue
-match = re.search("PP SIZE:\s+(\d+)", line)
+match = re.search(r"PP SIZE:\s+(\d+)", line)
if match is not None:
pp_size = int(match.group(1))
continue
-match = re.search("INTER-POLICY:\s+(\w+)", line)
+match = re.search(r"INTER-POLICY:\s+(\w+)", line)
if match is not None:
policy = match.group(1)
continue
@@ -174,22 +174,22 @@ def parse_vg(data):
for line in data.splitlines():
-match = re.search("VOLUME GROUP:\s+(\w+)", line)
+match = re.search(r"VOLUME GROUP:\s+(\w+)", line)
if match is not None:
name = match.group(1)
continue
-match = re.search("TOTAL PP.*\((\d+)", line)
+match = re.search(r"TOTAL PP.*\((\d+)", line)
if match is not None:
size = int(match.group(1))
continue
-match = re.search("PP SIZE:\s+(\d+)", line)
+match = re.search(r"PP SIZE:\s+(\d+)", line)
if match is not None:
pp_size = int(match.group(1))
continue
-match = re.search("FREE PP.*\((\d+)", line)
+match = re.search(r"FREE PP.*\((\d+)", line)
if match is not None:
free = int(match.group(1))
continue

@@ -404,7 +404,7 @@ def parsekey(module, raw_key, rank=None):
type_index = None # index of keytype in key string|list
# remove comment yaml escapes
-raw_key = raw_key.replace('\#', '#')
+raw_key = raw_key.replace(r'\#', '#')
# split key safely
lex = shlex.shlex(raw_key)

@@ -74,7 +74,7 @@ def activate(module):
def is_policy_enabled(module, name):
cmd = "%s list" % (AWALL_PATH)
rc, stdout, stderr = module.run_command(cmd)
-if re.search("^%s\s+enabled" % name, stdout, re.MULTILINE):
+if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE):
return True
return False

@@ -261,7 +261,7 @@ def get_quotas(name, nofail):
out = run_gluster(['volume', 'quota', name, 'list'])
for row in out.split('\n'):
if row[:1] == '/':
-q = re.split('\s+', row)
+q = re.split(r'\s+', row)
quotas[q[0]] = q[1]
return quotas

@@ -161,14 +161,14 @@ def optionDict(line, iface, option, value):
def getValueFromLine(s):
-spaceRe = re.compile('\s+')
+spaceRe = re.compile(r'\s+')
for m in spaceRe.finditer(s):
pass
valueEnd = m.start()
option = s.split()[0]
optionStart = s.find(option)
optionLen = len(option)
-valueStart = re.search('\s', s[optionLen + optionStart:]).end() + optionLen + optionStart
+valueStart = re.search(r'\s', s[optionLen + optionStart:]).end() + optionLen + optionStart
return s[valueStart:valueEnd]
@@ -286,7 +286,7 @@ def setInterfaceOption(module, lines, iface, option, raw_value, state):
old_value = target_option['value']
prefix_start = old_line.find(option)
optionLen = len(option)
-old_value_position = re.search("\s+".join(old_value.split()), old_line[prefix_start + optionLen:])
+old_value_position = re.search(r"\s+".join(old_value.split()), old_line[prefix_start + optionLen:])
start = old_value_position.start() + prefix_start + optionLen
end = old_value_position.end() + prefix_start + optionLen
line = old_line[:start] + value + old_line[end:]

@@ -66,7 +66,7 @@ class Blacklist(object):
return False
def get_pattern(self):
-return '^blacklist\s*' + self.module + '$'
+return r'^blacklist\s*' + self.module + '$'
def readlines(self):
f = open(self.filename, 'r')

@@ -70,10 +70,10 @@ def is_available(name, ubuntuMode):
* if the locale is present in /etc/locales.gen
* or if the locale is present in /usr/share/i18n/SUPPORTED"""
if ubuntuMode:
-__regexp = '^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+__regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
__locales_available = '/usr/share/i18n/SUPPORTED'
else:
-__regexp = '^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+__regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
__locales_available = '/etc/locale.gen'
re_compiled = re.compile(__regexp)
@@ -117,11 +117,11 @@ def replace_line(existing_line, new_line):
def set_locale(name, enabled=True):
""" Sets the state of the locale. Defaults to enabled. """
-search_string = '#{0,1}\s*%s (?P<charset>.+)' % name
+search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
if enabled:
-new_string = '%s \g<charset>' % (name)
+new_string = r'%s \g<charset>' % (name)
else:
-new_string = '# %s \g<charset>' % (name)
+new_string = r'# %s \g<charset>' % (name)
try:
f = open("/etc/locale.gen", "r")
lines = [re.sub(search_string, new_string, line) for line in f]

@@ -226,7 +226,7 @@ def get_lvm_version(module):
rc, out, err = module.run_command("%s version" % (ver_cmd))
if rc != 0:
return None
-m = re.search("LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
+m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
if not m:
return None
return mkversion(m.group(1), m.group(2), m.group(3))

@@ -169,11 +169,11 @@ class Sv(object):
else:
self.full_state = out
-m = re.search('\(pid (\d+)\)', out)
+m = re.search(r'\(pid (\d+)\)', out)
if m:
self.pid = m.group(1)
-m = re.search(' (\d+)s', out)
+m = re.search(r' (\d+)s', out)
if m:
self.duration = m.group(1)

@@ -672,10 +672,10 @@ class LinuxService(Service):
initpath = '/etc/init'
if self.upstart_version >= LooseVersion('0.6.7'):
-manreg = re.compile('^manual\s*$', re.M | re.I)
+manreg = re.compile(r'^manual\s*$', re.M | re.I)
config_line = 'manual\n'
else:
-manreg = re.compile('^start on manual\s*$', re.M | re.I)
+manreg = re.compile(r'^start on manual\s*$', re.M | re.I)
config_line = 'start on manual\n'
conf_file_name = "%s/%s.conf" % (initpath, self.name)
override_file_name = "%s/%s.override" % (initpath, self.name)
@@ -1308,7 +1308,7 @@ class SunOSService(Service):
# Support for synchronous restart/refresh is only supported on
# Oracle Solaris >= 11.2
for line in open('/etc/release', 'r').readlines():
-m = re.match('\s+Oracle Solaris (\d+\.\d+).*', line.rstrip())
+m = re.match(r'\s+Oracle Solaris (\d+\.\d+).*', line.rstrip())
if m and m.groups()[0] >= 11.2:
return True

@@ -180,11 +180,11 @@ class Svc(object):
else:
self.full_state = out
-m = re.search('\(pid (\d+)\)', out)
+m = re.search(r'\(pid (\d+)\)', out)
if m:
self.pid = m.group(1)
-m = re.search('(\d+) seconds', out)
+m = re.search(r'(\d+) seconds', out)
if m:
self.duration = m.group(1)

@@ -517,7 +517,7 @@ class SmartOSTimezone(Timezone):
# sm-set-timezone knows no state and will always set the timezone.
# XXX: https://github.com/joyent/smtools/pull/2
-m = re.match('^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1])
+m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1])
if not (m and m.groups()[-1] == value):
self.module.fail_json(msg='Failed to set timezone')
else:
