| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| repo_name: etianen/moody-templates | path: src/moody/loader.py | copies: 1 | size: 5355 |
"""A caching template loader that allows disk-based templates to be used."""
import os, sys
from abc import ABCMeta, abstractmethod
class TemplateDoesNotExist(Exception):
"""A named template could not be found."""
class Source(metaclass=ABCMeta):
"""A source of template data."""
__slots__ = ()
@abstractmethod
def load_source(self, template_name):
"""
Loads the template source code for the template of the given name.
If no source code can be found, returns None.
"""
class MemorySource(Source):
"""A template loader that loads from memory."""
__slots__ = ("templates",)
def __init__(self, templates):
"""Initializes the MemorySource from a dict of template source strings."""
self.templates = templates
def load_source(self, template_name):
"""Loads the source from the memory template dict."""
return self.templates.get(template_name)
def __str__(self):
"""Returns a string representation."""
return "<memory>"
class DirectorySource(Source):
"""A template loader that loads from a directory on disk."""
__slots__ = ("dirname")
def __init__(self, dirname):
"""
Initializes the DirectorySource.
On Windows, the dirname should be specified using forward slashes.
"""
self.dirname = dirname
def load_source(self, template_name):
"""Loads the source from disk."""
template_path = os.path.normpath(os.path.join(self.dirname, template_name))
if os.path.exists(template_path):
with open(template_path, "r") as template_file:
return template_file.read()
return None
def __str__(self):
"""Returns a string representation."""
return self.dirname
class DebugLoader:
"""
A template loader that doesn't cache compiled templates.
Terrible performance, but great for debugging.
"""
__slots__ = ("_sources", "_parser",)
def __init__(self, sources, parser):
"""
Initializes the Loader.
When specifying template_dirs on Windows, the forward slash '/' should be used as a path separator.
"""
self._sources = list(reversed(sources))
self._parser = parser
def compile(self, template, name="__string__", params=None, meta=None):
"""Compiles the given template source."""
default_meta = {
"__loader__": self
}
default_meta.update(meta or {})
return self._parser.compile(template, name, params, default_meta)
def _load_all(self, template_name):
"""Loads and returns all the named templates from the sources."""
# Load from all the template sources.
templates = []
for source in self._sources:
template_src = source.load_source(template_name)
if template_src is not None:
meta = {
"__super__": templates and templates[-1] or None,
}
templates.append(self.compile(template_src, template_name, {}, meta))
return templates
def load(self, *template_names):
"""
Loads and returns the named template.
If more than one template name is given, then the first template that exists will be used.
On Windows, the forward slash '/' should be used as a path separator.
"""
if not template_names:
raise ValueError("You must specify at least one template name.")
for template_name in template_names:
templates = self._load_all(template_name)
if templates:
return templates[-1]
# Raise an error.
template_name_str = ", ".join(repr(template_name) for template_name in template_names)
source_name_str = ", ".join(str(source) for source in self._sources)
raise TemplateDoesNotExist("Could not find a template named {} in any of {}.".format(template_name_str, source_name_str))
def render(self, *template_names, **params):
"""
Loads and renders the named template.
If more than one template name is given, then the first template that exists will be used.
On Windows, the forward slash '/' should be used as a path separator.
"""
return self.load(*template_names).render(**params)
class Loader(DebugLoader):
"""
A template loader.
Compiled templates are cached for performance.
"""
__slots__ = ("_cache",)
def __init__(self, sources, parser):
"""Initializes the loader."""
super(Loader, self).__init__(sources, parser)
self._cache = {}
def clear_cache(self):
"""Clears the template cache."""
self._cache.clear()
def _load_all(self, template_name):
"""A caching version of the debug loader's load method."""
if template_name in self._cache:
return self._cache[template_name]
template = super(Loader, self)._load_all(template_name)
self._cache[template_name] = template
return template
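# --- Illustrative usage (not part of the original module): a minimal sketch of
# how the loader classes fit together. The real moody parser is not shown here;
# _StubParser is a hypothetical stand-in whose compile() simply returns the raw
# source so the example stays self-contained.
if __name__ == "__main__":
    class _StubParser:
        def compile(self, template, name, params, meta):
            # A real parser would return a compiled template object.
            return template
    loader = Loader([MemorySource({"hello.txt": "Hello {name}!"})], _StubParser())
    print(loader.load("hello.txt"))                 # "Hello {name}!"
    print(loader.load("missing.txt", "hello.txt"))  # falls back to the first name that exists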
| license: bsd-3-clause | hash: -6,064,786,215,887,489,000 | line_mean: 30.692308 | line_max: 129 | alpha_frac: 0.580579 | autogenerated: false | ratio: 4.743136 | config_test: false | has_no_keywords: false | few_assignments: false |
| repo_name: ewjoachim/pyler | path: pyler/euler_test_base.py | copies: 1 | size: 2100 |
import signal
import unittest
import time
from . import website as w
class EulerProblem(unittest.TestCase):
problem_id = None
def solver(self, input_val):
raise NotImplementedError()
simple_input = None
simple_output = None
real_input = None
def solve_real(self):
"""
Returns the solution of the Problem for the real input
"""
return self.solver(self.real_input)
def solve_simple(self):
"""
Returns the solution of the Problem for the simple input
"""
return self.solver(self.simple_input)
@classmethod
def setUpClass(cls):
if cls.solver is EulerProblem.solver:
raise unittest.SkipTest(
"Not running the tests for a not implemented problem")
def test_simple(self):
"""
Checks the simple example
"""
self.assertEqual(self.solve_simple(), self.simple_output)
def test_real(self):
"""
Checks the real problem against the website
"""
website = w.Website()
real_output = self.solve_real()
self.assertTrue(w.check_solution(
website, self.problem_id, solution=real_output))
# Windows has no Alarm signal. Sorry pal.
use_signal = hasattr(signal, "SIGALRM")
def test_time(self):
"""
Checks that the real problem runs under a minute
"""
time_limit = 60
try:
if self.use_signal:
def handler(signum, frame): # pylint: disable=unused-argument
raise TimeoutError()
old_handler = signal.signal(signal.SIGALRM, handler)
signal.alarm(time_limit)
before = time.time()
self.solve_real()
after = time.time()
if after - before > time_limit:
raise TimeoutError()
except TimeoutError:
self.fail("Test failed to end in less than a minute.")
finally:
if self.use_signal:
signal.signal(signal.SIGALRM, old_handler)
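# --- Illustrative usage (not part of the original module): a subclass fills in
# the class attributes and solver(). For example (hypothetical Project Euler
# problem 1, sum of multiples of 3 or 5):
#
#     class Problem0001(EulerProblem):
#         problem_id = 1
#         simple_input = 10
#         simple_output = 23
#         real_input = 1000
#
#         def solver(self, input_val):
#             return sum(n for n in range(input_val) if n % 3 == 0 or n % 5 == 0)
#
# test_simple checks solver(simple_input) == simple_output offline, test_real
# submits solver(real_input) through the website wrapper, and test_time enforces
# the one-minute limit (via SIGALRM where the platform supports it).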
| license: mit | hash: 8,948,021,599,226,098,000 | line_mean: 26.631579 | line_max: 78 | alpha_frac: 0.57381 | autogenerated: false | ratio: 4.487179 | config_test: true | has_no_keywords: false | few_assignments: false |
| repo_name: almeidapaulopt/frappe | path: frappe/model/db_query.py | copies: 1 | size: 21364 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from six import iteritems, string_types
"""build query for doclistview and return results"""
import frappe, json, copy, re
import frappe.defaults
import frappe.share
import frappe.permissions
from frappe.utils import flt, cint, getdate, get_datetime, get_time, make_filter_tuple, get_filter, add_to_date
from frappe import _
from frappe.model import optional_fields
from frappe.model.utils.user_settings import get_user_settings, update_user_settings
from datetime import datetime
class DatabaseQuery(object):
def __init__(self, doctype):
self.doctype = doctype
self.tables = []
self.conditions = []
self.or_conditions = []
self.fields = None
self.user = None
self.ignore_ifnull = False
self.flags = frappe._dict()
def execute(self, query=None, fields=None, filters=None, or_filters=None,
docstatus=None, group_by=None, order_by=None, limit_start=False,
limit_page_length=None, as_list=False, with_childnames=False, debug=False,
ignore_permissions=False, user=None, with_comment_count=False,
join='left join', distinct=False, start=None, page_length=None, limit=None,
ignore_ifnull=False, save_user_settings=False, save_user_settings_fields=False,
update=None, add_total_row=None, user_settings=None):
if not ignore_permissions and not frappe.has_permission(self.doctype, "read", user=user):
frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(self.doctype))
raise frappe.PermissionError(self.doctype)
# filters and fields are swappable,
# since it's hard to remember what comes first
if (isinstance(fields, dict)
or (isinstance(fields, list) and fields and isinstance(fields[0], list))):
# if fields is given as a dict or a list of lists, it's probably filters
filters, fields = fields, filters
elif fields and isinstance(filters, list) \
and len(filters) > 1 and isinstance(filters[0], string_types):
# if `filters` is a list of strings, it's probably fields
filters, fields = fields, filters
if fields:
self.fields = fields
else:
self.fields = ["`tab{0}`.`name`".format(self.doctype)]
if start: limit_start = start
if page_length: limit_page_length = page_length
if limit: limit_page_length = limit
self.filters = filters or []
self.or_filters = or_filters or []
self.docstatus = docstatus or []
self.group_by = group_by
self.order_by = order_by
self.limit_start = 0 if (limit_start is False) else cint(limit_start)
self.limit_page_length = cint(limit_page_length) if limit_page_length else None
self.with_childnames = with_childnames
self.debug = debug
self.join = join
self.distinct = distinct
self.as_list = as_list
self.ignore_ifnull = ignore_ifnull
self.flags.ignore_permissions = ignore_permissions
self.user = user or frappe.session.user
self.update = update
self.user_settings_fields = copy.deepcopy(self.fields)
if user_settings:
self.user_settings = json.loads(user_settings)
if query:
result = self.run_custom_query(query)
else:
result = self.build_and_run()
if with_comment_count and not as_list and self.doctype:
self.add_comment_count(result)
if save_user_settings:
self.save_user_settings_fields = save_user_settings_fields
self.update_user_settings()
return result
def build_and_run(self):
args = self.prepare_args()
args.limit = self.add_limit()
if args.conditions:
args.conditions = "where " + args.conditions
if self.distinct:
args.fields = 'distinct ' + args.fields
query = """select %(fields)s from %(tables)s %(conditions)s
%(group_by)s %(order_by)s %(limit)s""" % args
return frappe.db.sql(query, as_dict=not self.as_list, debug=self.debug, update=self.update)
def prepare_args(self):
self.parse_args()
self.sanitize_fields()
self.extract_tables()
self.set_optional_columns()
self.build_conditions()
args = frappe._dict()
if self.with_childnames:
for t in self.tables:
if t != "`tab" + self.doctype + "`":
self.fields.append(t + ".name as '%s:name'" % t[4:-1])
# query dict
args.tables = self.tables[0]
# left join parent, child tables
for child in self.tables[1:]:
args.tables += " {join} {child} on ({child}.parent = {main}.name)".format(join=self.join,
child=child, main=self.tables[0])
if self.grouped_or_conditions:
self.conditions.append("({0})".format(" or ".join(self.grouped_or_conditions)))
args.conditions = ' and '.join(self.conditions)
if self.or_conditions:
args.conditions += (' or ' if args.conditions else "") + \
' or '.join(self.or_conditions)
self.set_field_tables()
args.fields = ', '.join(self.fields)
self.set_order_by(args)
self.validate_order_by_and_group_by(args.order_by)
args.order_by = args.order_by and (" order by " + args.order_by) or ""
self.validate_order_by_and_group_by(self.group_by)
args.group_by = self.group_by and (" group by " + self.group_by) or ""
return args
def parse_args(self):
"""Convert fields and filters from strings to list, dicts"""
if isinstance(self.fields, string_types):
if self.fields == "*":
self.fields = ["*"]
else:
try:
self.fields = json.loads(self.fields)
except ValueError:
self.fields = [f.strip() for f in self.fields.split(",")]
for filter_name in ["filters", "or_filters"]:
filters = getattr(self, filter_name)
if isinstance(filters, string_types):
filters = json.loads(filters)
if isinstance(filters, dict):
fdict = filters
filters = []
for key, value in iteritems(fdict):
filters.append(make_filter_tuple(self.doctype, key, value))
setattr(self, filter_name, filters)
def sanitize_fields(self):
'''
regex : ^.*[,();].*
purpose : The regex will look for malicious patterns like `,`, '(', ')', ';' in each
field, which may lead to SQL injection.
example :
field = "`DocType`.`issingle`, version()"
Because the field contains `,` and the MySQL function `version()`,
the regex lets the system filter out this field.
'''
regex = re.compile('^.*[,();].*')
blacklisted_keywords = ['select', 'create', 'insert', 'delete', 'drop', 'update', 'case']
blacklisted_functions = ['concat', 'concat_ws', 'if', 'ifnull', 'nullif', 'coalesce',
'connection_id', 'current_user', 'database', 'last_insert_id', 'session_user',
'system_user', 'user', 'version']
def _raise_exception():
frappe.throw(_('Cannot use sub-query or function in fields'), frappe.DataError)
for field in self.fields:
if regex.match(field):
if any(keyword in field.lower() for keyword in blacklisted_keywords):
_raise_exception()
if any("{0}(".format(keyword) in field.lower() \
for keyword in blacklisted_functions):
_raise_exception()
def extract_tables(self):
"""extract tables from fields"""
self.tables = ['`tab' + self.doctype + '`']
# add tables from fields
if self.fields:
for f in self.fields:
if ( not ("tab" in f and "." in f) ) or ("locate(" in f) or ("count(" in f):
continue
table_name = f.split('.')[0]
if table_name.lower().startswith('group_concat('):
table_name = table_name[13:]
if table_name.lower().startswith('ifnull('):
table_name = table_name[7:]
if not table_name[0]=='`':
table_name = '`' + table_name + '`'
if not table_name in self.tables:
self.append_table(table_name)
def append_table(self, table_name):
self.tables.append(table_name)
doctype = table_name[4:-1]
if (not self.flags.ignore_permissions) and (not frappe.has_permission(doctype)):
frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(doctype))
raise frappe.PermissionError(doctype)
def set_field_tables(self):
'''If there is more than one table, the fieldname must not be ambiguous.
If the fieldname is not explicitly qualified, prefix it with the default table'''
if len(self.tables) > 1:
for i, f in enumerate(self.fields):
if '.' not in f:
self.fields[i] = '{0}.{1}'.format(self.tables[0], f)
def set_optional_columns(self):
"""Removes optional columns like `_user_tags`, `_comments` etc. if not in table"""
columns = frappe.db.get_table_columns(self.doctype)
# remove from fields
to_remove = []
for fld in self.fields:
for f in optional_fields:
if f in fld and not f in columns:
to_remove.append(fld)
for fld in to_remove:
del self.fields[self.fields.index(fld)]
# remove from filters
to_remove = []
for each in self.filters:
if isinstance(each, string_types):
each = [each]
for element in each:
if element in optional_fields and element not in columns:
to_remove.append(each)
for each in to_remove:
if isinstance(self.filters, dict):
del self.filters[each]
else:
self.filters.remove(each)
def build_conditions(self):
self.conditions = []
self.grouped_or_conditions = []
self.build_filter_conditions(self.filters, self.conditions)
self.build_filter_conditions(self.or_filters, self.grouped_or_conditions)
# match conditions
if not self.flags.ignore_permissions:
match_conditions = self.build_match_conditions()
if match_conditions:
self.conditions.append("(" + match_conditions + ")")
def build_filter_conditions(self, filters, conditions, ignore_permissions=None):
"""build conditions from user filters"""
if ignore_permissions is not None:
self.flags.ignore_permissions = ignore_permissions
if isinstance(filters, dict):
filters = [filters]
for f in filters:
if isinstance(f, string_types):
conditions.append(f)
else:
conditions.append(self.prepare_filter_condition(f))
def prepare_filter_condition(self, f):
"""Returns a filter condition in the format:
ifnull(`tabDocType`.`fieldname`, fallback) operator "value"
"""
f = get_filter(self.doctype, f)
tname = ('`tab' + f.doctype + '`')
if not tname in self.tables:
self.append_table(tname)
if 'ifnull(' in f.fieldname:
column_name = f.fieldname
else:
column_name = '{tname}.{fname}'.format(tname=tname,
fname=f.fieldname)
can_be_null = True
# prepare in condition
if f.operator.lower() in ('in', 'not in'):
values = f.value or ''
if not isinstance(values, (list, tuple)):
values = values.split(",")
fallback = "''"
value = (frappe.db.escape((v or '').strip(), percent=False) for v in values)
value = '("{0}")'.format('", "'.join(value))
else:
df = frappe.get_meta(f.doctype).get("fields", {"fieldname": f.fieldname})
df = df[0] if df else None
if df and df.fieldtype in ("Check", "Float", "Int", "Currency", "Percent"):
can_be_null = False
if f.operator.lower() == 'between' and \
(f.fieldname in ('creation', 'modified') or (df and (df.fieldtype=="Date" or df.fieldtype=="Datetime"))):
value = get_between_date_filter(f.value, df)
fallback = "'0000-00-00 00:00:00'"
elif df and df.fieldtype=="Date":
value = getdate(f.value).strftime("%Y-%m-%d")
fallback = "'0000-00-00'"
elif (df and df.fieldtype=="Datetime") or isinstance(f.value, datetime):
value = get_datetime(f.value).strftime("%Y-%m-%d %H:%M:%S.%f")
fallback = "'0000-00-00 00:00:00'"
elif df and df.fieldtype=="Time":
value = get_time(f.value).strftime("%H:%M:%S.%f")
fallback = "'00:00:00'"
elif f.operator.lower() in ("like", "not like") or (isinstance(f.value, string_types) and
(not df or df.fieldtype not in ["Float", "Int", "Currency", "Percent", "Check"])):
value = "" if f.value==None else f.value
fallback = '""'
if f.operator.lower() in ("like", "not like") and isinstance(value, string_types):
# because "like" uses backslash (\) for escaping
value = value.replace("\\", "\\\\").replace("%", "%%")
else:
value = flt(f.value)
fallback = 0
# put it inside double quotes
if isinstance(value, string_types) and not f.operator.lower() == 'between':
value = '"{0}"'.format(frappe.db.escape(value, percent=False))
if (self.ignore_ifnull
or not can_be_null
or (f.value and f.operator.lower() in ('=', 'like'))
or 'ifnull(' in column_name.lower()):
condition = '{column_name} {operator} {value}'.format(
column_name=column_name, operator=f.operator,
value=value)
else:
condition = 'ifnull({column_name}, {fallback}) {operator} {value}'.format(
column_name=column_name, fallback=fallback, operator=f.operator,
value=value)
return condition
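# Illustrative examples (not part of the original method), assuming a "Task"
# doctype with a nullable "status" field:
#   ["Task", "status", "=", "Open"]  ->  `tabTask`.`status` = "Open"
#   ["Task", "status", "=", ""]      ->  ifnull(`tabTask`.`status`, "") = ""
# The ifnull() wrapper is skipped when the column cannot be null, when
# ignore_ifnull is set, or when a truthy value is compared with = / like.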
def build_match_conditions(self, as_condition=True):
"""add match conditions if applicable"""
self.match_filters = []
self.match_conditions = []
only_if_shared = False
if not self.user:
self.user = frappe.session.user
if not self.tables: self.extract_tables()
meta = frappe.get_meta(self.doctype)
role_permissions = frappe.permissions.get_role_permissions(meta, user=self.user)
self.shared = frappe.share.get_shared(self.doctype, self.user)
if not meta.istable and not role_permissions.get("read") and not self.flags.ignore_permissions:
only_if_shared = True
if not self.shared:
frappe.throw(_("No permission to read {0}").format(self.doctype), frappe.PermissionError)
else:
self.conditions.append(self.get_share_condition())
else:
# apply user permissions?
if role_permissions.get("apply_user_permissions", {}).get("read"):
# get user permissions
user_permissions = frappe.permissions.get_user_permissions(self.user)
self.add_user_permissions(user_permissions,
user_permission_doctypes=role_permissions.get("user_permission_doctypes").get("read"))
if role_permissions.get("if_owner", {}).get("read"):
self.match_conditions.append("`tab{0}`.owner = '{1}'".format(self.doctype,
frappe.db.escape(self.user, percent=False)))
if as_condition:
conditions = ""
if self.match_conditions:
# will turn out like ((blog_post in (..) and blogger in (...)) or (blog_category in (...)))
conditions = "((" + ") or (".join(self.match_conditions) + "))"
doctype_conditions = self.get_permission_query_conditions()
if doctype_conditions:
conditions += (' and ' + doctype_conditions) if conditions else doctype_conditions
# share is an OR condition, if there is a role permission
if not only_if_shared and self.shared and conditions:
conditions = "({conditions}) or ({shared_condition})".format(
conditions=conditions, shared_condition=self.get_share_condition())
return conditions
else:
return self.match_filters
def get_share_condition(self):
return """`tab{0}`.name in ({1})""".format(self.doctype, ", ".join(["'%s'"] * len(self.shared))) % \
tuple([frappe.db.escape(s, percent=False) for s in self.shared])
def add_user_permissions(self, user_permissions, user_permission_doctypes=None):
user_permission_doctypes = frappe.permissions.get_user_permission_doctypes(user_permission_doctypes, user_permissions)
meta = frappe.get_meta(self.doctype)
for doctypes in user_permission_doctypes:
match_filters = {}
match_conditions = []
# check in links
for df in meta.get_fields_to_check_permissions(doctypes):
user_permission_values = user_permissions.get(df.options, [])
cond = 'ifnull(`tab{doctype}`.`{fieldname}`, "")=""'.format(doctype=self.doctype, fieldname=df.fieldname)
if user_permission_values:
if not cint(frappe.get_system_settings("apply_strict_user_permissions")):
condition = cond + " or "
else:
condition = ""
condition += """`tab{doctype}`.`{fieldname}` in ({values})""".format(
doctype=self.doctype, fieldname=df.fieldname,
values=", ".join([('"'+frappe.db.escape(v, percent=False)+'"') for v in user_permission_values]))
else:
condition = cond
match_conditions.append("({condition})".format(condition=condition))
match_filters[df.options] = user_permission_values
if match_conditions:
self.match_conditions.append(" and ".join(match_conditions))
if match_filters:
self.match_filters.append(match_filters)
def get_permission_query_conditions(self):
condition_methods = frappe.get_hooks("permission_query_conditions", {}).get(self.doctype, [])
if condition_methods:
conditions = []
for method in condition_methods:
c = frappe.call(frappe.get_attr(method), self.user)
if c:
conditions.append(c)
return " and ".join(conditions) if conditions else None
def run_custom_query(self, query):
if '%(key)s' in query:
query = query.replace('%(key)s', 'name')
return frappe.db.sql(query, as_dict = (not self.as_list))
def set_order_by(self, args):
meta = frappe.get_meta(self.doctype)
if self.order_by:
args.order_by = self.order_by
else:
args.order_by = ""
# don't add order by from meta if a mysql group function is used without group by clause
group_function_without_group_by = (len(self.fields)==1 and
( self.fields[0].lower().startswith("count(")
or self.fields[0].lower().startswith("min(")
or self.fields[0].lower().startswith("max(")
) and not self.group_by)
if not group_function_without_group_by:
sort_field = sort_order = None
if meta.sort_field and ',' in meta.sort_field:
# multiple sort given in doctype definition
# Example:
# `idx desc, modified desc`
# will convert to
# `tabItem`.`idx` desc, `tabItem`.`modified` desc
args.order_by = ', '.join(['`tab{0}`.`{1}` {2}'.format(self.doctype,
f.split()[0].strip(), f.split()[1].strip()) for f in meta.sort_field.split(',')])
else:
sort_field = meta.sort_field or 'modified'
sort_order = (meta.sort_field and meta.sort_order) or 'desc'
args.order_by = "`tab{0}`.`{1}` {2}".format(self.doctype, sort_field or "modified", sort_order or "desc")
# draft docs always on top
if meta.is_submittable:
args.order_by = "`tab{0}`.docstatus asc, {1}".format(self.doctype, args.order_by)
def validate_order_by_and_group_by(self, parameters):
"""Check order by, group by so that atleast one column is selected and does not have subquery"""
if not parameters:
return
_lower = parameters.lower()
if 'select' in _lower and ' from ' in _lower:
frappe.throw(_('Cannot use sub-query in order by'))
for field in parameters.split(","):
if "." in field and field.strip().startswith("`tab"):
tbl = field.strip().split('.')[0]
if tbl not in self.tables:
if tbl.startswith('`'):
tbl = tbl[4:-1]
frappe.throw(_("Please select atleast 1 column from {0} to sort/group").format(tbl))
def add_limit(self):
if self.limit_page_length:
return 'limit %s, %s' % (self.limit_start, self.limit_page_length)
else:
return ''
def add_comment_count(self, result):
for r in result:
if not r.name:
continue
r._comment_count = 0
if "_comments" in r:
r._comment_count = len(json.loads(r._comments or "[]"))
def update_user_settings(self):
# update user settings if new search
user_settings = json.loads(get_user_settings(self.doctype))
if hasattr(self, 'user_settings'):
user_settings.update(self.user_settings)
if self.save_user_settings_fields:
user_settings['fields'] = self.user_settings_fields
update_user_settings(self.doctype, user_settings)
def get_order_by(doctype, meta):
order_by = ""
sort_field = sort_order = None
if meta.sort_field and ',' in meta.sort_field:
# multiple sort given in doctype definition
# Example:
# `idx desc, modified desc`
# will convert to
# `tabItem`.`idx` desc, `tabItem`.`modified` desc
order_by = ', '.join(['`tab{0}`.`{1}` {2}'.format(doctype,
f.split()[0].strip(), f.split()[1].strip()) for f in meta.sort_field.split(',')])
else:
sort_field = meta.sort_field or 'modified'
sort_order = (meta.sort_field and meta.sort_order) or 'desc'
order_by = "`tab{0}`.`{1}` {2}".format(doctype, sort_field or "modified", sort_order or "desc")
# draft docs always on top
if meta.is_submittable:
order_by = "`tab{0}`.docstatus asc, {1}".format(doctype, order_by)
return order_by
@frappe.whitelist()
def get_list(doctype, *args, **kwargs):
'''wrapper for DatabaseQuery'''
kwargs.pop('cmd', None)
return DatabaseQuery(doctype).execute(None, *args, **kwargs)
def is_parent_only_filter(doctype, filters):
#check if filters contains only parent doctype
only_parent_doctype = True
if isinstance(filters, list):
for flt in filters:
if doctype not in flt:
only_parent_doctype = False
if 'Between' in flt:
flt[3] = get_between_date_filter(flt[3])
return only_parent_doctype
def get_between_date_filter(value, df=None):
'''
return the formatted date as per the given example
[u'2017-11-01', u'2017-11-03'] => '2017-11-01 00:00:00.000000' AND '2017-11-04 00:00:00.000000'
'''
from_date = None
to_date = None
date_format = "%Y-%m-%d %H:%M:%S.%f"
if df:
date_format = "%Y-%m-%d %H:%M:%S.%f" if df.fieldtype == 'Datetime' else "%Y-%m-%d"
if value and isinstance(value, (list, tuple)):
if len(value) >= 1: from_date = value[0]
if len(value) >= 2: to_date = value[1]
if not df or (df and df.fieldtype == 'Datetime'):
to_date = add_to_date(to_date,days=1)
data = "'%s' AND '%s'" % (
get_datetime(from_date).strftime(date_format),
get_datetime(to_date).strftime(date_format))
return data
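# --- Illustrative usage (not part of the original module): a sketch of how this
# module is normally driven, assuming an initialized frappe site/session and a
# "ToDo" doctype used purely as an example.
#
#     DatabaseQuery("ToDo").execute(
#         fields=["name", "description"],
#         filters={"status": "Open"},
#         order_by="modified desc",
#         limit_page_length=20,
#     )
#
# The whitelisted get_list() wrapper below accepts the same keyword arguments
# and is exposed to clients via @frappe.whitelist().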
| license: mit | hash: -5,802,090,927,492,263,000 | line_mean: 32.43349 | line_max: 120 | alpha_frac: 0.66921 | autogenerated: false | ratio: 3.140379 | config_test: false | has_no_keywords: false | few_assignments: false |
| repo_name: ratzeni/vcf-miner.client | path: setup.py | copies: 1 | size: 1546 |
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'VERSION')) as f:
__version__ = f.read().strip()
with open(os.path.join(here, 'requirements.txt')) as f:
required = f.read().splitlines()
with open(os.path.join(here, 'README.rst')) as f:
long_description = f.read()
extra_files = [os.path.join(here, 'requirements.txt'),
os.path.join(here, 'README.rst'),
os.path.join(here, 'VERSION'),
]
AuthorInfo = (
("Atzeni Rossano", "ratzeni@crs4.it"),
)
setup(name = "vcfminerclient",
version=__version__,
description = "client package for VCFMiner",
long_description=long_description,
author=",".join(a[0] for a in AuthorInfo),
author_email=",".join("<%s>" % a[1] for a in AuthorInfo),
zip_safe=True,
url='https://github.com/ratzeni/vcf-miner.client',
packages=find_packages(exclude=('tests',)),
keywords='utilities',
install_requires=required,
package_data={'': extra_files},
license='MIT',
platforms="Posix; MacOS X; Windows",
classifiers=["Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Internet",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5"],
)
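# --- Illustrative usage (not part of the original file): with VERSION,
# requirements.txt and README.rst present alongside this script, the package is
# built/installed in the usual setuptools way, e.g.:
#
#     pip install .             # install into the current environment
#     python setup.py sdist     # build a source distribution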
| license: mit | hash: -9,032,565,296,916,977,000 | line_mean: 31.893617 | line_max: 63 | alpha_frac: 0.575679 | autogenerated: false | ratio: 3.7343 | config_test: false | has_no_keywords: false | few_assignments: false |
| repo_name: marcuschia/ShaniXBMCWork | path: script.video.F4mProxy/lib/f4mDownloader.py | copies: 1 | size: 49882 |
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as etree
import base64
from struct import unpack, pack
import sys
import io
import os
import time
import itertools
import xbmcaddon
import xbmc
import urllib2,urllib
import traceback
import urlparse
import posixpath
import re
import hmac
import hashlib
import binascii
import zlib
from hashlib import sha256
import cookielib
import akhds
#import youtube_dl
#from youtube_dl.utils import *
addon_id = 'script.video.F4mProxy'
selfAddon = xbmcaddon.Addon(id=addon_id)
__addonname__ = selfAddon.getAddonInfo('name')
__icon__ = selfAddon.getAddonInfo('icon')
downloadPath = xbmc.translatePath(selfAddon.getAddonInfo('profile'))#selfAddon["profile"])
F4Mversion=''
#from Crypto.Cipher import AES
value_unsafe = '%+&;#'
VALUE_SAFE = ''.join(chr(c) for c in range(33, 127)
if chr(c) not in value_unsafe)
def urlencode_param(value):
"""Minimal URL encoding for query parameter"""
return urllib.quote_plus(value, safe=VALUE_SAFE)
class FlvReader(io.BytesIO):
"""
Reader for Flv files
The file format is documented in https://www.adobe.com/devnet/f4v.html
"""
# Utility functions for reading numbers and strings
def read_unsigned_long_long(self):
return unpack('!Q', self.read(8))[0]
def read_unsigned_int(self):
return unpack('!I', self.read(4))[0]
def read_unsigned_char(self):
return unpack('!B', self.read(1))[0]
def read_string(self):
res = b''
while True:
char = self.read(1)
if char == b'\x00':
break
res+=char
return res
def read_box_info(self):
"""
Read a box and return the info as a tuple: (box_size, box_type, box_data)
"""
real_size = size = self.read_unsigned_int()
box_type = self.read(4)
header_end = 8
if size == 1:
real_size = self.read_unsigned_long_long()
header_end = 16
return real_size, box_type, self.read(real_size-header_end)
def read_asrt(self, debug=False):
version = self.read_unsigned_char()
self.read(3) # flags
quality_entry_count = self.read_unsigned_char()
quality_modifiers = []
for i in range(quality_entry_count):
quality_modifier = self.read_string()
quality_modifiers.append(quality_modifier)
segment_run_count = self.read_unsigned_int()
segments = []
#print 'segment_run_count',segment_run_count
for i in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
#print 'segments',segments
return {'version': version,
'quality_segment_modifiers': quality_modifiers,
'segment_run': segments,
}
def read_afrt(self, debug=False):
version = self.read_unsigned_char()
self.read(3) # flags
time_scale = self.read_unsigned_int()
quality_entry_count = self.read_unsigned_char()
quality_entries = []
for i in range(quality_entry_count):
mod = self.read_string()
quality_entries.append(mod)
fragments_count = self.read_unsigned_int()
#print 'fragments_count',fragments_count
fragments = []
for i in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
if duration == 0:
discontinuity_indicator = self.read_unsigned_char()
else:
discontinuity_indicator = None
fragments.append({'first': first,
'ts': first_ts,
'duration': duration,
'discontinuity_indicator': discontinuity_indicator,
})
#print 'fragments',fragments
return {'version': version,
'time_scale': time_scale,
'fragments': fragments,
'quality_entries': quality_entries,
}
def read_abst(self, debug=False):
version = self.read_unsigned_char()
self.read(3) # flags
bootstrap_info_version = self.read_unsigned_int()
streamType=self.read_unsigned_char()#self.read(1) # Profile,Live,Update,Reserved
islive=False
if (streamType & 0x20) >> 5:
islive=True
print 'LIVE',streamType,islive
time_scale = self.read_unsigned_int()
current_media_time = self.read_unsigned_long_long()
smpteTimeCodeOffset = self.read_unsigned_long_long()
movie_identifier = self.read_string()
server_count = self.read_unsigned_char()
servers = []
for i in range(server_count):
server = self.read_string()
servers.append(server)
quality_count = self.read_unsigned_char()
qualities = []
for i in range(quality_count):
quality = self.read_string()
qualities.append(quality)
drm_data = self.read_string()
metadata = self.read_string()
segments_count = self.read_unsigned_char()
#print 'segments_count11',segments_count
segments = []
for i in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
#print 'fragments_run_count11',fragments_run_count
fragments = []
for i in range(fragments_run_count):
# This info is only useful for the player, it doesn't give more info
# for the download process
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())
return {'segments': segments,
'movie_identifier': movie_identifier,
'drm_data': drm_data,
'fragments': fragments,
},islive
def read_bootstrap_info(self):
"""
Read the bootstrap information from the stream,
returns a dict with the following keys:
segments: A list of dicts with the following keys
segment_run: A list of (first_segment, fragments_per_segment) tuples
"""
total_size, box_type, box_data = self.read_box_info()
assert box_type == b'abst'
return FlvReader(box_data).read_abst()
def read_bootstrap_info(bootstrap_bytes):
return FlvReader(bootstrap_bytes).read_bootstrap_info()
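# Illustrative note (not part of the original file): the bootstrap bytes are the
# base64-decoded payload of the manifest's <bootstrapInfo> element (or the body
# fetched from its url attribute), e.g.:
#
#     boot_info, is_live = read_bootstrap_info(base64.b64decode(bootstrap_text))
#
# boot_info['segments'] and boot_info['fragments'] then feed build_fragments_list() below.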
def build_fragments_list(boot_info, startFromFregment=None, live=True):
""" Return a list of (segment, fragment) for each fragment in the video """
res = []
segment_run_table = boot_info['segments'][0]
print 'segment_run_table',segment_run_table
# I've only found videos with one segment
#if len(segment_run_table['segment_run'])>1:
# segment_run_table['segment_run']=segment_run_table['segment_run'][-2:] #pick latest
frag_start = boot_info['fragments'][0]['fragments']
#print boot_info['fragments']
# sum(j for i, j in segment_run_table['segment_run'])
first_frag_number=frag_start[0]['first']
last_frag_number=frag_start[-1]['first']
if last_frag_number==0:
last_frag_number=frag_start[-2]['first']
endfragment=0
segment_to_start=None
for current in range (len(segment_run_table['segment_run'])):
seg,fregCount=segment_run_table['segment_run'][current]
#print 'segmcount',seg,fregCount
if (not live):
frag_end=last_frag_number
else:
frag_end=first_frag_number+fregCount-1
if fregCount>10000:
frag_end=last_frag_number
#if frag_end
segment_run_table['segment_run'][current]=(seg,fregCount,first_frag_number,frag_end)
if (not startFromFregment==None) and startFromFregment>=first_frag_number and startFromFregment<=frag_end:
segment_to_start=current
first_frag_number+=fregCount
# print 'current status',segment_run_table['segment_run']
#if we have no index then take the last segment
if segment_to_start==None:
segment_to_start=len(segment_run_table['segment_run'])-1
#if len(segment_run_table['segment_run'])>2:
# segment_to_start=len(segment_run_table['segment_run'])-2;
if live:
startFromFregment=segment_run_table['segment_run'][-1][3]
# if len(boot_info['fragments'][0]['fragments'])>1: #go bit back
# startFromFregment= boot_info['fragments'][0]['fragments'][-1]['first']
else:
startFromFregment= boot_info['fragments'][0]['fragments'][0]['first'] #start from begining
#if len(boot_info['fragments'][0]['fragments'])>2: #go little bit back
# startFromFregment= boot_info['fragments'][0]['fragments'][-2]['first']
#print 'startFromFregment',startFromFregment,boot_info,len(boot_info['fragments'][0]['fragments'])
#print 'segment_to_start',segment_to_start
for currentIndex in range (segment_to_start,len(segment_run_table['segment_run'])):
currentSegment=segment_run_table['segment_run'][currentIndex]
#print 'currentSegment',currentSegment
(seg,fregCount,frag_start,frag_end)=currentSegment
#print 'startFromFregment',startFromFregment,
if (not startFromFregment==None) and startFromFregment>=frag_start and startFromFregment<=frag_end:
frag_start=startFromFregment
#print 'frag_start',frag_start,frag_end
for currentFreg in range(frag_start,frag_end+1):
res.append((seg,currentFreg ))
# print 'fragmentlist',res,boot_info
return res
#totalFrags=sum(j for i, j in segment_run_table['segment_run'])
#lastSegment=segment_run_table['segment_run'][-1]
#lastSegmentStart= lastSegment[0]
#lastSegmentFragCount = lastSegment[1]
#print 'totalFrags',totalFrags
#first_frag_number = frag_start[0]['first']
#startFragOfLastSegment= first_frag_number +totalFrags - lastSegmentFragCount
#for (i, frag_number) in zip(range(1, lastSegmentFragCount+1), itertools.count(startFragOfLastSegment)):
# res.append((lastSegmentStart,frag_number )) #this was i, i am using first segement start
#return res
#segment_run_entry = segment_run_table['segment_run'][0]
#print 'segment_run_entry',segment_run_entry,segment_run_table
#n_frags = segment_run_entry[1]
#startingPoint = segment_run_entry[0]
#fragment_run_entry_table = boot_info['fragments'][0]['fragments']
#frag_entry_index = 0
#first_frag_number = fragment_run_entry_table[0]['first']
#first_frag_number=(startingPoint*n_frags) -(n_frags)+1
#print 'THENUMBERS',startingPoint,n_frags,first_frag_number
#for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
# res.append((startingPoint,frag_number )) #this was i, i am using first segement start
#return res
def join(base,url):
join = urlparse.urljoin(base,url)
url = urlparse.urlparse(join)
path = posixpath.normpath(url[2])
return urlparse.urlunparse(
(url.scheme,url.netloc,path,url.params,url.query,url.fragment)
)
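# Illustrative example (not part of the original file):
#     join('http://example.com/hds/stream.f4m', '../media/stream1.f4m')
#     -> 'http://example.com/media/stream1.f4m'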
def _add_ns(prop):
#print 'F4Mversion',F4Mversion
return '{http://ns.adobe.com/f4m/%s}%s' %(F4Mversion, prop)
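# Illustrative example (not part of the original file): once F4Mversion has been
# parsed from the manifest (e.g. '1.0'), _add_ns('media') yields
# '{http://ns.adobe.com/f4m/1.0}media', the ElementTree-qualified tag name.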
#class ReallyQuietDownloader(youtube_dl.FileDownloader):
# def to_screen(sef, *args, **kargs):
# pass
class F4MDownloader():
"""
A downloader for f4m manifests or AdobeHDS.
"""
outputfile =''
clientHeader=None
cookieJar=cookielib.LWPCookieJar()
def __init__(self):
self.init_done=False
def getUrl(self,url, ischunkDownloading=False):
try:
post=None
print 'url',url
#openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler)
cookie_handler = urllib2.HTTPCookieProcessor(self.cookieJar)
openner = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
if post:
req = urllib2.Request(url, post)
else:
req = urllib2.Request(url)
ua_header=False
if self.clientHeader:
for n,v in self.clientHeader:
req.add_header(n,v)
if n=='User-Agent':
ua_header=True
if not ua_header:
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; Win64; x64; Trident/7.0; rv:11.0) like Gecko')
#response = urllib2.urlopen(req)
if self.proxy and ( (not ischunkDownloading) or self.use_proxy_for_chunks ):
req.set_proxy(self.proxy, 'http')
response = openner.open(req)
data=response.read()
return data
except:
print 'Error in getUrl'
traceback.print_exc()
return None
def _write_flv_header2(self, stream):
"""Writes the FLV header and the metadata to stream"""
# FLV header
stream.write(b'FLV\x01')
stream.write(b'\x01')
stream.write(b'\x00\x00\x00\x09')
# FLV File body
stream.write(b'\x00\x00\x00\x09')
def _write_flv_header(self, stream, metadata):
"""Writes the FLV header and the metadata to stream"""
# FLV header
stream.write(b'FLV\x01')
stream.write(b'\x05')
stream.write(b'\x00\x00\x00\x09')
# FLV File body
stream.write(b'\x00\x00\x00\x00')
# FLVTAG
if metadata:
stream.write(b'\x12') # Script data
stream.write(pack('!L',len(metadata))[1:]) # Size of the metadata with 3 bytes
stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
stream.write(metadata)
# All this magic numbers have been extracted from the output file
# produced by AdobeHDS.php (https://github.com/K-S-V/Scripts)
stream.write(b'\x00\x00\x01\x73')
def init(self, out_stream, url, proxy=None,use_proxy_for_chunks=True,g_stopEvent=None, maxbitrate=0, auth='',swf=None):
try:
self.init_done=False
self.total_frags=0
self.init_url=url
self.clientHeader=None
self.status='init'
self.proxy = proxy
self.auth=auth
#self.auth="pvtoken=exp%3D9999999999%7Eacl%3D%252f%252a%7Edata%3DZXhwPTE0MDYzMDMxMTV+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPWQxODA5MWVkYTQ4NDI3NjFjODhjOWQwY2QxNTk3YTI0MWQwOWYwNWI1N2ZmMDE0ZjcxN2QyMTVjZTJkNmJjMDQ%3D%2196e4sdLWrezE46RaCBzzP43/LEM5en2KujAosbeDimQ%3D%7Ehmac%3DACF8A1E4467676C9BCE2721CA5EFF840BD6ED1780046954039373A3B0D942ADC&hdntl=exp=1406303115~acl=%2f*~data=hdntl~hmac=4ab96fa533fd7c40204e487bfc7befaf31dd1f49c27eb1f610673fed9ff97a5f&als=0,2,0,0,0,NaN,0,0,0,37,f,52293145.57,52293155.9,t,s,GARWLHLMHNGA,2.11.3,37&hdcore=2.11.3"
if self.auth ==None or self.auth =='None' :
self.auth=''
if self.proxy and len(self.proxy)==0:
self.proxy=None
self.use_proxy_for_chunks=use_proxy_for_chunks
self.out_stream=out_stream
self.g_stopEvent=g_stopEvent
self.maxbitrate=maxbitrate
if '|' in url:
sp = url.split('|')
url = sp[0]
self.clientHeader = sp[1]
self.clientHeader= urlparse.parse_qsl(self.clientHeader)
print 'header received, now url and headers are',url, self.clientHeader
self.status='init done'
self.url=url
self.swf=swf
#self.downloadInternal( url)
return self.preDownoload()
#os.remove(self.outputfile)
except:
traceback.print_exc()
self.status='finished'
return False
def preDownoload(self):
global F4Mversion
try:
self.seqNumber=0
self.live=False #todo find if its Live or not
man_url = self.url
url=self.url
print 'Downloading f4m manifest'
manifest = self.getUrl(man_url)#.read()
if not manifest:
return False
print len(manifest)
try:
print manifest
except: pass
self.status='manifest done'
#self.report_destination(filename)
#dl = ReallyQuietDownloader(self.ydl, {'continuedl': True, 'quiet': True, 'noprogress':True})
version_fine="xmlns=\".*?\/([0-9].*?)\""
F4Mversion =re.findall(version_fine, manifest)[0]
#print F4Mversion,_add_ns('media')
auth_patt='<pv-2.0>(.*?)<'
auth_obj =re.findall(auth_patt, manifest)
self.auth20=''
if auth_obj and len(auth_obj)>0:
self.auth20=auth_obj[0] #not doing anything for time being
print 'auth got from xml',self.auth,self.auth20
#quick for one example where the xml was wrong.
if '\"bootstrapInfoId' in manifest:
manifest=manifest.replace('\"bootstrapInfoId','\" bootstrapInfoId')
doc = etree.fromstring(manifest)
print doc
# Added the-one 05082014
# START
# Check if manifest defines a baseURL tag
baseURL_tag = doc.find(_add_ns('baseURL'))
if baseURL_tag != None:
man_url = baseURL_tag.text
url = man_url
self.url = url
print 'base url defined as: %s' % man_url
# END
try:
#formats = [(int(f.attrib.get('bitrate', -1)),f) for f in doc.findall(_add_ns('media'))]
formats=[]
for f in doc.findall(_add_ns('media')):
vtype=f.attrib.get('type', '')
if f.attrib.get('type', '')=='video' or vtype=='' :
formats.append([int(f.attrib.get('bitrate', -1)),f])
print 'format works',formats
except:
formats=[(int(0),f) for f in doc.findall(_add_ns('media'))]
#print 'formats',formats
formats = sorted(formats, key=lambda f: f[0])
if self.maxbitrate==0:
rate, media = formats[-1]
elif self.maxbitrate==-1:
rate, media = formats[0]
else: #find bitrate
brselected=None
rate, media=None,None
for r, m in formats:
if r<=self.maxbitrate:
rate, media=r,m
else:
break
if media==None:
rate, media = formats[-1]
dest_stream = self.out_stream
print 'rate selected',rate
self.metadata=None
try:
self.metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
print 'metadata stream read done'#,media.find(_add_ns('metadata')).text
#self._write_flv_header(dest_stream, metadata)
#dest_stream.flush()
except: pass
# Modified the-one 05082014
# START
# url and href can be used interchangeably
# so if url attribute is not present
# check for href attribute
try:
mediaUrl=media.attrib['url']
except:
mediaUrl=media.attrib['href']
# END
# Added the-one 05082014
# START
# if media url/href points to another f4m file
if '.f4m' in mediaUrl:
sub_f4m_url = join(man_url,mediaUrl)
print 'media points to another f4m file: %s' % sub_f4m_url
print 'Downloading f4m sub manifest'
sub_manifest = self.getUrl(sub_f4m_url)#.read()
if not sub_manifest:
return False
print len(sub_manifest)
try:
print sub_manifest
except: pass
self.status='sub manifest done'
F4Mversion =re.findall(version_fine, sub_manifest)[0]
doc = etree.fromstring(sub_manifest)
print doc
media = doc.find(_add_ns('media'))
if media == None:
return False
try:
self.metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
print 'metadata stream read done'
except: pass
try:
mediaUrl=media.attrib['url']
except:
mediaUrl=media.attrib['href']
# END
try:
bootStrapID = media.attrib['bootstrapInfoId']
except: bootStrapID='xx'
#print 'mediaUrl',mediaUrl
base_url = join(man_url,mediaUrl)#compat_urlparse.urljoin(man_url,media.attrib['url'])
keybase_url=join(man_url,'key_')
if mediaUrl.endswith('/') and not base_url.endswith('/'):
base_url += '/'
self.base_url=base_url
self.keybase_url=keybase_url
bsArray=doc.findall(_add_ns('bootstrapInfo'))
print 'bootStrapID',bootStrapID
#bootStrapID='bootstrap_450'
bootstrap=self.getBootStrapWithId(bsArray,bootStrapID)
if bootstrap==None: #if not available then find any!
print 'bootStrapID NOT Found'
bootstrap=doc.findall(_add_ns('bootstrapInfo'))[0]
else:
print 'found bootstrap with id',bootstrap
#print 'bootstrap',bootstrap
bootstrapURL1=''
try:
bootstrapURL1=bootstrap.attrib['url']
except: pass
bootstrapURL=''
bootstrapData=None
queryString=None
if bootstrapURL1=='':
bootstrapData=base64.b64decode(doc.findall(_add_ns('bootstrapInfo'))[0].text)
#
else:
from urlparse import urlparse
queryString = urlparse(url).query
print 'queryString11',queryString
if len(queryString)==0: queryString=None
if queryString==None or '?' in bootstrap.attrib['url']:
bootstrapURL = join(man_url,bootstrap.attrib['url'])# take out querystring for later
queryString = urlparse(bootstrapURL).query
print 'queryString override',queryString
if len(queryString)==0:
queryString=None
if len(self.auth)>0:
bootstrapURL+='?'+self.auth
queryString=self.auth#self._pv_params('',self.auth20)#not in use
elif len(self.auth20)>0:
queryString=self._pv_params(self.swf,self.auth20)
bootstrapURL+='?'+queryString
else:
print 'queryString!!',queryString
bootstrapURL = join(man_url,bootstrap.attrib['url'])+'?'+queryString
if len(self.auth)>0:
authval=self.auth#self._pv_params('',self.auth20)#not in use
bootstrapURL = join(man_url,bootstrap.attrib['url'])+'?'+authval
queryString=authval
elif len(self.auth20)>0:
authval=self._pv_params(self.swf,self.auth20)#requires swf param
bootstrapURL = join(man_url,bootstrap.attrib['url'])+'?'+authval
queryString=authval
print 'bootstrapURL',bootstrapURL
if queryString==None:
queryString=''
self.bootstrapURL=bootstrapURL
self.queryString=queryString
self.bootstrap, self.boot_info, self.fragments_list,self.total_frags=self.readBootStrapInfo(bootstrapURL,bootstrapData)
self.init_done=True
return True
except:
traceback.print_exc()
return False
def readAKKey(self, data):
messageKeyExists=False
key=""
firstByte=ord(data[0])
pos=1
returnIV=None
if firstByte==12: #version12
pos+=4+4+2+1;
# print 'indeedddd',firstByte
# print 'data',repr(data)
messageByte=ord(data[pos])
pos+=1
messageKeyExists=(messageByte & 4) > 0;
messageIV=(messageByte & 2) > 0;
if messageIV:
pos+=16
# print 'IV exists'
if messageKeyExists:
# print 'Message Key exists!!!'
returnIV=data[pos-16:pos]
d = str(data[pos]);
pos+=1
key = d;
while(d != '\x00'):
d = str(data[pos]);
pos+=1
if d != '\x00':
key+= d;
else:
print 'SOMETHING WRONG.... got other than 12'
print 1/0 # shouldn't get here
return messageKeyExists, key,pos,returnIV
def getFrames(self,box_data, remainingdata):
frames=[]
KeepProcessing = False;
currentStep= 0;
tagLen = 0;
if(box_data):
if remainingdata and len(remainingdata)>0:
box_data=remainingdata+box_data
remainingdata=None
lookForTagStart = 0;
KeepProcessing = True;
while(KeepProcessing and lookForTagStart<len(box_data)):
currentStep = ord(box_data[lookForTagStart]);
tagLen = ord(box_data[lookForTagStart + 1]) << 16 | ord(box_data[lookForTagStart + 2]) << 8 | ord(box_data[lookForTagStart + 3]) & 255;
nextTag = lookForTagStart + 11 + tagLen + 4
if (nextTag > len(box_data) and currentStep > 0):
remainingdata = [];
remainingdata=box_data[lookForTagStart:]
KeepProcessing = False;
elif (currentStep > 0):
chunk = []
chunk=box_data[lookForTagStart:lookForTagStart+tagLen + 11 + 4]
frames.append((1,chunk))
elif (currentStep == 0):
KeepProcessing = False;
#if nextTag==len(box_data):
# KeepProcessing=False
#print nextTag, len(box_data)
lookForTagStart = nextTag;
return frames,remainingdata
# #def AES(self,key):
# return Rijndael(key, keySize=16, blockSize=16, padding=padWithPadLen())
# def AES_CBC(self,key):
# return CBC(blockCipherInstance=AES(key))
def addBytesToOutput(self,prefix,data,post,segmentid,buffer):
dataLen=0
if data and len(data)>0:
dataLen=len(data)
#print 'Incomming',repr(prefix)
prefix=list(prefix)
prefix[3]=chr(dataLen & 255)
prefix[2]=chr(dataLen >> 8 & 255);
prefix[1]=chr(dataLen >> 16 & 255);
#print repr(prefix)
prefix=''.join(prefix)
#print repr(prefix)
#print len(prefix)
finalArray=prefix
if data and len(data)>0:
finalArray+=data
if post and len(post):
finalArray+=post
# with open("c:\\temp\\myfile.mp4", 'a+b') as output:
# output.write(finalArray)
lenReturned=len(finalArray)
buffer.write(finalArray)
buffer.flush()
return lenReturned
def keep_sending_video(self,dest_stream, segmentToStart=None, totalSegmentToSend=0):
try:
self.status='download Starting'
self.downloadInternal(self.url,dest_stream,segmentToStart,totalSegmentToSend)
except:
traceback.print_exc()
self.status='finished'
def downloadInternal(self,url,dest_stream ,segmentToStart=None,totalSegmentToSend=0):
global F4Mversion
try:
#dest_stream = self.out_stream
queryString=self.queryString
print 'segmentToStart',segmentToStart
if self.live or segmentToStart==0 or segmentToStart==None:
print 'writing metadata'#,len(self.metadata)
self._write_flv_header(dest_stream, self.metadata)
dest_stream.flush()
#elif segmentToStart>0 and not self.live:
# self._write_flv_header2(dest_stream)
# dest_stream.flush()
url=self.url
bootstrap, boot_info, fragments_list,total_frags=(self.bootstrap, self.boot_info, self.fragments_list,self.total_frags)
print boot_info, fragments_list,total_frags
self.status='bootstrap done'
self.status='file created'
self.downloaded_bytes = 0
self.bytes_in_disk = 0
self.frag_counter = 0
start = time.time()
frags_filenames = []
self.seqNumber=0
if segmentToStart and not self.live :
self.seqNumber=segmentToStart
if self.seqNumber>=total_frags:
self.seqNumber=total_frags-1
#for (seg_i, frag_i) in fragments_list:
#for seqNumber in range(0,len(fragments_list)):
self.segmentAvailable=0
frameSent=0
keyValue=""
keyData=None
firstPacket=True
remainingFrameData=None
decrypter=None
errors=0
file=0
lastIV=None
AKSession=None
while True:
#if not self.live:
# _write_flv_header2
try:
if self.g_stopEvent.isSet():
return
except: pass
seg_i, frag_i=fragments_list[self.seqNumber]
self.seqNumber+=1
frameSent+=1
name = u'Seg%d-Frag%d' % (seg_i, frag_i)
#print 'base_url',base_url,name
if AKSession:
name+=AKSession
url = self.base_url + name
if queryString and '?' not in url:
url+='?'+queryString
elif '?' in self.base_url:
url = self.base_url.split('?')[0] + name+'?'+self.base_url.split('?')[1]
#print(url),base_url,name
#frag_filename = u'%s-%s' % (tmpfilename, name)
#success = dl._do_download(frag_filename, {'url': url})
print 'downloading....',url
success=False
urlTry=0
while not success and urlTry<5:
success = self.getUrl(url,True)
if not success: xbmc.sleep(300)
urlTry+=1
print 'downloaded',not success==None,url
if not success:
return False
#with open(frag_filename, 'rb') as down:
if 1==1:
down_data = success#down.read()
reader = FlvReader(down_data)
while True:
_, box_type, box_data = reader.read_box_info()
print 'box_type',box_type,len(box_data)
#if box_type == b'afra':
# dest_stream.write(box_data)
# dest_stream.flush()
# break
if box_type == b'mdat':
isDrm=True if ord(box_data[0])&1 else False
boxlength=len(box_data)
seglen=0
file+=1
# if file>6: print 1/0
skip=False
doDecrypt=False
# print 'first byte',repr(box_data[0]),'kk'
isAkamaiEncrypted=True if ord(box_data[0])==11 or ord(box_data[0])==10 else False
if isAkamaiEncrypted:
# print 'Total MDAT count',len(box_data), len(box_data)%16
_loc8_ = ord(box_data[1]) << 16 | ord(box_data[2]) << 8 | ord(box_data[3]) & 255;
_loc9_ = box_data[11:11+_loc8_]
# print 'this is encrypted',len(_loc9_),_loc8_,repr(box_data[1:70])
keyExists,Key,dataread,lastIV=self.readAKKey(_loc9_)
if keyExists:
# print 'key exists and its len is ',_loc8_,repr(Key)
doDecrypt=True
keyValueNew=Key.split('key_')[1]
# print 'previous key is'+keyValue,'new key is',keyValueNew
if keyValue=="":
keyValue="_"+keyValueNew
AKSession=keyValue
keyurl = self.keybase_url +keyValueNew
if queryString and '?' not in keyurl:
keyurl+='?'+queryString+'&guid=CHRLRCMRHGUD'
print 'the key url is ',keyurl,'thanks'
keyData=self.getUrl(keyurl,False)
skip=False
firstPacket=True
elif not keyValue=="_"+keyValueNew:
keyValue="_"+keyValueNew#take new key
AKSession=keyValue
keyurl = self.keybase_url +keyValueNew
if queryString and '?' not in keyurl:
keyurl+='?'+queryString+'&guid=CHRLRCMRHGUD'
keyData=self.getUrl(keyurl,False)
firstPacket=True
#todo decryptit! and put it in box_data
#print 'before skip'
if skip:
break;
if keyData:
print 'key data is',repr(keyData),len(keyData)
#do decrypt here. frame by frame
#now generate frames
#put remaining in remaining
#for each frame decrypt and write and flush
try:
frames=[]
# print 'before frames data', repr(box_data[0:70])
frames,remainingFrameData=self.getFrames(box_data,remainingFrameData)
# print 'after frames data first frame', repr(frames[0][0:70])
#print 'frames',frames
for frame in frames:
data=frame[1]
datalen=ord(data[1]) << 16 | ord(data[2]) << 8 | ord(data[3]) & 255;
preFrame=len(data)
#print 'samp>',len(data),datalen,ord(data[0]) ,'<samp'
if firstPacket:
firstPacket=False
# data=data[0:datalen]
#print 'first>',len(data),ord(data[0]),datalen,'<first'
# else:
if 1==1:
#if not not key frame then decrypt else
firstByte=ord(data[0])
frameHeader=data[:11]
framePad=data[11 + datalen:11 + datalen+4];
if firstByte==10 or firstByte==11:
if firstByte==10:
frameHeader = list(frameHeader)
frameHeader[0]=chr(8)
frameHeader=''.join(frameHeader)
if firstByte==11:
frameHeader = list(frameHeader)
frameHeader[0]=chr(9)
frameHeader=''.join(frameHeader)
data=data[11:11+datalen]
#print 'sub>',len(data),firstByte,datalen,datalen%16,len(data)%16 ,'<sub'
keyExistsNew,KeyNew,dataread,ignoreIV=self.readAKKey(data)
# print 'dataread',dataread,keyExistsNew,KeyNew,ignoreIV
try:
data=akhds.tagDecrypt(data,keyData)
except:
print 'decryption error'
errors+=1
traceback.print_exc()
if errors>10: print 1/0
# print 'pre return size %d, %d %d'%(len(frameHeader),len(data), len(framePad))
seglen1=self.addBytesToOutput(frameHeader,data,framePad,1,dest_stream)
seglen+=seglen1
# print 'pre frame %d, after %d'%(preFrame,seglen1)
else:
print 'hmm no 10 or 11?'
# print 'pre return size %d, %d %d'%(len(frameHeader),len(data), len(framePad))
seglen1=self.addBytesToOutput(frameHeader,None,None,1,dest_stream)
seglen+=seglen1
# print 'pre frame %d, after %d'%(preFrame,seglen1)
#est_stream.write(data)
#dest_stream.flush()
#dest_stream.write(self.decryptData(data,keyData))
#dest_stream.flush()
except:
print traceback.print_exc()
self.g_stopEvent.set()
else:
dest_stream.write(box_data)
dest_stream.flush()
print 'box length is %d and seg total is %d'%(boxlength,seglen)
break
# Using the following code may fix some videos, but
# only in mplayer, VLC won't play the sound.
# mdat_reader = FlvReader(box_data)
# media_type = mdat_reader.read_unsigned_char()
# while True:
# if mdat_reader.read_unsigned_char() == media_type:
# if mdat_reader.read_unsigned_char() == 0x00:
# break
# dest_stream.write(pack('!B', media_type))
# dest_stream.write(b'\x00')
# dest_stream.write(mdat_reader.read())
# break
self.status='play'
if self.seqNumber==len(fragments_list) or (totalSegmentToSend>0 and frameSent==totalSegmentToSend):
if not self.live:
break
self.seqNumber=0
#todo if the url not available then get manifest and get the data again
total_frags=None
try:
bootstrap, boot_info, fragments_list,total_frags=self.readBootStrapInfo(self.bootstrapURL,None,updateMode=True,lastSegment=seg_i, lastFragement=frag_i)
except:
traceback.print_exc()
pass
if total_frags==None:
break
del self.downloaded_bytes
del self.frag_counter
except:
traceback.print_exc()
return
def getBootStrapWithId (self,BSarray, id):
try:
for bs in BSarray:
print 'compare val is ',bs.attrib['id'], 'id', id
if bs.attrib['id']==id:
print 'gotcha'
return bs
except: pass
return None
def readBootStrapInfo(self,bootstrapUrl,bootStrapData, updateMode=False, lastFragement=None,lastSegment=None):
try:
retries=0
while retries<=10:
try:
if self.g_stopEvent.isSet():
print 'event is set. returning'
return
except: pass
if bootStrapData==None:
bootStrapData =self.getUrl(bootstrapUrl)
if bootStrapData==None:
retries+=1
continue
#print 'bootstrapData',len(bootStrapData)
bootstrap = bootStrapData#base64.b64decode(bootStrapData)#doc.findall(_add_ns('bootstrapInfo'))[0].text)
#print 'boot stream read done'
boot_info,self.live = read_bootstrap_info(bootstrap)
#print 'boot_info read done',boot_info
newFragement=None
if not lastFragement==None:
newFragement=lastFragement+1
fragments_list = build_fragments_list(boot_info,newFragement,self.live)
total_frags = len(fragments_list)
#print 'fragments_list',fragments_list, newFragement
#print lastSegment
if updateMode and (len(fragments_list)==0 or ( newFragement and newFragement>fragments_list[0][1])):
#todo check lastFragement to see if we got valid data
print 'retrying......'
bootStrapData=None
retries+=1
xbmc.sleep(2000)
continue
return bootstrap, boot_info, fragments_list,total_frags
except:
traceback.print_exc()
def _pv_params(self, pvswf, pv):
"""Returns any parameters needed for Akamai HD player verification.
Algorithm originally documented by KSV, source:
http://stream-recorder.com/forum/showpost.php?p=43761&postcount=13
"""
#return pv;
#pv="ZXhwPTE0NDAxNTUyODJ+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPWMyZjk4MmVjZjFjODQyM2IzZDkxMzExMjNmY2ExN2U4Y2UwMjU4NWFhODg3MWFjYzM5YmI0MmVlNTYxYzM5ODc="
# pv="ZXhwPTE0NDAzMjc3ODF+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPTYyYTE2MzU2MTNjZTI4ZWI2MTg0MmRjYjFlZTZlYTYwYTA5NWUzZDczNTQ5MTQ1ZDVkNTc0M2M2Njk5MDJjNjY="
# pv="ZXhwPTE0Mzk2MDgzMTl+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPTExYTJiNzQ4NjQyYmY1M2VlNzk5MzhhNTMzNjc1MTAzZjk2NWViOGVhODY4MzUwODkwZGM1MjVmNjI3ODM4MzQ="
try:
data, hdntl = pv.split(";")
except ValueError:
data = pv
hdntl = ""
print 'DATA IS',data
print 'hdntl IS',hdntl
if data=="": return hdntl
first_stage_msg=binascii.unhexlify('056377146640142763057567157640125041016376130175171220177717044510157134116364123221072012122137150351003442036164015632157517073355151142067436113220106435137171174171127530157325044270025004')
first_stage_key=data
hash_data=""
if pvswf is None:
print 'swf required for pv2 decryption'
pvswf=""
if pvswf.startswith('http'):
swf = self.getUrl(pvswf,False)
hash = hashlib.sha256()
hash.update(self.swfdecompress(swf))
hash = base64.b64encode(hash.digest()).decode("ascii")
else:
hash=pvswf # the incoming is the hash!
print 'hash',hash
# shouldhash="AFe6zmDCNudrcFNyePaAzAn/KRT5ES99ql4SNqldM2I="
# if shouldhash==hash:
# print '**************HASH MATCH ********************'
# else:
# print '********* NOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTT**********'
second_stage_key = hmac.new(first_stage_key,first_stage_msg , sha256).digest()
# second_stage_data=hash_data #
second_stage_data=base64.b64decode( hash)
buffer="106,45,165,20,106,45,165,20,38,45,165,87,11,98,228,14,107,89,233,25,101,36,223,76,97,28,175,18,23,86,164,6,1,56,157,64,123,58,186,100,54,34,184,14,3,44,164,20,106,6,222,84,122,45,165,20,106,28,196,84,122,111,183,84,122,45,165,20,106,45,165,20,106,45,165,20,106,45,165,20,106,45,165,20,106,45,165,20,106,45,165,20,106,45,165,20"
buffer=buffer.split(',');
second_stage_data+=chr(int(buffer[len(second_stage_data)]))
# print len(second_stage_data),repr(second_stage_data)
third_stage_key= hmac.new(second_stage_key, second_stage_data, sha256).digest()
#hash=shouldhash
msg = "exp=9999999999~acl=%2f%2a~data={0}!{1}".format(data, hash)
auth = hmac.new(third_stage_key, msg.encode("ascii"), sha256)
pvtoken = "{0}~hmac={1}".format(msg, auth.hexdigest())
# The "hdntl" parameter can be accepted as a cookie or passed in the
# query string, but the "pvtoken" parameter can only be in the query
# string
print 'pvtoken',pvtoken
params=urllib.urlencode({'pvtoken':pvtoken})+'&'+hdntl+'&hdcore=2.11.3'
params=params.replace('%2B','+')
params=params.replace('%2F','/')
print params
return params
def swfdecompress(self,data):
if data[:3] == b"CWS":
data = b"F" + data[1:8] + zlib.decompress(data[8:])
return data
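# --- Hedged usage sketch (not part of the original source) ---------------------
# From the code above, _pv_params() appears to take a `pv` string of the form
# "<base64 data>;<hdntl token>" plus either a SWF URL or a precomputed SWF hash
# in `pvswf`, and returns a query-string suffix containing `pvtoken`, the
# `hdntl` token and `hdcore`.  A wiring sketch (names hypothetical):
#
#   params = downloader._pv_params(pvswf_url_or_hash, pv_string)
#   fragment_url = fragment_base_url + '?' + params
#
# swfdecompress() just converts a zlib-compressed "CWS" SWF into its
# uncompressed "FWS" form before it is hashed for player verification.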
|
gpl-2.0
| -2,045,013,516,949,013,200
| 42.145133
| 548
| 0.494427
| false
| 4.142679
| false
| false
| false
|
lluisball/godenerg
|
axpert/charger.py
|
1
|
2546
|
from time import sleep
from datetime import datetime, timedelta
from axpert.protocol import (
CMD_REL, parse_inverter_conf, empty_inverter_conf, CmdSpec
)
from axpert.settings import charger_conf
from axpert.datalogger import get_avg_last
FLOAT_VOL = charger_conf['float_voltage']
ABSORB_VOL = charger_conf['absorbtion_voltage']
ABSORB_AMPS_THRESHOLD = charger_conf['absorbtion_amps_threshold']
CHARGE_START_CHECK = charger_conf['charge_check_start']
CHARGE_END_CHECK = charger_conf['charge_check_end']
def get_inverter_conf(executor):
try:
response = executor(CMD_REL.get('settings'))
return parse_inverter_conf(response.data)
except:
return empty_inverter_conf()
def set_float_volts_to(log, executor, target):
try:
log.info('Changing float charge setting to %.1f' % target)
executor(CmdSpec(code='PBFT', size=11, val='%.1f'% target, json=None))
except Exception as e:
log.error('Could not set the float charge setting')
log.exception(e)
def manual_charger(log, executor):
def _stop_charge_check(now):
if now.hour in range(CHARGE_START_CHECK, CHARGE_END_CHECK + 1) \
and now.minute in [1, 10, 20, 30, 40, 50] \
and now.second in [1, 15, 30, 45]:
inverter_conf = get_inverter_conf(executor)
if not inverter_conf.float_volt \
or inverter_conf.float_volt == FLOAT_VOL:
return
avg_last_batt_volts, avg_last_batt_amps = get_avg_last(
log, minutes=30
)
if (ABSORB_VOL - 0.20) < avg_last_batt_volts < (ABSORB_VOL + 0.20)\
and avg_last_batt_amps < ABSORB_AMPS_THRESHOLD:
set_float_volts_to(log, executor, FLOAT_VOL)
def _start_charge_check(now):
if now.hour in [3, 4] \
and now.minute in [1, 3] \
and now.second in [1, 10, 20]:
inverter_conf = get_inverter_conf(executor)
if inverter_conf.float_volt \
and inverter_conf.float_volt == FLOAT_VOL:
set_float_volts_to(log, executor, ABSORB_VOL)
while True:
now = datetime.now()
try:
_start_charge_check(now)
_stop_charge_check(now)
except Exception as e:
log.error('Error in charger!')
log.error(e)
finally:
sleep(1)
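# --- Hedged usage sketch (not part of the original module) ---------------------
# manual_charger() only needs a logger and an `executor` callable that sends a
# protocol command (a CmdSpec) to the inverter and returns a response object
# with a `.data` attribute, as get_inverter_conf() above relies on.  A wiring
# sketch, assuming the executor comes from axpert's command layer:
#
#   import logging
#   manual_charger(logging.getLogger('charger'), executor)  # loops, checking once per second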
|
gpl-3.0
| 5,399,676,398,103,707,000
| 34.361111
| 79
| 0.569521
| false
| 3.4083
| false
| false
| false
|
wooster/framework_depviz
|
depviz.py
|
1
|
10369
|
#!/usr/bin/env python
"""
The MIT License (MIT)
Copyright (c) 2013 Andrew Wooster
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import subprocess
import sys
def escape_arg(argument):
"""Escapes an argument to a command line utility."""
argument = argument.replace('\\', "\\\\").replace("'", "\'").replace('"', '\\"').replace("!", "\\!").replace("`", "\\`")
return "\"%s\"" % argument
def run_command(command, verbose=False):
"""Runs the command and returns the status and the output."""
if verbose:
sys.stderr.write("Running: %s\n" % command)
p = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdin, stdout = (p.stdin, p.stdout)
output = stdout.read()
output = output.strip("\n")
status = stdin.close()
stdout.close()
p.wait()
return (p.returncode, output)
DEPENDENCY_PRIVATE = 'Private'
DEPENDENCY_PUBLIC = 'Public'
DEPENDENCY_UNKNOWN = 'Unknown'
def dependencies_for_resolved_framework_path(lib_base, framework_path, dependencies, dep_to_visibility, exclude_dylibs=True):
def visibility_from_path(path):
visibility = DEPENDENCY_UNKNOWN
if '/PrivateFrameworks/' in path:
visibility = DEPENDENCY_PRIVATE
elif '/Frameworks/' in path:
visibility = DEPENDENCY_PUBLIC
return visibility
real_framework_path = framework_path
if not framework_path.startswith(lib_base):
real_framework_path = lib_base + framework_path
if not os.path.exists(real_framework_path):
real_framework_path = framework_path
if not os.path.exists(real_framework_path):
print >> sys.stderr, "Unable to find framework:", real_framework_path
return
(path, filename) = os.path.split(real_framework_path)
(base, ext) = os.path.splitext(filename)
(status, output) = run_command("otool -L %s" % escape_arg(real_framework_path))
lines = output.splitlines()
dep_to_visibility[base] = visibility_from_path(real_framework_path)
for line in lines:
if not line.startswith("\t"):
continue
if not "(" in line:
continue
parts = line.split("(")
if not len(parts) > 1:
continue
f_path = parts[0].strip()
(_, depname) = os.path.split(f_path)
if depname == base:
# Exclude self-dependency.
continue
visibility = visibility_from_path(f_path)
if exclude_dylibs and f_path.endswith("dylib"):
continue
should_recurse = (dep_to_visibility.get(depname) is None)
dep_to_visibility[depname] = visibility
dependencies.setdefault(base, [])
if not depname in dependencies[base]:
dependencies[base].append(depname)
if should_recurse:
dependencies_for_resolved_framework_path(lib_base, f_path, dependencies, dep_to_visibility, exclude_dylibs=exclude_dylibs)
def dependencies_for_framework_path(framework_path, dependencies, dep_to_visibility, exclude_dylibs=True):
(path, filename) = os.path.split(framework_path)
(base, ext) = os.path.splitext(filename)
lib_path = os.path.join(framework_path, base)
lib_parts = lib_path.split(os.sep)
lib_base_parts = []
for part in lib_parts:
if part == "System":
break
lib_base_parts.append(part)
lib_base = os.sep.join(lib_base_parts)
return dependencies_for_resolved_framework_path(lib_base, lib_path, dependencies, dep_to_visibility, exclude_dylibs=exclude_dylibs)
def dependencies_for_system_library_path(library_path):
entries = os.listdir(library_path)
if "/System/Library" not in library_path or "Frameworks" not in entries or "PrivateFrameworks" not in entries:
print >> sys.stderr, "Path doesn't look like it points to the System/Library folder of an SDK."
sys.exit(1)
dependencies = {}
dep_to_visibility = {}
def update_dependencies(dependencies, dep_to_visibility, library_path, f_path):
framework_paths = os.listdir(os.path.join(library_path, f_path))
for framework_path in framework_paths:
if not framework_path.endswith(".framework"):
continue
full_path = os.path.join(library_path, f_path, framework_path)
dependencies_for_framework_path(full_path, dependencies, dep_to_visibility)
update_dependencies(dependencies, dep_to_visibility, library_path, "Frameworks")
update_dependencies(dependencies, dep_to_visibility, library_path, "PrivateFrameworks")
return (dependencies, dep_to_visibility)
def dot_for_dependencies(dependencies, dep_to_visibility, framework_depnames=None):
l = []
l.append("digraph G {")
l.append("\tnode [shape=box];")
def color_for_visibility(visibility):
if visibility == DEPENDENCY_PRIVATE:
return "#FFD1E0"
elif visibility == DEPENDENCY_PUBLIC:
return "#D1FFD2"
else:
return "#FFFFFF"
if framework_depnames is None:
nodes = {}
seen_deps = []
i = 0
for framework_name, visibility in dep_to_visibility.iteritems():
if framework_name in seen_deps:
continue
nodename = "Node%d" % i
i += 1
nodes[framework_name] = nodename
seen_deps.append(framework_name)
color = color_for_visibility(dep_to_visibility[framework_name])
l.append("\t%s [label=\"%s\", fillcolor=\"%s\"];" % (nodename, framework_name, color))
for framework_name, deps in dependencies.iteritems():
if nodes.get(framework_name) is None:
print >> sys.stderr, "Unknown node", framework_name
continue
from_nodename = nodes[framework_name]
if len(deps) == 0:
l.append("\t\"%s\" -> {};" % framework_name)
for lib_name in deps:
to_nodename = nodes[lib_name]
l.append("\t%s -> %s; // %s -> %s" % (from_nodename, to_nodename, framework_name, lib_name))
else:
        def gather_dependents(dependencies, framework_name, seen=None):
            """Get a list of all the frameworks which depend on framework_name, recursively."""
results = []
if seen is None:
seen = []
for framework, deps in dependencies.iteritems():
if framework_name in deps:
if framework in seen:
continue
seen.append(framework)
# framework depends on framework_name
results.append(framework_name)
for result in gather_dependents(dependencies, framework, seen=seen):
results.append(result)
return list(set(results))
frameworks_to_print = []
for framework_depname in framework_depnames:
for f in gather_dependents(dependencies, framework_depname):
frameworks_to_print.append(f)
frameworks_to_print = list(set(frameworks_to_print))
nodes = {}
seen_deps = []
i = 0
for framework_name, visibility in dep_to_visibility.iteritems():
if framework_name in seen_deps:
continue
if framework_name not in frameworks_to_print:
continue
nodename = "Node%d" % i
i += 1
nodes[framework_name] = nodename
seen_deps.append(framework_name)
color = color_for_visibility(dep_to_visibility[framework_name])
l.append("\t%s [label=\"%s\", style=filled, fillcolor=\"%s\"];" % (nodename, framework_name, color))
for framework_name, deps in dependencies.iteritems():
if framework_name in frameworks_to_print:
if nodes.get(framework_name) is None:
print >> sys.stderr, "Unknown node", framework_name
continue
from_nodename = nodes[framework_name]
if len(deps) == 0:
l.append("\t\"%s\" -> {};" % framework_name)
for lib_name in deps:
if lib_name in frameworks_to_print:
to_nodename = nodes[lib_name]
l.append("\t%s -> %s; // %s -> %s" % (from_nodename, to_nodename, framework_name, lib_name))
l.append("}\n")
return "\n".join(l)
def main(library_path, framework_depnames=None):
library_path = os.path.expanduser(library_path)
(dependencies, dep_to_visibility) = dependencies_for_system_library_path(library_path)
dot_output = dot_for_dependencies(dependencies, dep_to_visibility, framework_depnames=framework_depnames)
print >> sys.stdout, dot_output
if __name__ == "__main__":
if len(sys.argv) < 2:
print >> sys.stderr, "Usage: %s [SDK System Library Path] [framework name ...]"
print >> sys.stderr, " Where the library path is like /System/Library"
print >> sys.stderr, " Where the framework name (optional) is one to determine what depends on it"
sys.exit(1)
framework_depnames = None
if len(sys.argv) > 2:
framework_depnames = sys.argv[2:]
main(sys.argv[1], framework_depnames=framework_depnames)
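# --- Hedged usage example (not part of the original script) --------------------
# Following the usage text above, a typical invocation might be:
#
#   python depviz.py /path/to/SDK/System/Library UIKit > deps.dot
#   dot -Tpdf deps.dot -o deps.pdf    # render with Graphviz (assumed installed)
#
# Without a framework name, the full public/private framework graph is emitted.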
|
mit
| -6,358,658,463,639,979,000
| 43.311966
| 135
| 0.621564
| false
| 4.031493
| false
| false
| false
|
jsubpy/jsub
|
jsub/exts/repo/file_system.py
|
1
|
2428
|
import os
import json
import logging
import fcntl
from jsub.util import safe_mkdir
from jsub.util import safe_rmdir
from jsub.error import RepoReadError
from jsub.error import TaskNotFoundError
ID_FILENAME = 'id'
class FileSystem(object):
def __init__(self, param):
self.__jsub_dir = os.path.expanduser(param.get('taskDir', '~/jsub/'))
# self.__id_file = os.path.join(self.__jsub_dir, ID_FILENAME)
self.__logger = logging.getLogger('JSUB')
# self.__create_repo_dir()
self.__json_format = param.get('format', 'compact')
def save_task(self, data):
if 'id' not in data:
safe_mkdir(self.__jsub_dir)
data['id'] = self.__new_task_id()
safe_mkdir(os.path.join(self.__jsub_dir,str(data['id']),'taskInfo'))
task_path = os.path.join(self.__jsub_dir, str(data['id']),'taskInfo','repo')
data_str = self.__json_str(data)
with open(task_path, 'a+') as f:
fcntl.flock(f, fcntl.LOCK_EX)
f.seek(0)
f.truncate()
f.write(data_str)
def delete_task(self, task_id):
safe_rmdir(os.path.join(self.__jsub_dir,str(task_id)))
def find_by_id(self, task_id):
return self.task_data(task_id)
def find_by_ids(self, task_ids):
all_data = []
for task_id in task_ids:
try:
td = self.task_data(task_id)
all_data.append(td)
except RepoReadError as e:
self.__logger.debug(e)
return all_data
def all_task_data(self, order='asc'):
task_ids =[d for d in os.listdir(self.__jsub_dir) if os.path.isdir(os.path.join(self.__jsub_dir,d))]
task_ids.sort(key=int, reverse=(order=='desc'))
return self.find_by_ids(task_ids)
def task_data(self, task_id):
safe_mkdir(os.path.join(self.__jsub_dir,str(task_id),'taskInfo'))
task_path = os.path.join(self.__jsub_dir,str(task_id),'taskInfo','repo')
with open(task_path, 'a+') as f:
fcntl.flock(f, fcntl.LOCK_EX)
f.seek(0)
data_str = f.read()
try:
return json.loads(data_str)
except ValueError as e:
raise RepoReadError('JSON decode error on task %s: %s' % (task_id, e))
# def __create_repo_dir(self):
# safe_mkdir(self.__jsub_dir)
def __new_task_id(self):
task_ids =[int(d) for d in os.listdir(self.__jsub_dir) if os.path.isdir(os.path.join(self.__jsub_dir,d))]
if not task_ids:
return 1
task_ids.sort(key=int, reverse=True)
return(task_ids[0]+1)
def __json_str(self, data):
if self.__json_format == 'pretty':
return json.dumps(data, indent=2)
return json.dumps(data, separators=(',', ':'))
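# --- Hedged usage sketch (not part of the original module) ---------------------
# The repository is configured with a plain dict whose keys match those read in
# __init__ above ('taskDir', 'format'):
#
#   repo = FileSystem({'taskDir': '~/jsub/', 'format': 'pretty'})
#   data = {'name': 'demo'}            # hypothetical task payload
#   repo.save_task(data)               # assigns data['id'] when missing
#   print(repo.find_by_id(data['id']))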
|
mit
| 5,301,751,721,617,016,000
| 27.564706
| 107
| 0.656919
| false
| 2.572034
| false
| false
| false
|
sumihai-tekindo/account_sicepat
|
sicepat_erp/invoice_line_exp_cost/invoice_line_exp_cost.py
|
1
|
7446
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Pambudi Satria (<https://github.com/pambudisatria>).
# @author Pambudi Satria <pambudi.satria@yahoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models, api
from openerp.osv import osv
import openerp.addons.decimal_precision as dp
class account_invoice_line(models.Model):
_inherit = "account.invoice.line"
@api.one
@api.depends('price_unit', 'discount', 'invoice_line_tax_id', 'quantity',
'product_id', 'invoice_id.partner_id', 'invoice_id.currency_id')
def _compute_price(self):
price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
taxes = self.invoice_line_tax_id.compute_all(price, self.quantity, product=self.product_id, partner=self.invoice_id.partner_id)
tax_extra_ship_cost = self.invoice_line_tax_id.compute_all(self.extra_shipping_cost, 1, product=self.product_id, partner=self.invoice_id.partner_id)
tax_insurance_fee = self.invoice_line_tax_id.compute_all(self.insurance_fee, 1, product=self.product_id, partner=self.invoice_id.partner_id)
tax_admcost_insurance = self.invoice_line_tax_id.compute_all(self.admcost_insurance, 1, product=self.product_id, partner=self.invoice_id.partner_id)
tax_packing_cost = self.invoice_line_tax_id.compute_all(self.packing_cost, 1, product=self.product_id, partner=self.invoice_id.partner_id)
self.price_subtotal = taxes['total'] + tax_extra_ship_cost['total'] + tax_insurance_fee['total'] + tax_admcost_insurance['total'] + tax_packing_cost['total']
if self.invoice_id:
self.price_subtotal = self.invoice_id.currency_id.round(self.price_subtotal)
def _compute_insurance_fee(self):
        self.insurance_fee = self.insurance_value * (0.2 / 100)
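    # Worked example (illustrative only): with insurance_value = 1,000,000 the
    # 0.2% rate above gives insurance_fee = 1,000,000 * 0.002 = 2,000.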
extra_shipping_cost = fields.Float(string='Extra Shipping Cost', digits= dp.get_precision('Product Price'), default=0.0)
insurance_value = fields.Float(string='Insurance Value', digits= dp.get_precision('Product Price'), default=0.0)
insurance_fee = fields.Float(string='Insurance Fee', digits= dp.get_precision('Product Price'), default=0.0)
admcost_insurance = fields.Float(string='Cost Administration of Insurance', digits= dp.get_precision('Product Price'), default=0.0)
packing_cost = fields.Float(string='Packing Cost', digits= dp.get_precision('Product Price'), default=0.0)
class account_invoice_tax(models.Model):
_inherit = "account.invoice.tax"
@api.v8
def compute(self, invoice):
tax_grouped = {}
currency = invoice.currency_id.with_context(date=invoice.date_invoice or fields.Date.context_today(invoice))
company_currency = invoice.company_id.currency_id
for line in invoice.invoice_line:
taxes = line.invoice_line_tax_id.compute_all(
(line.price_unit * (1 - (line.discount or 0.0) / 100.0)),
line.quantity, line.product_id, invoice.partner_id)['taxes']
taxes += line.invoice_line_tax_id.compute_all(
line.extra_shipping_cost, 1, line.product_id, invoice.partner_id)['taxes']
taxes += line.invoice_line_tax_id.compute_all(
line.insurance_fee, 1, line.product_id, invoice.partner_id)['taxes']
taxes += line.invoice_line_tax_id.compute_all(
line.admcost_insurance, 1, line.product_id, invoice.partner_id)['taxes']
taxes += line.invoice_line_tax_id.compute_all(
line.packing_cost, 1, line.product_id, invoice.partner_id)['taxes']
for tax in taxes:
val = {
'invoice_id': invoice.id,
'name': tax['name'],
'amount': tax['amount'],
'manual': False,
'sequence': tax['sequence'],
'base': currency.round(tax['price_unit'] * line['quantity']),
}
if invoice.type in ('out_invoice','in_invoice'):
val['base_code_id'] = tax['base_code_id']
val['tax_code_id'] = tax['tax_code_id']
val['base_amount'] = currency.compute(val['base'] * tax['base_sign'], company_currency, round=False)
val['tax_amount'] = currency.compute(val['amount'] * tax['tax_sign'], company_currency, round=False)
val['account_id'] = tax['account_collected_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_collected_id']
else:
val['base_code_id'] = tax['ref_base_code_id']
val['tax_code_id'] = tax['ref_tax_code_id']
val['base_amount'] = currency.compute(val['base'] * tax['ref_base_sign'], company_currency, round=False)
val['tax_amount'] = currency.compute(val['amount'] * tax['ref_tax_sign'], company_currency, round=False)
val['account_id'] = tax['account_paid_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_paid_id']
# If the taxes generate moves on the same financial account as the invoice line
# and no default analytic account is defined at the tax level, propagate the
# analytic account from the invoice line to the tax line. This is necessary
                # in situations where (part of) the taxes cannot be reclaimed,
# to ensure the tax move is allocated to the proper analytic account.
if not val.get('account_analytic_id') and line.account_analytic_id and val['account_id'] == line.account_id.id:
val['account_analytic_id'] = line.account_analytic_id.id
key = (val['tax_code_id'], val['base_code_id'], val['account_id'])
if not key in tax_grouped:
tax_grouped[key] = val
else:
tax_grouped[key]['base'] += val['base']
tax_grouped[key]['amount'] += val['amount']
tax_grouped[key]['base_amount'] += val['base_amount']
tax_grouped[key]['tax_amount'] += val['tax_amount']
for t in tax_grouped.values():
t['base'] = currency.round(t['base'])
t['amount'] = currency.round(t['amount'])
t['base_amount'] = currency.round(t['base_amount'])
t['tax_amount'] = currency.round(t['tax_amount'])
return tax_grouped
|
gpl-3.0
| -6,949,873,827,119,664,000
| 58.048387
| 165
| 0.592399
| false
| 3.734203
| false
| false
| false
|
jzerbe/taskifier
|
taskifier/internal/__init__.py
|
1
|
3830
|
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
import json
from taskifier import const
from taskifier.models import Task, TaskOwner
from taskifier.internal.TaskPayloadHelper import TaskPayloadHelper
def DELETE(task_owner, task_id):
task = _get_task_by_id(task_id)
if task and _is_owner(task_owner, task):
task.delete()
return {const.KEY_ID: task_id,
const.KEY_SOURCE: "",
const.KEY_DEST: "",
const.KEY_CONTENT: "",
const.KEY_READY_TIME: ""}
return {const.KEY_RESP_STATUS: "ERROR",
const.KEY_RESP_STATUS_TEXT: "task not found or bad auth"}
def GET(task_owner, task_id):
task = _get_task_by_id(task_id)
if task and _is_owner(task_owner, task):
return {const.KEY_ID: task_id,
const.KEY_SOURCE: task.source,
const.KEY_DEST: task.dest,
const.KEY_CONTENT: task.content,
const.KEY_READY_TIME: _get_json_from_datetime(task.ready_time)}
return {const.KEY_RESP_STATUS: "ERROR",
const.KEY_RESP_STATUS_TEXT: "task not found or bad auth"}
def POST(task_owner, task_id, request_payload):
if task_owner is None:
return {const.KEY_RESP_STATUS: "ERROR",
const.KEY_RESP_STATUS_TEXT: "specified TaskOwner object not found"}
taskPayloadHelper = TaskPayloadHelper(request_payload)
if not taskPayloadHelper.is_valid() or taskPayloadHelper.is_duplicate():
return {const.KEY_RESP_STATUS: "ERROR",
const.KEY_RESP_STATUS_TEXT: "payload is invalid or already exists"}
if task_id is None:
task = Task(owner=task_owner,
source=taskPayloadHelper[const.KEY_SOURCE],
dest=taskPayloadHelper[const.KEY_DEST],
content=taskPayloadHelper[const.KEY_CONTENT],
ready_time=taskPayloadHelper.get_ready_datetime())
task.save()
task_id = task.id
else:
task = _get_task_by_id(task_id)
task.source = taskPayloadHelper[const.KEY_SOURCE]
task.dest = taskPayloadHelper[const.KEY_DEST]
task.content = taskPayloadHelper[const.KEY_CONTENT]
task.ready_time = taskPayloadHelper.get_ready_datetime()
task.save()
return {const.KEY_ID: task_id,
const.KEY_SOURCE: taskPayloadHelper[const.KEY_SOURCE],
const.KEY_DEST: taskPayloadHelper[const.KEY_DEST],
const.KEY_CONTENT: taskPayloadHelper[const.KEY_CONTENT],
const.KEY_READY_TIME: taskPayloadHelper[const.KEY_READY_TIME]}
def get_owner(owner_key):
query_set = TaskOwner.objects.filter(key=owner_key)
if query_set and (len(query_set) == 1):
return query_set[0]
else:
return None
def _get_json_from_datetime(obj):
dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime) else None
json_str = json.dumps(obj, default=dthandler)
json_str = json_str.replace('"', '')
json_str = _rreplace(json_str, "000", "Z")
return json_str
def _get_task_by_id(task_id):
if task_id:
task = None
try:
task = Task.objects.get(id=task_id)
except ObjectDoesNotExist:
task = None
return task
else:
return None
def _is_owner(task_owner, task):
if task and task_owner and isinstance(task, Task) and isinstance(task_owner, TaskOwner):
return (task_owner.key == task.owner.key)
else:
return False
def _rreplace(s, old, new):
offset = 0 - len(old)
remainder = s[:offset]
replace_array = s.split(remainder)
replace_confirm = replace_array[(len(replace_array) - 1)]
if replace_confirm == old:
return s[:-len(old)] + new
return s
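# --- Hedged example (not part of the original module) --------------------------
# _get_json_from_datetime() serialises a datetime via isoformat(), strips the
# JSON quotes and then uses _rreplace() to turn a trailing "000" of the
# microsecond field into a "Z".  For instance (values illustrative):
#
#   _get_json_from_datetime(datetime(2013, 5, 1, 12, 0, 0, 123000))
#   # -> '2013-05-01T12:00:00.123Z'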
|
bsd-3-clause
| -939,080,448,045,002,200
| 35.132075
| 92
| 0.620627
| false
| 3.630332
| false
| false
| false
|
nss350/magPy
|
core/windowSelector.py
|
1
|
17249
|
"""
Created on Thu Mar 24 08:18:04 2016
@author: npop
The window selector calculates which global windows to use
Calculates overlapping windows between given sites
This removes the burden on the upcoming processor
Upcoming processor can then simply get the data for the windows
And process them
"""
import os
from datetime import date, time, datetime, timedelta
# my classes
from spectrumReader import SpectrumReader
from windowMasker import WindowMasker
# utils
from utilsIO import *
from utilsWindow import *
class WindowSelector(object):
###################
### CONSTRUCTOR
##################
def __init__(self, proj, fs, decParams, winParams):
self.proj = proj
self.fs = fs
self.decParams = decParams
self.winParams = winParams
self.sites = []
# shared indices
self.sharedIndices = {}
# the masks to use for each site
self.siteMasks = {}
# weights to use for each site
self.siteWeights = {}
# the spec files for eac site at fs
self.siteSpecFolders = {}
self.siteSpecReaders = {}
# global index ranges for all of the spec files
self.siteSpecRanges = {}
# set of all global indices for each site
self.siteGlobalIndices = {}
self.prepend = "spectra"
# time constraints
# priority is datetimes
# then dates
# then times
self.datetimes = {}
self.dates = {}
self.times = {}
# final constraints saved in
self.datetimeConstraints = {}
# add a list for each decimation level
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.datetimes[iDec] = []
self.dates[iDec] = []
self.times[iDec] = []
self.datetimeConstraints[iDec] = []
###################
### GET FUNCTIONS
##################
def getSites(self):
return self.sites
def getSharedWindows(self):
return self.sharedIndices
def getSharedWindowsLevel(self, iDec):
return self.sharedIndices[iDec]
def getNumSharedWindows(self, iDec):
return len(self.sharedIndices[iDec])
def getWindowsForFreq(self, iDec, eIdx):
sharedIndices = self.getSharedWindowsLevel(iDec)
# now mask for the particular frequency - mask for each given site
for s in self.getSites():
for mask in self.getMasks()[s]:
# remove the masked windows from shared indices
sharedIndices = sharedIndices - mask.getMaskWindowsFreq(iDec, eIdx)
return sharedIndices
# do other helper, which calculates the number of non masked windows for the whole level
# this should significantly speed up calculation when constraints are applied
def getUnmaskedWindowsLevel(self, iDec):
indices = set()
evalFreq = self.getDecParams().getEvalFrequenciesForLevel(iDec)
for eIdx, eFreq in enumerate(evalFreq):
indices.update(self.getWindowsForFreq(iDec, eIdx))
return indices
def getSpecReaders(self):
return self.siteSpecReaders
def getSpecRanges(self):
return self.siteSpecRanges
def getGlobalIndices(self):
return self.siteGlobalIndices
def getSampleFreq(self):
return self.fs
def getPrepend(self):
return self.prepend
def getDecParams(self):
return self.decParams
def getWinParams(self):
return self.winParams
def getDatetimeConstraints(self):
self.calcDatetimeConstraints()
return self.datetimeConstraints
def getLevelDatetimeConstraints(self, iDec):
self.calcDatetimeConstraints()
return self.datetimeConstraints[iDec]
def getMasks(self):
return self.siteMasks
def getSpecReaderForWindow(self, site, iDec, iWin):
specRanges = self.getSpecRanges()[site][iDec]
specReaders = self.getSpecReaders()[site][iDec]
for sF in specRanges:
if iWin >= specRanges[sF][0] and iWin <= specRanges[sF][1]:
return sF, specReaders[sF]
# if here, no window found
self.printWarning("Shared window {}, decimation level {} does not appear in any files given the constraints applied".format(iWin, iDec))
return False, False
def getDataSize(self, iDec):
# return data size of first file
dataSize = -1
site = self.getSites()[0]
specReaders = self.getSpecReaders()[site][iDec]
for sF in specReaders:
return specReaders[sF].getDataSize()
###################
### SET FUNCTIONS
##################
def setSites(self, sites):
# first remove repeated sites
sitesSet = set(sites)
sites = list(sitesSet)
# now continue
self.sites = sites
for s in self.sites:
self.siteMasks[s] = []
self.siteSpecFolders[s] = []
self.siteSpecReaders[s] = {}
self.siteSpecRanges[s] = {}
# use sets to hold gIndices
# optimised to find intersections
self.siteGlobalIndices[s] = {}
# at the end, calculate global indices
self.calcGlobalIndices()
# this is the prepend for the spectra files
    def setPrepend(self, prepend):
self.prepend = prepend
###################
### ADD CONSTRAINTS
##################
# for datetime constrains, dates take priority
def addDatetimeConstraint(self, start, stop):
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.addLevelDatetimeConstraint(start, stop, iDec)
def addLevelDatetimeConstraint(self, start, stop, iDec):
datetimeStart = datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
datetimeStop = datetime.strptime(stop, '%Y-%m-%d %H:%M:%S')
self.datetimes[iDec].append([datetimeStart, datetimeStop])
def addDateConstraint(self, dateC):
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.addLevelDateConstraint(dateC, iDec)
def addLevelDateConstraint(self, dateC, iDec):
datetimeC = datetime.strptime(dateC, '%Y-%m-%d').date()
self.dates[iDec].append(datetimeC)
def addTimeConstraint(self, start, stop):
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.addLevelTimeConstraint(start, stop, iDec)
def addLevelTimeConstraint(self, start, stop, iDec):
timeStart = datetime.strptime(start, '%H:%M:%S').time()
timeStop = datetime.strptime(stop, '%H:%M:%S').time()
self.times[iDec].append([timeStart, timeStop])
# this is a mask for with values for each evaluation frequency
def addWindowMask(self, site, maskName, **kwargs):
winMasker = WindowMasker(self.proj, site, self.getSampleFreq(), self.getDecParams(), self.getWinParams())
winMasker.readWindowFile(maskName)
self.siteMasks[site].append(winMasker)
###################
### GET SHARED GLOBAL WINDOWS
### THIS DOES NOT INCLUDE ANY MASKS WHICH MIGHT BE APPLIED
##################
def calcSharedWindows(self):
if len(self.getSites()) == 0:
self.printWarning("No sites given to Window Selector. At least one site needs to be given.")
return False
# calculate datetime constraints
self.calcDatetimeConstraints()
# initialise the sharedIndices with a set from one site
sites = self.getSites()
siteInit = sites[0]
numLevels = self.getDecParams().getNumLevels()
for iDec in xrange(0, numLevels):
self.sharedIndices[iDec] = self.getGlobalIndices()[siteInit][iDec]
# now for each decimation level
# calculate the shared ones
for iDec in xrange(0, numLevels):
for s in self.getSites():
self.sharedIndices[iDec] = self.sharedIndices[iDec].intersection(self.getGlobalIndices()[s][iDec])
# apply time constraints
# time constraints should be formulate as a set
# and then, find the intersection again
for iDec in xrange(0, numLevels):
constraints = self.getLevelDatetimeConstraints(iDec)
if len(constraints) != 0:
datetimeIndices = set()
for dC in constraints:
gIndexStart, firstWindowStart = datetime2gIndex(self.proj.getRefTime(), dC[0], self.decParams.getSampleFreqLevel(iDec), self.winParams.getWindowSize(iDec), self.winParams.getOverlap(iDec))
gIndexEnd, firstWindowEnd = datetime2gIndex(self.proj.getRefTime(), dC[1], self.decParams.getSampleFreqLevel(iDec), self.winParams.getWindowSize(iDec), self.winParams.getOverlap(iDec))
gIndexEnd = gIndexEnd - 1 # as the function returns the next window starting after time
if gIndexEnd < gIndexStart:
gIndexEnd = gIndexStart
datetimeIndices.update(range(gIndexStart, gIndexEnd))
self.printText("Decimation level = {}. Applying date constraint {} - {}, global index constraint {} - {}".format(iDec, dC[0], dC[1], gIndexStart, gIndexEnd))
self.sharedIndices[iDec] = self.sharedIndices[iDec].intersection(datetimeIndices)
###################
### GET WINDOW RANGES
##################
def calcGlobalIndices(self):
# get all the spectra files with the correct sampling frequency
for s in self.getSites():
timeFilesFs = self.proj.getSiteTimeFilesFs(s, self.getSampleFreq())
specFiles = self.proj.getSiteSpectraFiles(s)
specFilesFs = []
for sF in specFiles:
if sF in timeFilesFs:
specFilesFs.append(sF)
self.siteSpecFolders[s] = specFilesFs
# for each decimation level
# loop through each of the spectra folders
# and find the global indices ranges for each decimation level
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
# get the dictionaries ready
self.siteSpecReaders[s][iDec] = {}
self.siteSpecRanges[s][iDec] = {}
self.siteGlobalIndices[s][iDec] = set()
# loop through spectra folders and figure out global indices
for sF in self.siteSpecFolders[s]:
specReader = SpectrumReader(os.path.join(self.proj.getSpecDataPathSite(s), sF))
check = specReader.openBinaryForReading(self.getPrepend(), iDec)
# see if file exists
# if not, continue
if not check:
continue
self.siteSpecReaders[s][iDec][sF] = specReader
globalRange = specReader.getGlobalRange()
self.siteSpecRanges[s][iDec][sF] = globalRange
# and save set of global indices
self.siteGlobalIndices[s][iDec].update(range(globalRange[0], globalRange[1]+1))
# Datetime constraints: priority is datetime, then dates, then times
def calcDatetimeConstraints(self):
# calculate site dates if required
siteDates = self.calcSiteDates()
# datetime constraints are for each decimation level
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
# calculate date and time constraints for each level
# begin with the datetime constraints - these have highest priority
self.datetimeConstraints[iDec] = self.datetimes[iDec]
# check to see whether any date and time constraints
if len(self.dates[iDec]) == 0 and len(self.times[iDec]) == 0:
continue
dateConstraints = []
if len(self.dates[iDec]) != 0:
# apply time constraints only on specified days
dateConstraints = self.dates[iDec]
else:
dateConstraints = siteDates
# finally, add the time constraints to the dates
# otherwise add the whole day
dateAndTimeConstraints = []
if len(self.times[iDec]) == 0:
# add whole days
for dC in dateConstraints:
start = datetime.combine(dC, time(0,0,0))
stop = datetime.combine(dC, time(23,59,59))
dateAndTimeConstraints.append([start, stop])
else:
# add each time for each day
for tC in self.times[iDec]:
for dC in dateConstraints:
start = datetime.combine(dC, tC[0])
stop = datetime.combine(dC, tC[1])
# check if this goes over a day
if tC[1] < tC[0]:
# then need to increment the day
dCNext = dC + timedelta(days=1)
stop = datetime.combine(dCNext, tC[1])
# append to constraints
dateAndTimeConstraints.append([start, stop])
# finally, combine datetimes and dateAndTimeConstraints
self.datetimeConstraints[iDec] = self.datetimeConstraints[iDec] + dateAndTimeConstraints
self.datetimeConstraints[iDec] = sorted(self.datetimeConstraints[iDec])
def calcSiteDates(self):
starts = []
stops = []
for s in self.getSites():
starts.append(self.proj.getSiteStart(s))
stops.append(self.proj.getSiteStop(s))
# need all the dates between
d1 = max(starts).date()
d2 = min(stops).date()
if d1 > d2:
self.printWarning("A site passed to the window selector does not overlap with any other sites. There will be no shared windows")
return
# now with d2 > d1
siteDates = []
delta = d2 - d1
# + 1 because inclusive of stop and start days
for i in range(delta.days + 1):
siteDates.append(d1 + timedelta(days=i))
return siteDates
###################
### DEBUG
##################
def printInfo(self):
self.printText("####################")
self.printText("WINDOW SELECTOR INFO BEGIN")
self.printText("####################")
self.printText("Sampling frequency [Hz] = {:.6f}".format(self.getSampleFreq()))
self.printText("Sites = {}".format(", ".join(self.getSites())))
self.printText("####################")
self.printText("WINDOW SELECTOR INFO END")
self.printText("####################")
def printAllSiteInfo(self):
for s in self.getSites():
self.printSiteInfo(s)
def printSiteInfo(self, site):
self.printText("####################")
self.printText("WINDOW SELECTOR SITE INFO BEGIN")
self.printText("####################")
self.printText("Sampling frequency [Hz] = {:.6f}".format(self.getSampleFreq()))
self.printText("Site = {}".format(site))
self.printText("Site global index information")
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.printText("Decimation Level = {:d}".format(iDec))
ranges = self.getSpecRanges()
for sF in sorted(list(ranges[site][iDec].keys())):
startTime1, endTime1 = gIndex2datetime(ranges[site][iDec][sF][0], self.proj.getRefTime(), self.getSampleFreq()/self.decParams.getDecFactor(iDec), self.winParams.getWindowSize(iDec), self.winParams.getOverlap(iDec))
startTime2, endTime2 = gIndex2datetime(ranges[site][iDec][sF][1], self.proj.getRefTime(), self.getSampleFreq()/self.decParams.getDecFactor(iDec), self.winParams.getWindowSize(iDec), self.winParams.getOverlap(iDec))
self.printText(
"Measurement file = {}\ttime range = {} - {}\tGlobal Indices Range = {:d} - {:d}".format(
sF, startTime1, endTime2, ranges[site][iDec][sF][0], ranges[site][iDec][sF][1]
)
)
self.printText("####################")
self.printText("WINDOW SELECTOR SITE INFO END")
self.printText("####################")
def printSharedIndices(self):
self.printText("####################")
self.printText("WINDOW SELECTOR SHARED INDICES INFO BEGIN")
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.printText("####################")
self.printText("Decimation Level = {:d}".format(iDec))
self.printText("Number of shared windows = {:d}".format(self.getNumSharedWindows(iDec)))
self.printText("Shared Window Indices: {}".format(list2ranges(self.getSharedWindows()[iDec])))
self.printText("NOTE: These are the shared windows at each decimation level. Windows for each evaluation frequency might vary depending on masks")
self.printText("####################")
self.printText("WINDOW SELECTOR SHARED INDICES INFO END")
self.printText("####################")
def printDatetimeConstraints(self):
# calculate datetime constraints
self.calcDatetimeConstraints()
# print out the info
self.printText("####################")
self.printText("WINDOW SELECTOR CONSTRAINT INFO BEGIN")
self.printText("####################")
self.printText("Datetime constraints")
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.printText("Decimation Level = {:d}".format(iDec))
for d in self.getLevelDatetimeConstraints(iDec):
self.printText("Constraint {} - {}".format(d[0], d[1]))
self.printText("####################")
self.printText("WINDOW SELECTOR CONSTRAINT INFO END")
self.printText("####################")
def printWindowMasks(self):
self.printText("####################")
self.printText("WINDOW SELECTOR MASK INFO BEGIN")
self.printText("####################")
for s in self.getSites():
self.printText("Site = {}".format(s))
if len(self.getMasks()[s]) == 0:
self.printText("\tNo masks for this site")
else:
for mask in self.getMasks()[s]:
self.printText("\tMask = {}".format(mask.getMaskName()))
self.printText("####################")
self.printText("WINDOW SELECTOR MASK INFO END")
self.printText("####################")
def printWindowsForFrequency(self):
self.printText("####################")
self.printText("WINDOW SELECTOR FREQUENCY WINDOWS INFO BEGIN")
self.printText("####################")
for iDec in xrange(0, self.getDecParams().getNumLevels()):
evalFreq = self.getDecParams().getEvalFrequenciesForLevel(iDec)
unmaskedWindows = self.getNumSharedWindows(iDec)
for eIdx, eFreq in enumerate(evalFreq):
maskedWindows = self.getWindowsForFreq(iDec, eIdx)
self.printText("Evaluation frequency = {:.6f}, shared windows = {:d}, windows after masking = {:d}".format(eFreq, unmaskedWindows, len(maskedWindows)))
self.printText("{}".format(list2ranges(maskedWindows)))
self.printText("####################")
self.printText("WINDOW SELECTOR FREQUENCY WINDOWS INFO END")
self.printText("####################")
def printText(self, infoStr):
generalPrint("Window Selector Info", infoStr)
def printWarning(self, warnStr):
warningPrint("Window Selector Warning", warnStr)
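# --- Hedged usage sketch (not part of the original module) ---------------------
# Assuming a configured project plus decimation and window parameter objects
# (all supplied elsewhere in magPy), a typical selection pass might look like:
#
#   selector = WindowSelector(proj, 128.0, decParams, winParams)
#   selector.setSites(['site1', 'site2'])
#   selector.addDateConstraint('2016-03-24')            # whole day
#   selector.addTimeConstraint('00:00:00', '06:00:00')  # applied on that day
#   selector.calcSharedWindows()
#   windows = selector.getWindowsForFreq(0, 0)          # level 0, first eval freq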
|
apache-2.0
| -2,665,271,720,281,101,300
| 35.856838
| 222
| 0.681663
| false
| 3.270573
| false
| false
| false
|
amahabal/PySeqsee
|
farg/core/util.py
|
1
|
3569
|
# Copyright (C) 2011, 2012 Abhijit Mahabal
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this
# program. If not, see <http://www.gnu.org/licenses/>
"""General utilities."""
import colorsys
import random
def HSVToColorString(hue, saturation, value):
"""Convert from HSV to RGB color space."""
rgb = ('%02x' % (255.0 * x) for x in colorsys.hsv_to_rgb(hue, saturation, value))
return '#' + ''.join(rgb)
def Toss(x):
"""x is a number between 0 and 1. Returns true with probability x."""
return random.uniform(0, 1) <= x
def ChooseAboutN(n, choices):
"""Choose in a way that the expected number of choices is n.
Args:
n: The expected number of responses.
choices: an iterable of 2-tuples, where the second value is the weight.
An example to show how it works: let choices contain 5 things with weights 10, 20, 30
40, and 50 (thus summing to 150), and let n=3. Then we will keep the first item in the
output with probability 3 * 10/150 (i.e., 20%).
Returns:
A list of a roughly n-sized subset of choices.
"""
choices = list(choices) # Needed since we iterate twice over the iterable.
total = sum(w for _c, w in choices)
return [x[0] for x in choices if Toss(1.0 * n * x[1] / total)]
def WeightedChoice(choices):
"""Chooses an item, biased by weight.
Args:
choices: an iterable of 2-tuples, where the second value is the weight.
Returns:
An element of choices.
"""
choices = list(choices) # Needed since we iterate twice over the iterable.
total = sum(weight for item, weight in choices)
random_sum = random.uniform(0, total)
upto = 0
for item, weight in choices:
if upto + weight > random_sum:
return item
upto += weight
assert False, "Shouldn't get here"
def SelectWeightedByActivation(ltm, choices):
"""Given an ltm and nodes in ltm, chooses one biased by activation."""
choices = ((x, ltm.GetNode(content=x).GetActivation(current_time=0)) for x in choices)
return WeightedChoice(choices)
def UnweightedChoice(choices):
"""Chooses one item uniformly randomly from is an iterable."""
choices = list(choices) # Needed since we need to choose nth element and need length.
random_choice = random.uniform(0, len(choices))
return choices[int(random_choice)]
def WeightedShuffle(choices):
"""Shuffle items by weight.
Args:
choices: an iterable of 2-tuples, where the second value is the weight.
Yields:
Repeatedly yields first elements of the 2-tuple, resulting, when complete, in a shuffle.
"""
total = sum(weight for item, weight in choices)
while total > 0:
random_val = random.uniform(0, total)
upto = 0
for idx, choice in enumerate(choices):
item, weight = choice
if upto + weight > random_val:
total -= weight
choices = choices[0:idx] + choices[idx + 1:]
yield item
continue
upto += weight
def Squash(val, cap):
  """Use a sigmoidal squashing function to squash values above 1 toward cap."""
if val < 1:
return val
return cap * val / (cap - 1.0 + val)
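if __name__ == '__main__':
  # Illustrative demo (not part of the original module): exercise a few of the
  # sampling helpers with an assumed weight table.
  weighted = [('a', 10), ('b', 20), ('c', 30), ('d', 40), ('e', 50)]
  print(WeightedChoice(weighted))   # one item, biased towards 'e'
  print(ChooseAboutN(3, weighted))  # roughly three items on average
  print(Squash(250, 100))           # large values are squashed towards the cap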
|
gpl-3.0
| -742,354,667,693,318,000
| 32.046296
| 93
| 0.692351
| false
| 3.656762
| false
| false
| false
|
lbjay/cds-invenio
|
modules/bibformat/lib/bibformat_bfx_engine_config.py
|
1
|
5843
|
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""BibFormat BFX engine configuration."""
__revision__ = "$Id$"
import os
from invenio.config import CFG_ETCDIR
CFG_BIBFORMAT_BFX_TEMPLATES_PATH = "%s%sbibformat%sformat_templates" % (CFG_ETCDIR, os.sep, os.sep)
CFG_BIBFORMAT_BFX_FORMAT_TEMPLATE_EXTENSION = "bfx"
CFG_BIBFORMAT_BFX_ELEMENT_NAMESPACE = "http://cdsware.cern.ch/invenio/"
CFG_BIBFORMAT_BFX_LABEL_DEFINITIONS = {
#record is a reserved keyword, don't use it
#define one or more addresses for each name or zero if you plan to define them later
'controlfield': [r'/???'],
'datafield': [r'/?????'],
'datafield.subfield': [r'datafield/?'],
'recid': [r'/001'],
'article_id': [],
'language': [r'/041__/a'],
'title': [r'/245__/a'],
'subtitle': [r'/245__/b'],
'secondary_title': [r'/773__/p'],
'first_author': [r'/100__/a'],
'author': [r'/100__/a',
r'/700__/a'],
'author.surname': [r'author#(?P<value>.*),[ ]*(.*)'],
'author.names': [r'author#(.*),[ ]*(?P<value>.*)'],
'abstract': [r'/520__/a'],
'publisher': [r'/260__/b'],
'publisher_location': [r'/260__/a'],
'issn': [r'/022__/a'],
'doi': [r'/773__/a'],
'journal_name_long': [r'/222__/a',
r'/210__/a',
r'/773__/p',
r'/909C4/p'],
'journal_name_short': [r'/210__/a',
r'/773__/p',
r'/909C4/p'],
'journal_name': [r'/773__/p',
r'/909C4/p'],
'journal_volume': [r'/773__/v',
r'/909C4/v'],
'journal_issue': [r'/773__/n'],
'pages': [r'/773__/c',
r'/909C4/c'],
'first_page': [r'/773__/c#(?P<value>\d*)-(\d*)',
r'/909C4/c#(?P<value>\d*)-(\d*)'],
'last_page': [r'/773__/c#(\d*)-(?P<value>\d*)',
r'/909C4/c#(\d*)-(?P<value>\d*)'],
'date': [r'/260__/c'],
'year': [r'/773__/y#(.*)(?P<value>\d\d\d\d).*',
r'/260__/c#(.*)(?P<value>\d\d\d\d).*',
r'/925__/a#(.*)(?P<value>\d\d\d\d).*',
r'/909C4/y'],
'doc_type': [r'/980__/a'],
'doc_status': [r'/980__/c'],
'uri': [r'/8564_/u',
r'/8564_/q'],
'subject': [r'/65017/a'],
'keyword': [r'/6531_/a'],
'day': [],
'month': [],
'creation_date': [],
'reference': []
}
CFG_BIBFORMAT_BFX_ERROR_MESSAGES = \
{
'ERR_BFX_TEMPLATE_REF_NO_NAME' : 'Error: Missing attribute "name" in TEMPLATE_REF.',
'ERR_BFX_TEMPLATE_NOT_FOUND' : 'Error: Template %s not found.',
'ERR_BFX_ELEMENT_NO_NAME' : 'Error: Missing attribute "name" in ELEMENT.',
'ERR_BFX_FIELD_NO_NAME' : 'Error: Missing attribute "name" in FIELD.',
'ERR_BFX_LOOP_NO_OBJECT' : 'Error: Missing attribute "object" in LOOP.',
'ERR_BFX_NO_SUCH_FIELD' : 'Error: Field %s is not defined',
  'ERR_BFX_IF_NO_NAME' : 'Error: Missing attribute "name" in IF.',
'ERR_BFX_TEXT_NO_VALUE' : 'Error: Missing attribute "value" in TEXT.',
'ERR_BFX_INVALID_RE' : 'Error: Invalid regular expression: %s',
'ERR_BFX_INVALID_OPERATOR_NAME' : 'Error: Name %s is not recognised as a valid operator name.',
'ERR_BFX_INVALID_DISPLAY_TYPE' : 'Error: Invalid display type. Must be one of: value, tag, ind1, ind2, code; received: %s',
'ERR_BFX_IF_WRONG_SYNTAX' : 'Error: Invalid syntax of IF statement.',
'ERR_BFX_DUPLICATE_NAME' : 'Error: Duplicate name: %s.',
'ERR_BFX_TEMPLATE_NO_NAME' : 'Error: No name defined for the template.',
'ERR_BFX_NO_TEMPLATES_FOUND' : 'Error: No templates found in the document.',
  'ERR_BFX_TOO_MANY_TEMPLATES' : 'Error: More than one template found in the document. No format found.'
}
CFG_BIBFORMAT_BFX_WARNING_MESSAGES = \
{
'WRN_BFX_TEMPLATE_NO_DESCRIPTION' : 'Warning: No description entered for the template.',
'WRN_BFX_TEMPLATE_NO_CONTENT' : 'Warning: No content type specified for the template. Using default: text/xml.',
'WRN_BFX_NO_FORMAT_FOUND' : 'Warning: No format found. Will look for a default template.'
}
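# --- Hedged illustration (not part of the original configuration) --------------
# Each label above pairs a field address with an optional regular expression
# after '#'; the named group 'value' is what the BFX engine appears to extract.
# The regex part behaves like plain `re`, e.g.:
#
# >>> import re
# >>> re.match(r'(?P<value>.*),[ ]*(.*)', 'Ellis, John').group('value')
# 'Ellis'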
|
gpl-2.0
| -8,086,134,669,331,546,000
| 49.808696
| 146
| 0.479719
| false
| 3.445165
| false
| false
| false
|
TurboTurtle/sos
|
sos/collector/__init__.py
|
1
|
54982
|
# Copyright Red Hat 2020, Jake Hunsaker <jhunsake@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
import fnmatch
import inspect
import json
import os
import random
import re
import string
import socket
import shutil
import subprocess
import sys
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from getpass import getpass
from pathlib import Path
from pipes import quote
from textwrap import fill
from sos.cleaner import SoSCleaner
from sos.collector.sosnode import SosNode
from sos.collector.exceptions import ControlPersistUnsupportedException
from sos.options import ClusterOption
from sos.component import SoSComponent
from sos import __version__
COLLECTOR_CONFIG_DIR = '/etc/sos/groups.d'
class SoSCollector(SoSComponent):
"""Collector is the formerly standalone sos-collector project, brought into
sos natively in 4.0
It is meant to collect reports from an arbitrary number of remote nodes,
as well as the localhost, at the same time. These nodes may be either user
defined, defined by some clustering software, or both.
"""
desc = 'Collect an sos report from multiple nodes simultaneously'
arg_defaults = {
'all_logs': False,
'alloptions': False,
'allow_system_changes': False,
'become_root': False,
'case_id': False,
'chroot': 'auto',
'clean': False,
'cluster_options': [],
'cluster_type': None,
'domains': [],
'enable_plugins': [],
'encrypt_key': '',
'encrypt_pass': '',
'group': None,
'image': '',
'jobs': 4,
'keywords': [],
'label': '',
'list_options': False,
'log_size': 0,
'map_file': '/etc/sos/cleaner/default_mapping',
'master': '',
'nodes': [],
'no_env_vars': False,
'no_local': False,
'nopasswd_sudo': False,
'no_pkg_check': False,
'no_update': False,
'only_plugins': [],
'password': False,
'password_per_node': False,
'plugin_options': [],
'plugin_timeout': None,
'preset': '',
'save_group': '',
'since': '',
'skip_commands': [],
'skip_files': [],
'skip_plugins': [],
'sos_opt_line': '',
'ssh_key': '',
'ssh_port': 22,
'ssh_user': 'root',
'timeout': 600,
'verify': False,
'usernames': [],
'upload': False,
'upload_url': None,
'upload_directory': None,
'upload_user': None,
'upload_pass': None,
}
def __init__(self, parser, parsed_args, cmdline_args):
super(SoSCollector, self).__init__(parser, parsed_args, cmdline_args)
os.umask(0o77)
self.client_list = []
self.node_list = []
self.master = False
self.retrieved = 0
self.cluster = None
self.cluster_type = None
# add manifest section for collect
self.manifest.components.add_section('collect')
# shorthand reference
self.collect_md = self.manifest.components.collect
# placeholders in manifest organization
self.collect_md.add_field('cluster_type', 'none')
self.collect_md.add_list('node_list')
# add a place to set/get the sudo password, but do not expose it via
# the CLI, because security is a thing
setattr(self.opts, 'sudo_pw', '')
# get the local hostname and addresses to filter from results later
self.hostname = socket.gethostname()
try:
self.ip_addrs = list(set([
i[4][0] for i in socket.getaddrinfo(socket.gethostname(), None)
]))
except Exception:
# this is almost always a DNS issue with reverse resolution
# set a safe fallback and log the issue
            self.log_error(
                "Could not get a list of IP addresses from this hostname. "
                "This may indicate a DNS issue in your environment"
            )
self.ip_addrs = ['127.0.0.1']
self._parse_options()
self.clusters = self.load_clusters()
if not self.opts.list_options:
try:
self.parse_node_strings()
self.parse_cluster_options()
self._check_for_control_persist()
self.log_debug('Executing %s' % ' '.join(s for s in sys.argv))
self.log_debug("Found cluster profiles: %s"
% self.clusters.keys())
self.verify_cluster_options()
except KeyboardInterrupt:
self.exit('Exiting on user cancel', 130)
except Exception:
raise
def load_clusters(self):
"""Loads all cluster types supported by the local installation for
future comparison and/or use
"""
import sos.collector.clusters
package = sos.collector.clusters
supported_clusters = {}
clusters = self._load_modules(package, 'clusters')
for cluster in clusters:
supported_clusters[cluster[0]] = cluster[1](self.commons)
return supported_clusters
def _load_modules(self, package, submod):
"""Helper to import cluster and host types"""
modules = []
for path in package.__path__:
if os.path.isdir(path):
modules.extend(self._find_modules_in_path(path, submod))
return modules
def _find_modules_in_path(self, path, modulename):
"""Given a path and a module name, find everything that can be imported
and then import it
path - the filesystem path of the package
modulename - the name of the module in the package
E.G. a path of 'clusters', and a modulename of 'ovirt' equates to
importing sos.collector.clusters.ovirt
"""
modules = []
if os.path.exists(path):
for pyfile in sorted(os.listdir(path)):
if not pyfile.endswith('.py'):
continue
if '__' in pyfile:
continue
fname, ext = os.path.splitext(pyfile)
modname = 'sos.collector.%s.%s' % (modulename, fname)
modules.extend(self._import_modules(modname))
return modules
def _import_modules(self, modname):
"""Import and return all found classes in a module"""
mod_short_name = modname.split('.')[2]
module = __import__(modname, globals(), locals(), [mod_short_name])
modules = inspect.getmembers(module, inspect.isclass)
        # drop the base classes themselves; build a new list instead of
        # removing entries while iterating over the same list
        modules = [mod for mod in modules
                   if mod[0] not in ('SosHost', 'Cluster')]
return modules
def parse_node_strings(self):
"""Parses the given --nodes option(s) to properly format the regex
list that we use. We cannot blindly split on ',' chars since it is a
valid regex character, so we need to scan along the given strings and
        check at each comma if we should use the preceding string by itself
or not, based on if there is a valid regex at that index.
"""
if not self.opts.nodes:
return
nodes = []
if not isinstance(self.opts.nodes, list):
self.opts.nodes = [self.opts.nodes]
for node in self.opts.nodes:
idxs = [i for i, m in enumerate(node) if m == ',']
idxs.append(len(node))
start = 0
pos = 0
for idx in idxs:
try:
pos = idx
reg = node[start:idx]
re.compile(re.escape(reg))
# make sure we aren't splitting a regex value
if '[' in reg and ']' not in reg:
continue
nodes.append(reg.lstrip(','))
start = idx
except re.error:
continue
if pos != len(node):
nodes.append(node[pos+1:])
self.opts.nodes = nodes
@classmethod
def add_parser_options(cls, parser):
# Add the supported report passthru options to a group for logical
# grouping in --help display
sos_grp = parser.add_argument_group(
'Report Passthru Options',
'These options control how report is run on nodes'
)
sos_grp.add_argument('-a', '--alloptions', action='store_true',
help='Enable all sos report options')
sos_grp.add_argument('--all-logs', action='store_true',
help='Collect logs regardless of size')
sos_grp.add_argument('--allow-system-changes', action='store_true',
default=False,
help=('Allow sosreport to run commands that may '
'alter system state'))
sos_grp.add_argument('--chroot', default='',
choices=['auto', 'always', 'never'],
help="chroot executed commands to SYSROOT")
sos_grp.add_argument('-e', '--enable-plugins', action="extend",
help='Enable specific plugins for sosreport')
sos_grp.add_argument('-k', '--plugin-options', action="extend",
help='Plugin option as plugname.option=value')
sos_grp.add_argument('--log-size', default=0, type=int,
help='Limit the size of individual logs (in MiB)')
sos_grp.add_argument('-n', '--skip-plugins', action="extend",
help='Skip these plugins')
sos_grp.add_argument('-o', '--only-plugins', action="extend",
default=[],
help='Run these plugins only')
sos_grp.add_argument('--no-env-vars', action='store_true',
default=False,
help='Do not collect env vars in sosreports')
sos_grp.add_argument('--plugin-timeout', type=int, default=None,
help='Set the global plugin timeout value')
sos_grp.add_argument('--since', default=None,
                             help=('Skip archived files older than the given date. '
'This will also affect --all-logs. '
'Format: YYYYMMDD[HHMMSS]'))
sos_grp.add_argument('--skip-commands', default=[], action='extend',
dest='skip_commands',
help="do not execute these commands")
sos_grp.add_argument('--skip-files', default=[], action='extend',
dest='skip_files',
help="do not collect these files")
sos_grp.add_argument('--verify', action="store_true",
help='perform pkg verification during collection')
# Add the collector specific options to a separate group to keep
# everything organized
collect_grp = parser.add_argument_group(
'Collector Options',
'These options control how collect runs locally'
)
collect_grp.add_argument('-b', '--become', action='store_true',
dest='become_root',
help='Become root on the remote nodes')
collect_grp.add_argument('--case-id', help='Specify case number')
collect_grp.add_argument('--cluster-type',
help='Specify a type of cluster profile')
collect_grp.add_argument('-c', '--cluster-option',
dest='cluster_options', action='append',
help=('Specify a cluster options used by a '
'profile and takes the form of '
'cluster.option=value'))
collect_grp.add_argument('--group', default=None,
help='Use a predefined group JSON file')
collect_grp.add_argument('--save-group', default='',
help='Save a resulting node list to a group')
collect_grp.add_argument('--image',
help=('Specify the container image to use for'
' containerized hosts.'))
collect_grp.add_argument('-i', '--ssh-key', help='Specify an ssh key')
collect_grp.add_argument('-j', '--jobs', default=4, type=int,
help='Number of concurrent nodes to collect')
collect_grp.add_argument('-l', '--list-options', action="store_true",
help='List options available for profiles')
collect_grp.add_argument('--label',
help='Assign a label to the archives')
collect_grp.add_argument('--master', help='Specify a master node')
collect_grp.add_argument('--nopasswd-sudo', action='store_true',
help='Use passwordless sudo on nodes')
collect_grp.add_argument('--nodes', action="append",
help=('Provide a comma delimited list of '
'nodes, or a regex to match against'))
collect_grp.add_argument('--no-pkg-check', action='store_true',
help=('Do not run package checks. Use this '
'with --cluster-type if there are rpm '
'or apt issues on node'))
collect_grp.add_argument('--no-local', action='store_true',
help='Do not collect a report from localhost')
collect_grp.add_argument('-p', '--ssh-port', type=int,
help='Specify SSH port for all nodes')
collect_grp.add_argument('--password', action='store_true',
default=False,
help='Prompt for user password for nodes')
collect_grp.add_argument('--password-per-node', action='store_true',
default=False,
help='Prompt for password for each node')
collect_grp.add_argument('--preset', default='', required=False,
help='Specify a sos preset to use')
collect_grp.add_argument('--sos-cmd', dest='sos_opt_line',
help=('Manually specify the commandline '
'for sos report on nodes'))
collect_grp.add_argument('--ssh-user',
help='Specify an SSH user. Default root')
collect_grp.add_argument('--timeout', type=int, required=False,
help='Timeout for sosreport on each node.')
collect_grp.add_argument("--upload", action="store_true",
default=False,
help="Upload archive to a policy-default "
"location")
collect_grp.add_argument("--upload-url", default=None,
help="Upload the archive to specified server")
collect_grp.add_argument("--upload-directory", default=None,
help="Specify upload directory for archive")
collect_grp.add_argument("--upload-user", default=None,
help="Username to authenticate with")
collect_grp.add_argument("--upload-pass", default=None,
help="Password to authenticate with")
# Group the cleaner options together
cleaner_grp = parser.add_argument_group(
'Cleaner/Masking Options',
'These options control how data obfuscation is performed'
)
cleaner_grp.add_argument('--clean', '--cleaner', '--mask',
dest='clean',
default=False, action='store_true',
                                 help='Obfuscate sensitive information')
cleaner_grp.add_argument('--domains', dest='domains', default=[],
action='extend',
help='Additional domain names to obfuscate')
cleaner_grp.add_argument('--keywords', action='extend', default=[],
dest='keywords',
help='List of keywords to obfuscate')
cleaner_grp.add_argument('--no-update', action='store_true',
default=False, dest='no_update',
help='Do not update the default cleaner map')
cleaner_grp.add_argument('--map', dest='map_file',
default='/etc/sos/cleaner/default_mapping',
help=('Provide a previously generated mapping'
' file for obfuscation'))
cleaner_grp.add_argument('--usernames', dest='usernames', default=[],
action='extend',
help='List of usernames to obfuscate')
def _check_for_control_persist(self):
"""Checks to see if the local system supported SSH ControlPersist.
ControlPersist allows OpenSSH to keep a single open connection to a
remote host rather than building a new session each time. This is the
        same feature that Ansible uses in place of paramiko, a dependency
        that sos-collector needs to avoid.
This check relies on feedback from the ssh binary. The command being
run should always generate stderr output, but depending on what that
output reads we can determine if ControlPersist is supported or not.
For our purposes, a host that does not support ControlPersist is not
able to run sos-collector.
Returns
True if ControlPersist is supported, else raise Exception.
"""
ssh_cmd = ['ssh', '-o', 'ControlPersist']
cmd = subprocess.Popen(ssh_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = cmd.communicate()
err = err.decode('utf-8')
if 'Bad configuration option' in err or 'Usage:' in err:
raise ControlPersistUnsupportedException
return True
def exit(self, msg, error=1):
"""Used to safely terminate if sos-collector encounters an error"""
self.log_error(msg)
try:
self.close_all_connections()
except Exception:
pass
self.cleanup()
sys.exit(error)
def _parse_options(self):
"""From commandline options, defaults, etc... build a set of commons
to hand to other collector mechanisms
"""
self.commons = {
'cmdlineopts': self.opts,
'need_sudo': True if self.opts.ssh_user != 'root' else False,
'tmpdir': self.tmpdir,
'hostlen': len(self.opts.master) or len(self.hostname),
'policy': self.policy
}
def parse_cluster_options(self):
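        # e.g. '-c ovirt.no-database=True' becomes
        # ClusterOption('no-database', 'True', str, 'ovirt')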
opts = []
if not isinstance(self.opts.cluster_options, list):
self.opts.cluster_options = [self.opts.cluster_options]
if self.opts.cluster_options:
for option in self.opts.cluster_options:
cluster = option.split('.')[0]
name = option.split('.')[1].split('=')[0]
try:
# there are no instances currently where any cluster option
# should contain a legitimate space.
value = option.split('=')[1].split()[0]
except IndexError:
# conversion to boolean is handled during validation
value = 'True'
opts.append(
ClusterOption(name, value, value.__class__, cluster)
)
self.opts.cluster_options = opts
def verify_cluster_options(self):
"""Verify that requested cluster options exist"""
if self.opts.cluster_options:
for opt in self.opts.cluster_options:
match = False
for clust in self.clusters:
for option in self.clusters[clust].options:
if opt.name == option.name and opt.cluster == clust:
match = True
opt.value = self._validate_option(option, opt)
break
if not match:
self.exit('Unknown cluster option provided: %s.%s'
% (opt.cluster, opt.name))
def _validate_option(self, default, cli):
"""Checks to make sure that the option given on the CLI is valid.
Valid in this sense means that the type of value given matches what a
cluster profile expects (str for str, bool for bool, etc).
For bool options, this will also convert the string equivalent to an
actual boolean value
"""
if not default.opt_type == bool:
if not default.opt_type == cli.opt_type:
msg = "Invalid option type for %s. Expected %s got %s"
self.exit(msg % (cli.name, default.opt_type, cli.opt_type))
return cli.value
else:
val = cli.value.lower()
if val not in ['true', 'on', 'yes', 'false', 'off', 'no']:
msg = ("Invalid value for %s. Accepted values are: 'true', "
"'false', 'on', 'off', 'yes', 'no'.")
self.exit(msg % cli.name)
else:
if val in ['true', 'on', 'yes']:
return True
else:
return False
def log_info(self, msg):
"""Log info messages to both console and log file"""
self.soslog.info(msg)
def log_warn(self, msg):
"""Log warn messages to both console and log file"""
        self.soslog.warning(msg)
def log_error(self, msg):
"""Log error messages to both console and log file"""
self.soslog.error(msg)
def log_debug(self, msg):
"""Log debug message to both console and log file"""
caller = inspect.stack()[1][3]
msg = '[sos_collector:%s] %s' % (caller, msg)
self.soslog.debug(msg)
def list_options(self):
"""Display options for available clusters"""
sys.stdout.write('\nThe following clusters are supported by this '
'installation\n')
sys.stdout.write('Use the short name with --cluster-type or cluster '
'options (-c)\n\n')
for cluster in sorted(self.clusters):
sys.stdout.write(" {:<15} {:30}\n".format(
cluster,
self.clusters[cluster].cluster_name))
_opts = {}
for _cluster in self.clusters:
for opt in self.clusters[_cluster].options:
if opt.name not in _opts.keys():
_opts[opt.name] = opt
else:
for clust in opt.cluster:
if clust not in _opts[opt.name].cluster:
_opts[opt.name].cluster.append(clust)
sys.stdout.write('\nThe following cluster options are available:\n\n')
sys.stdout.write(' {:25} {:15} {:<10} {:10} {:<}\n'.format(
'Cluster',
'Option Name',
'Type',
'Default',
'Description'
))
for _opt in sorted(_opts, key=lambda x: _opts[x].cluster):
opt = _opts[_opt]
optln = ' {:25} {:15} {:<10} {:<10} {:<10}\n'.format(
', '.join(c for c in sorted(opt.cluster)),
opt.name,
opt.opt_type.__name__,
str(opt.value),
opt.description)
sys.stdout.write(optln)
sys.stdout.write('\nOptions take the form of cluster.name=value'
'\nE.G. "ovirt.no-database=True" or '
'"pacemaker.offline=False"\n')
def delete_tmp_dir(self):
"""Removes the temp directory and all collected sosreports"""
shutil.rmtree(self.tmpdir)
def _get_archive_name(self):
"""Generates a name for the tarball archive"""
nstr = 'sos-collector'
if self.opts.label:
nstr += '-%s' % self.opts.label
if self.opts.case_id:
nstr += '-%s' % self.opts.case_id
dt = datetime.strftime(datetime.now(), '%Y-%m-%d')
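        # compatibility shim: ensure string.lowercase exists on Python 3,
        # where only string.ascii_lowercase is defined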
try:
string.lowercase = string.ascii_lowercase
except NameError:
pass
rand = ''.join(random.choice(string.lowercase) for x in range(5))
return '%s-%s-%s' % (nstr, dt, rand)
def _get_archive_path(self):
"""Returns the path, including filename, of the tarball we build
that contains the collected sosreports
"""
self.arc_name = self._get_archive_name()
compr = 'gz'
return self.tmpdir + '/' + self.arc_name + '.tar.' + compr
def _fmt_msg(self, msg):
width = 80
_fmt = ''
for line in msg.splitlines():
_fmt = _fmt + fill(line, width, replace_whitespace=False) + '\n'
return _fmt
def _load_group_config(self):
"""
Attempts to load the host group specified on the command line.
Host groups are defined via JSON files, typically saved under
/etc/sos/groups.d/, although users can specify a full filepath
on the commandline to point to one existing anywhere on the system
Host groups define a list of nodes and/or regexes and optionally the
master and cluster-type options.
"""
grp = self.opts.group
paths = [
grp,
os.path.join(Path.home(), '.config/sos/groups.d/%s' % grp),
os.path.join(COLLECTOR_CONFIG_DIR, grp)
]
fname = None
for path in paths:
if os.path.exists(path):
fname = path
break
if fname is None:
raise OSError("no group definition for %s" % grp)
self.log_debug("Loading host group %s" % fname)
with open(fname, 'r') as hf:
_group = json.load(hf)
for key in ['master', 'cluster_type']:
if _group[key]:
self.log_debug("Setting option '%s' to '%s' per host group"
% (key, _group[key]))
setattr(self.opts, key, _group[key])
if _group['nodes']:
self.log_debug("Adding %s to node list" % _group['nodes'])
self.opts.nodes.extend(_group['nodes'])
def write_host_group(self):
"""
Saves the results of this run of sos-collector to a host group file
on the system so it can be used later on.
The host group will save the options master, cluster_type, and nodes
as determined by sos-collector prior to execution of sosreports.
"""
cfg = {
'name': self.opts.save_group,
'master': self.opts.master,
'cluster_type': self.cluster.cluster_type[0],
'nodes': [n for n in self.node_list]
}
if os.getuid() != 0:
group_path = os.path.join(Path.home(), '.config/sos/groups.d')
# create the subdir within the user's home directory
os.makedirs(group_path, exist_ok=True)
else:
group_path = COLLECTOR_CONFIG_DIR
fname = os.path.join(group_path, cfg['name'])
with open(fname, 'w') as hf:
json.dump(cfg, hf)
os.chmod(fname, 0o644)
return fname
def prep(self):
self.policy.set_commons(self.commons)
if (not self.opts.password and not
self.opts.password_per_node):
self.log_debug('password not specified, assuming SSH keys')
msg = ('sos-collector ASSUMES that SSH keys are installed on all '
'nodes unless the --password option is provided.\n')
self.ui_log.info(self._fmt_msg(msg))
if ((self.opts.password or (self.opts.password_per_node and
self.opts.master))
and not self.opts.batch):
self.log_debug('password specified, not using SSH keys')
msg = ('Provide the SSH password for user %s: '
% self.opts.ssh_user)
self.opts.password = getpass(prompt=msg)
if ((self.commons['need_sudo'] and not self.opts.nopasswd_sudo)
and not self.opts.batch):
if not self.opts.password and not self.opts.password_per_node:
self.log_debug('non-root user specified, will request '
'sudo password')
msg = ('A non-root user has been provided. Provide sudo '
'password for %s on remote nodes: '
% self.opts.ssh_user)
self.opts.sudo_pw = getpass(prompt=msg)
else:
if not self.opts.nopasswd_sudo:
self.opts.sudo_pw = self.opts.password
if self.opts.become_root:
if not self.opts.ssh_user == 'root':
if self.opts.batch:
msg = ("Cannot become root without obtaining root "
"password. Do not use --batch if you need "
"to become root remotely.")
self.exit(msg, 1)
self.log_debug('non-root user asking to become root remotely')
msg = ('User %s will attempt to become root. '
'Provide root password: ' % self.opts.ssh_user)
self.opts.root_password = getpass(prompt=msg)
self.commons['need_sudo'] = False
else:
self.log_info('Option to become root but ssh user is root.'
' Ignoring request to change user on node')
self.opts.become_root = False
if self.opts.group:
try:
self._load_group_config()
except Exception as err:
self.log_error("Could not load specified group %s: %s"
% (self.opts.group, err))
self._exit(1)
self.policy.pre_work()
if self.opts.master:
self.connect_to_master()
self.opts.no_local = True
else:
try:
can_run_local = True
local_sudo = None
skip_local_msg = (
"Local sos report generation forcibly skipped due "
"to lack of root privileges.\nEither use --nopasswd-sudo, "
"run as root, or do not use --batch so that you will be "
"prompted for a password\n"
)
if (not self.opts.no_local and (os.getuid() != 0 and not
self.opts.nopasswd_sudo)):
if not self.opts.batch:
msg = ("Enter local sudo password to generate local "
"sos report: ")
local_sudo = getpass(msg)
if local_sudo == '':
self.ui_log.info(skip_local_msg)
can_run_local = False
self.opts.no_local = True
local_sudo = None
else:
self.ui_log.info(skip_local_msg)
can_run_local = False
self.opts.no_local = True
self.master = SosNode('localhost', self.commons,
local_sudo=local_sudo,
load_facts=can_run_local)
except Exception as err:
self.log_debug("Unable to determine local installation: %s" %
err)
self.exit('Unable to determine local installation. Use the '
'--no-local option if localhost should not be '
'included.\nAborting...\n', 1)
self.collect_md.add_field('master', self.master.address)
self.collect_md.add_section('nodes')
self.collect_md.nodes.add_section(self.master.address)
self.master.set_node_manifest(getattr(self.collect_md.nodes,
self.master.address))
if self.opts.cluster_type:
if self.opts.cluster_type == 'none':
self.cluster = self.clusters['jbon']
else:
self.cluster = self.clusters[self.opts.cluster_type]
self.cluster_type = self.opts.cluster_type
self.cluster.master = self.master
else:
self.determine_cluster()
if self.cluster is None and not self.opts.nodes:
msg = ('Cluster type could not be determined and no nodes provided'
'\nAborting...')
self.exit(msg, 1)
elif self.cluster is None and self.opts.nodes:
self.log_info("Cluster type could not be determined, but --nodes "
"is provided. Attempting to continue using JBON "
"cluster type and the node list")
self.cluster = self.clusters['jbon']
self.cluster_type = 'none'
self.collect_md.add_field('cluster_type', self.cluster_type)
if self.cluster:
self.master.cluster = self.cluster
self.cluster.setup()
if self.cluster.cluster_ssh_key:
if not self.opts.ssh_key:
self.log_debug("Updating SSH key to %s per cluster"
% self.cluster.cluster_ssh_key)
self.opts.ssh_key = self.cluster.cluster_ssh_key
self.get_nodes()
if self.opts.save_group:
gname = self.opts.save_group
try:
fname = self.write_host_group()
self.log_info("Wrote group '%s' to %s" % (gname, fname))
except Exception as err:
self.log_error("Could not save group %s: %s" % (gname, err))
def display_nodes(self):
"""Prints a list of nodes to collect from, if available. If no nodes
are discovered or provided, abort.
"""
self.ui_log.info('')
if not self.node_list and not self.master.connected:
self.exit('No nodes were detected, or nodes do not have sos '
'installed.\nAborting...')
self.ui_log.info('The following is a list of nodes to collect from:')
if self.master.connected and self.master.hostname is not None:
if not (self.master.local and self.opts.no_local):
self.ui_log.info('\t%-*s' % (self.commons['hostlen'],
self.master.hostname))
for node in sorted(self.node_list):
self.ui_log.info("\t%-*s" % (self.commons['hostlen'], node))
self.ui_log.info('')
if not self.opts.batch:
try:
input("\nPress ENTER to continue with these nodes, or press "
"CTRL-C to quit\n")
self.ui_log.info("")
except KeyboardInterrupt:
self.exit("Exiting on user cancel", 130)
def configure_sos_cmd(self):
"""Configures the sosreport command that is run on the nodes"""
self.sos_cmd = 'sosreport --batch '
if self.opts.sos_opt_line:
filt = ['&', '|', '>', '<', ';']
if any(f in self.opts.sos_opt_line for f in filt):
self.log_warn('Possible shell script found in provided sos '
'command. Ignoring --sos-opt-line entirely.')
self.opts.sos_opt_line = None
else:
self.sos_cmd = '%s %s' % (
self.sos_cmd, quote(self.opts.sos_opt_line))
self.log_debug("User specified manual sosreport command. "
"Command set to %s" % self.sos_cmd)
return True
sos_opts = []
if self.opts.case_id:
sos_opts.append('--case-id=%s' % (quote(self.opts.case_id)))
if self.opts.alloptions:
sos_opts.append('--alloptions')
if self.opts.all_logs:
sos_opts.append('--all-logs')
if self.opts.verify:
sos_opts.append('--verify')
if self.opts.log_size:
sos_opts.append(('--log-size=%s' % quote(str(self.opts.log_size))))
if self.opts.sysroot:
sos_opts.append('-s %s' % quote(self.opts.sysroot))
if self.opts.chroot:
sos_opts.append('-c %s' % quote(self.opts.chroot))
if self.opts.compression_type != 'auto':
sos_opts.append('-z %s' % (quote(self.opts.compression_type)))
self.sos_cmd = self.sos_cmd + ' '.join(sos_opts)
self.log_debug("Initial sos cmd set to %s" % self.sos_cmd)
self.commons['sos_cmd'] = self.sos_cmd
self.collect_md.add_field('initial_sos_cmd', self.sos_cmd)
def connect_to_master(self):
"""If run with --master, we will run cluster checks again that
instead of the localhost.
"""
try:
self.master = SosNode(self.opts.master, self.commons)
self.ui_log.info('Connected to %s, determining cluster type...'
% self.opts.master)
except Exception as e:
self.log_debug('Failed to connect to master: %s' % e)
self.exit('Could not connect to master node. Aborting...', 1)
def determine_cluster(self):
"""This sets the cluster type and loads that cluster's cluster.
If no cluster type is matched and no list of nodes is provided by
the user, then we abort.
If a list of nodes is given, this is not run, however the cluster
can still be run if the user sets a --cluster-type manually
"""
checks = list(self.clusters.values())
for cluster in self.clusters.values():
checks.remove(cluster)
cluster.master = self.master
if cluster.check_enabled():
cname = cluster.__class__.__name__
self.log_debug("Installation matches %s, checking for layered "
"profiles" % cname)
for remaining in checks:
if issubclass(remaining.__class__, cluster.__class__):
rname = remaining.__class__.__name__
self.log_debug("Layered profile %s found. "
"Checking installation"
% rname)
remaining.master = self.master
if remaining.check_enabled():
self.log_debug("Installation matches both layered "
"profile %s and base profile %s, "
"setting cluster type to layered "
"profile" % (rname, cname))
cluster = remaining
break
self.cluster = cluster
self.cluster_type = cluster.name()
self.commons['cluster'] = self.cluster
self.ui_log.info(
'Cluster type set to %s' % self.cluster_type)
break
def get_nodes_from_cluster(self):
"""Collects the list of nodes from the determined cluster cluster"""
if self.cluster_type:
nodes = self.cluster._get_nodes()
self.log_debug('Node list: %s' % nodes)
return nodes
return []
def reduce_node_list(self):
"""Reduce duplicate entries of the localhost and/or master node
if applicable"""
if (self.hostname in self.node_list and self.opts.no_local):
self.node_list.remove(self.hostname)
for i in self.ip_addrs:
if i in self.node_list:
self.node_list.remove(i)
# remove the master node from the list, since we already have
# an open session to it.
if self.master:
            for n in list(self.node_list):
if n == self.master.hostname or n == self.opts.master:
self.node_list.remove(n)
self.node_list = list(set(n for n in self.node_list if n))
self.log_debug('Node list reduced to %s' % self.node_list)
self.collect_md.add_list('node_list', self.node_list)
def compare_node_to_regex(self, node):
"""Compares a discovered node name to a provided list of nodes from
the user. If there is not a match, the node is removed from the list"""
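        # e.g. a --nodes value of 'node*' is translated by fnmatch into a
        # regex that matches 'node1.example.com'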
for regex in self.opts.nodes:
try:
regex = fnmatch.translate(regex)
if re.match(regex, node):
return True
except re.error as err:
msg = 'Error comparing %s to provided node regex %s: %s'
self.log_debug(msg % (node, regex, err))
return False
def get_nodes(self):
""" Sets the list of nodes to collect sosreports from """
if not self.master and not self.cluster:
msg = ('Could not determine a cluster type and no list of '
'nodes or master node was provided.\nAborting...'
)
self.exit(msg)
try:
nodes = self.get_nodes_from_cluster()
if self.opts.nodes:
for node in nodes:
if self.compare_node_to_regex(node):
self.node_list.append(node)
else:
self.node_list = nodes
except Exception as e:
self.log_debug("Error parsing node list: %s" % e)
self.log_debug('Setting node list to --nodes option')
self.node_list = self.opts.nodes
        for node in list(self.node_list):
if any(i in node for i in ('*', '\\', '?', '(', ')', '/')):
self.node_list.remove(node)
# force add any non-regex node strings from nodes option
if self.opts.nodes:
for node in self.opts.nodes:
if any(i in node for i in '*\\?()/[]'):
continue
if node not in self.node_list:
self.log_debug("Force adding %s to node list" % node)
self.node_list.append(node)
if not self.master:
host = self.hostname.split('.')[0]
# trust the local hostname before the node report from cluster
            for node in list(self.node_list):
if host == node.split('.')[0]:
self.node_list.remove(node)
self.node_list.append(self.hostname)
self.reduce_node_list()
try:
self.commons['hostlen'] = len(max(self.node_list, key=len))
except (TypeError, ValueError):
self.commons['hostlen'] = len(self.opts.master)
def _connect_to_node(self, node):
"""Try to connect to the node, and if we can add to the client list to
run sosreport on
Positional arguments
node - a tuple specifying (address, password). If no password, set
to None
"""
try:
client = SosNode(node[0], self.commons, password=node[1])
client.set_cluster(self.cluster)
if client.connected:
self.client_list.append(client)
self.collect_md.nodes.add_section(node[0])
client.set_node_manifest(getattr(self.collect_md.nodes,
node[0]))
else:
client.close_ssh_session()
except Exception:
pass
def intro(self):
"""Print the intro message and prompts for a case ID if one is not
provided on the command line
"""
disclaimer = ("""\
This utility is used to collect sosreports from multiple \
nodes simultaneously. It uses OpenSSH's ControlPersist feature \
to connect to nodes and run commands remotely. If your system \
installation of OpenSSH is older than 5.6, please upgrade.
An archive of sosreport tarballs collected from the nodes will be \
generated in %s and may be provided to an appropriate support representative.
The generated archive may contain data considered sensitive \
and its content should be reviewed by the originating \
organization before being passed to any third party.
No configuration changes will be made to the system running \
this utility or remote systems that it connects to.
""")
self.ui_log.info("\nsos-collector (version %s)\n" % __version__)
intro_msg = self._fmt_msg(disclaimer % self.tmpdir)
self.ui_log.info(intro_msg)
prompt = "\nPress ENTER to continue, or CTRL-C to quit\n"
if not self.opts.batch:
try:
input(prompt)
self.ui_log.info("")
except KeyboardInterrupt:
self.exit("Exiting on user cancel", 130)
if not self.opts.case_id and not self.opts.batch:
msg = 'Please enter the case id you are collecting reports for: '
self.opts.case_id = input(msg)
def execute(self):
if self.opts.list_options:
self.list_options()
self.cleanup()
raise SystemExit
self.intro()
self.configure_sos_cmd()
self.prep()
self.display_nodes()
self.archive_name = self._get_archive_name()
self.setup_archive(name=self.archive_name)
self.archive_path = self.archive.get_archive_path()
self.archive.makedirs('sos_logs', 0o755)
self.collect()
self.cleanup()
def collect(self):
""" For each node, start a collection thread and then tar all
collected sosreports """
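        # both the connection phase and the collection phase below use thread
        # pools sized by --jobs to work on several nodes concurrently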
if self.master.connected:
self.client_list.append(self.master)
self.ui_log.info("\nConnecting to nodes...")
filters = [self.master.address, self.master.hostname]
nodes = [(n, None) for n in self.node_list if n not in filters]
if self.opts.password_per_node:
_nodes = []
for node in nodes:
msg = ("Please enter the password for %s@%s: "
% (self.opts.ssh_user, node[0]))
node_pwd = getpass(msg)
_nodes.append((node[0], node_pwd))
nodes = _nodes
try:
pool = ThreadPoolExecutor(self.opts.jobs)
pool.map(self._connect_to_node, nodes, chunksize=1)
pool.shutdown(wait=True)
if (self.opts.no_local and
self.client_list[0].address == 'localhost'):
self.client_list.pop(0)
self.report_num = len(self.client_list)
if self.report_num == 0:
self.exit("No nodes connected. Aborting...")
elif self.report_num == 1:
if self.client_list[0].address == 'localhost':
self.exit(
"Collection would only gather from localhost due to "
"failure to either enumerate or connect to cluster "
"nodes. Assuming single collection from localhost is "
"not desired.\n"
"Aborting..."
)
self.ui_log.info("\nBeginning collection of sosreports from %s "
"nodes, collecting a maximum of %s "
"concurrently\n"
% (self.report_num, self.opts.jobs))
pool = ThreadPoolExecutor(self.opts.jobs)
pool.map(self._collect, self.client_list, chunksize=1)
pool.shutdown(wait=True)
except KeyboardInterrupt:
self.log_error('Exiting on user cancel\n')
os._exit(130)
except Exception as err:
self.log_error('Could not connect to nodes: %s' % err)
os._exit(1)
if hasattr(self.cluster, 'run_extra_cmd'):
self.ui_log.info('Collecting additional data from master node...')
files = self.cluster._run_extra_cmd()
if files:
self.master.collect_extra_cmd(files)
msg = '\nSuccessfully captured %s of %s sosreports'
self.log_info(msg % (self.retrieved, self.report_num))
self.close_all_connections()
if self.retrieved > 0:
arc_name = self.create_cluster_archive()
else:
msg = 'No sosreports were collected, nothing to archive...'
self.exit(msg, 1)
if self.opts.upload and self.get_upload_url():
try:
self.policy.upload_archive(arc_name)
self.ui_log.info("Uploaded archive successfully")
except Exception as err:
self.ui_log.error("Upload attempt failed: %s" % err)
def _collect(self, client):
"""Runs sosreport on each node"""
try:
if not client.local:
client.sosreport()
else:
if not self.opts.no_local:
client.sosreport()
if client.retrieved:
self.retrieved += 1
except Exception as err:
self.log_error("Error running sosreport: %s" % err)
def close_all_connections(self):
"""Close all ssh sessions for nodes"""
for client in self.client_list:
self.log_debug('Closing SSH connection to %s' % client.address)
client.close_ssh_session()
def create_cluster_archive(self):
"""Calls for creation of tar archive then cleans up the temporary
files created by sos-collector"""
map_file = None
arc_paths = []
for host in self.client_list:
for fname in host.file_list:
arc_paths.append(fname)
do_clean = False
if self.opts.clean:
hook_commons = {
'policy': self.policy,
'tmpdir': self.tmpdir,
'sys_tmp': self.sys_tmp,
'options': self.opts,
'manifest': self.manifest
}
try:
self.ui_log.info('')
cleaner = SoSCleaner(in_place=True,
hook_commons=hook_commons)
cleaner.set_target_path(self.tmpdir)
map_file, arc_paths = cleaner.execute()
do_clean = True
except Exception as err:
self.ui_log.error("ERROR: unable to obfuscate reports: %s"
% err)
try:
self.log_info('Creating archive of sosreports...')
for fname in arc_paths:
dest = fname.split('/')[-1]
if do_clean:
dest = cleaner.obfuscate_string(dest)
name = os.path.join(self.tmpdir, fname)
self.archive.add_file(name, dest=dest)
if map_file:
# regenerate the checksum for the obfuscated archive
checksum = cleaner.get_new_checksum(fname)
if checksum:
name = os.path.join('checksums', fname.split('/')[-1])
name += '.sha256'
self.archive.add_string(checksum, name)
self.archive.add_file(self.sos_log_file,
dest=os.path.join('sos_logs', 'sos.log'))
self.archive.add_file(self.sos_ui_log_file,
dest=os.path.join('sos_logs', 'ui.log'))
if self.manifest is not None:
self.archive.add_final_manifest_data(
self.opts.compression_type
)
if do_clean:
_dir = os.path.join(self.tmpdir, self.archive._name)
cleaner.obfuscate_file(
os.path.join(_dir, 'sos_logs', 'sos.log'),
short_name='sos.log'
)
cleaner.obfuscate_file(
os.path.join(_dir, 'sos_logs', 'ui.log'),
short_name='ui.log'
)
cleaner.obfuscate_file(
os.path.join(_dir, 'sos_reports', 'manifest.json'),
short_name='manifest.json'
)
arc_name = self.archive.finalize(self.opts.compression_type)
final_name = os.path.join(self.sys_tmp, os.path.basename(arc_name))
if do_clean:
final_name = cleaner.obfuscate_string(
final_name.replace('.tar', '-obfuscated.tar')
)
os.rename(arc_name, final_name)
if map_file:
# rename the map file to match the collector archive name, not
# the temp dir it was constructed in
map_name = cleaner.obfuscate_string(
os.path.join(self.sys_tmp,
"%s_private_map" % self.archive_name)
)
os.rename(map_file, map_name)
self.ui_log.info("A mapping of obfuscated elements is "
"available at\n\t%s" % map_name)
self.soslog.info('Archive created as %s' % final_name)
self.ui_log.info('\nThe following archive has been created. '
'Please provide it to your support team.')
self.ui_log.info('\t%s\n' % final_name)
return final_name
except Exception as err:
msg = ("Could not finalize archive: %s\n\nData may still be "
"available uncompressed at %s" % (err, self.archive_path))
self.exit(msg, 2)
|
gpl-2.0
| -8,600,018,873,667,082,000
| 42.258851
| 79
| 0.524299
| false
| 4.396098
| false
| false
| false
|
0x1001/BabyMonitor
|
app/add_finger_print.py
|
1
|
1251
|
def add_finger_print(file_path):
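    # reads a wave file, fingerprints it and stores the fingerprint under the
    # file's basename; note the relative '../config.json' path assumes the
    # script is run from the app/ directory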
import wave
import analyzer
import storage
import recording
import config
import os
a = analyzer.Analyzer()
s = storage.Storage(config.Config("../config.json"))
waveFile = wave.open(file_path)
waveData = waveFile.readframes(waveFile.getnframes())
rec = recording.Recording(waveData, waveFile.getframerate(), waveFile.getsampwidth(), waveFile.getnchannels())
finger_print = a.finger_print(rec)
finger_print.set_name(os.path.basename(file_path))
s.add_finger_print(finger_print)
if __name__ == "__main__":
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str, help="Path to wave file", dest="file")
parser.add_argument("-d", "--dir", type=str, help="Path to folder with wave files", dest="dir")
args = parser.parse_args()
if args.dir is not None:
waves = [os.path.join(args.dir, file_name) for file_name in os.listdir(args.dir) if file_name.endswith(".wav")]
elif args.file is not None:
waves = [args.file]
else:
parser.print_help()
waves = []
for wave in waves:
print "Processing: " + wave
add_finger_print(wave)
|
gpl-2.0
| -7,902,132,046,651,514,000
| 28.809524
| 119
| 0.643485
| false
| 3.504202
| false
| false
| false
|
benschneider/sideprojects1
|
density_matrix/fft_filter_test.py
|
1
|
1683
|
from scipy.signal.signaltools import _next_regular
from matplotlib import pyplot as plt
from numpy.fft import fft, rfftn, irfftn, fftshift # for real data can take advantage of symmetries
import numpy as np
import codecs, json
# from scipy.signal import remez, freqz, lfilter
# lpf = remez(21, [0, 0.2, 0.3, 0.5], [1.0, 0.0])
# w, h = freqz(lpf)
#
# t = np.arange(0, 1.0, 1.00/1000)
# # s = np.sin(2*np.pi*100*t) + np.sin(2*np.pi*200*t)
# noise_amp = 5.0
# s = np.sin(2*np.pi*100*t) + np.sin(2*np.pi*200*t) # +noise_amp * np.random.randn(len(t))
#
# # sout = lfilter(lpf, 1, s)
# # plt.figure(1)
# # plt.plot(s[:100])
# # plt.plot(sout[:100])
#
# ft = fftshift(fft(s)/len(s))
# # ft2 = np.fft.fft(sout[40:])/len(sout)
# # plt.plot(20.0*np.log10(np.abs(ft2)))
# # # plt.plot((np.abs(ft)))
# # plt.show()
#
# shap0 = np.array(s.shape) - 1
# fshape = [_next_regular(int(d)) for d in shap0] # padding to optimal size for FFTPACK
# ft11 = fftshift(rfftn(s, fshape)/fshape)
#
# plt.figure(3)
# # plt.plot(w/(2*np.pi), abs(h))
# # plt.plot(20.0*np.log10(np.abs(ft11)))
# plt.plot(np.abs(ft11))
#
# plt.figure(4)
# #plt.plot(20.0*np.log10(np.abs(ft)))
# plt.plot(np.abs(ft))
# plt.show()
# a = np.random.rand(5)
# b = np.random.rand(5) + 0.1*a
# a = np.arange(10).reshape(2,5) # a 2 by 5 array
# b = a.tolist() # nested lists with same data, indices
# file_path = "blub.json" ## your path variable
# def save_json(file_path, stuff):
# json.dump(stuff, codecs.open(file_path, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True, indent=4)
#
# def load_json(file_path):
# obj_text = codecs.open(file_path, 'r', encoding='utf-8').read()
# return json.loads(obj_text)
|
gpl-2.0
| -3,275,720,611,350,555,000
| 32
| 118
| 0.623886
| false
| 2.256032
| false
| false
| false
|
KhronosGroup/COLLADA-CTS
|
Core/Gui/Dialog/FSettingsScrolledSizer.py
|
1
|
3448
|
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
import wx
import wx.lib.scrolledpanel
from Core.Gui.Dialog.FSettingSizer import *
from Core.Common.FConstants import *
class FSettingsScrolledSizer(wx.BoxSizer):
def __init__(self, parent, testProcedure, applicationMap, settings = None,
editable = True):
wx.BoxSizer.__init__(self, wx.VERTICAL)
self.__settingSizers = []
title = wx.StaticText(parent, wx.ID_ANY, "Test Settings")
scrolledPanel = wx.lib.scrolledpanel.ScrolledPanel(parent, wx.ID_ANY,
style=wx.SUNKEN_BORDER)
self.Add(title, 0, wx.ALIGN_CENTER | wx.ALL, 5)
self.Add(scrolledPanel, 1, wx.EXPAND | wx.TOP, 5)
topSizer = wx.BoxSizer(wx.VERTICAL)
for step, app, op, setting in testProcedure.GetStepGenerator():
sizer = FSettingSizer(scrolledPanel, applicationMap, editable,
self.__OnUpdateList)
            if (settings is None):
default = testProcedure.GetGlobalSetting(step)
else:
default = settings[step]
if (op == VALIDATE and op not in OPS_NEEDING_APP):
sizer.SetOperation(">>", op, ">>" + op)
sizer.Enable(False)
else:
sizer.SetOperation(app, op, "[" + app + "]" + op,
testProcedure.GetSettingManager(), default)
topSizer.Add(sizer, 0, wx.EXPAND | wx.ALL, 5)
self.__settingSizers.append(sizer)
padSizer = wx.BoxSizer(wx.VERTICAL)
padSizer.Add(topSizer, 1, wx.EXPAND | wx.ALL, 5)
scrolledPanel.SetSizer(padSizer)
scrolledPanel.SetAutoLayout(True)
scrolledPanel.SetupScrolling(scroll_x = False)
def IsSettingOk(self):
for settingSizer in self.__settingSizers:
if (settingSizer.GetOperation() == VALIDATE): continue
            if (settingSizer.GetSettingName() is None):
return False
return True
def GetSettings(self):
settings = []
for settingSizer in self.__settingSizers:
settings.append(settingSizer.GetSetting())
return settings
def __OnUpdateList(self):
for sizer in self.__settingSizers:
sizer.UpdateList()
|
mit
| 8,386,046,570,576,897,000
| 48.271429
| 466
| 0.639211
| false
| 4.299252
| false
| false
| false
|
3upperm2n/DIGITS
|
tools/create_db.py
|
2
|
20860
|
#!/usr/bin/env python
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import sys
import os.path
import time
import argparse
import logging
import re
import shutil
import math
import random
from collections import Counter
import threading
import Queue
try:
import digits
except ImportError:
# Add path for DIGITS package
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import digits.config
digits.config.load_config()
from digits import utils, log
import numpy as np
import PIL.Image
import lmdb
import h5py
from cStringIO import StringIO
# must call digits.config.load_config() before caffe to set the path
import caffe.io
try:
import caffe_pb2
except ImportError:
# See issue #32
from caffe.proto import caffe_pb2
logger = logging.getLogger('digits.tools.create_db')
class Error(Exception):
pass
class BadInputFileError(Error):
"""Input file is empty"""
pass
class ParseLineError(Error):
"""Failed to parse a line in the input file"""
pass
class LoadError(Error):
"""Failed to load image[s]"""
pass
class WriteError(Error):
"""Failed to write image[s]"""
pass
def create_db(input_file, output_file,
image_width, image_height, image_channels,
backend,
resize_mode = None,
image_folder = None,
shuffle = True,
mean_files = None,
**kwargs):
"""
Create a database of images from a list of image paths
Raises exceptions on errors
Arguments:
input_file -- a textfile containing labelled image paths
output_file -- the location to store the created database
image_width -- image resize width
image_height -- image resize height
image_channels -- image channels
backend -- the DB format (lmdb/hdf5)
Keyword arguments:
    resize_mode -- passed to utils.image.resize_image()
    image_folder -- prepended to relative paths in input_file
    shuffle -- if True, shuffle the images in the list before creating
    mean_files -- a list of mean files to save
"""
### Validate arguments
if not os.path.exists(input_file):
raise ValueError('input_file does not exist')
if os.path.exists(output_file):
logger.warning('removing existing database')
if os.path.isdir(output_file):
shutil.rmtree(output_file, ignore_errors=True)
else:
os.remove(output_file)
if image_width <= 0:
raise ValueError('invalid image width')
if image_height <= 0:
raise ValueError('invalid image height')
if image_channels not in [1,3]:
raise ValueError('invalid number of channels')
if resize_mode not in [None, 'crop', 'squash', 'fill', 'half_crop']:
raise ValueError('invalid resize_mode')
if image_folder is not None and not os.path.exists(image_folder):
raise ValueError('image_folder does not exist')
if mean_files:
for mean_file in mean_files:
if os.path.exists(mean_file):
logger.warning('overwriting existing mean file "%s"!' % mean_file)
else:
dirname = os.path.dirname(mean_file)
if not dirname:
dirname = '.'
if not os.path.exists(dirname):
raise ValueError('Cannot save mean file at "%s"' % mean_file)
compute_mean = bool(mean_files)
### Load lines from input_file into a load_queue
load_queue = Queue.Queue()
image_count = _fill_load_queue(input_file, load_queue, shuffle)
# Start some load threads
batch_size = _calculate_batch_size(image_count)
num_threads = _calculate_num_threads(batch_size, shuffle)
write_queue = Queue.Queue(2*batch_size)
summary_queue = Queue.Queue()
for i in xrange(num_threads):
p = threading.Thread(target=_load_thread,
args=(load_queue, write_queue, summary_queue,
image_width, image_height, image_channels,
resize_mode, image_folder, compute_mean)
)
p.daemon = True
p.start()
start = time.time()
if backend == 'lmdb':
_create_lmdb(image_count, write_queue, batch_size, output_file,
summary_queue, num_threads,
mean_files, **kwargs)
elif backend == 'hdf5':
_create_hdf5(image_count, write_queue, batch_size, output_file,
image_width, image_height, image_channels,
summary_queue, num_threads,
mean_files, **kwargs)
else:
raise ValueError('invalid backend')
logger.info('Database created after %d seconds.' % (time.time() - start))
def _create_lmdb(image_count, write_queue, batch_size, output_file,
summary_queue, num_threads,
mean_files = None,
encoding = None,
lmdb_map_size = None,
**kwargs):
"""
Create an LMDB
Keyword arguments:
encoding -- image encoding format
lmdb_map_size -- the initial LMDB map size
"""
wait_time = time.time()
threads_done = 0
images_loaded = 0
images_written = 0
image_sum = None
batch = []
compute_mean = bool(mean_files)
db = lmdb.open(output_file,
map_size=lmdb_map_size,
map_async=True,
max_dbs=0)
while (threads_done < num_threads) or not write_queue.empty():
# Send update every 2 seconds
if time.time() - wait_time > 2:
logger.debug('Processed %d/%d' % (images_written, image_count))
wait_time = time.time()
processed_something = False
if not summary_queue.empty():
result_count, result_sum = summary_queue.get()
images_loaded += result_count
# Update total_image_sum
if compute_mean and result_count > 0 and result_sum is not None:
if image_sum is None:
image_sum = result_sum
else:
image_sum += result_sum
threads_done += 1
processed_something = True
if not write_queue.empty():
image, label = write_queue.get()
datum = _array_to_datum(image, label, encoding)
batch.append(datum)
if len(batch) == batch_size:
_write_batch_lmdb(db, batch, images_written)
images_written += len(batch)
batch = []
processed_something = True
if not processed_something:
time.sleep(0.2)
if len(batch) > 0:
_write_batch_lmdb(db, batch, images_written)
images_written += len(batch)
if images_loaded == 0:
raise LoadError('no images loaded from input file')
logger.debug('%s images loaded' % images_loaded)
if images_written == 0:
raise WriteError('no images written to database')
logger.info('%s images written to database' % images_written)
if compute_mean:
_save_means(image_sum, images_written, mean_files)
db.close()
def _create_hdf5(image_count, write_queue, batch_size, output_file,
image_width, image_height, image_channels,
summary_queue, num_threads,
mean_files = None,
compression = None,
**kwargs):
"""
Create an HDF5 file
Keyword arguments:
compression -- dataset compression format
"""
wait_time = time.time()
threads_done = 0
images_loaded = 0
images_written = 0
image_sum = None
batch = []
compute_mean = bool(mean_files)
db = h5py.File(output_file, 'w')
data_dset = db.create_dataset('data', (0,image_channels,image_height,image_width), maxshape=(None,image_channels,image_height,image_width),
chunks=True, compression=compression, dtype='float32')
label_dset = db.create_dataset('label', (0,), maxshape=(None,),
chunks=True, compression=compression, dtype='float32')
while (threads_done < num_threads) or not write_queue.empty():
# Send update every 2 seconds
if time.time() - wait_time > 2:
logger.debug('Processed %d/%d' % (images_written, image_count))
wait_time = time.time()
processed_something = False
if not summary_queue.empty():
result_count, result_sum = summary_queue.get()
images_loaded += result_count
# Update total_image_sum
if compute_mean and result_count > 0 and result_sum is not None:
if image_sum is None:
image_sum = result_sum
else:
image_sum += result_sum
threads_done += 1
processed_something = True
if not write_queue.empty():
batch.append(write_queue.get())
if len(batch) == batch_size:
_write_batch_hdf5(batch, data_dset, label_dset)
images_written += len(batch)
batch = []
processed_something = True
if not processed_something:
time.sleep(0.2)
if len(batch) > 0:
_write_batch_hdf5(batch, data_dset, label_dset)
images_written += len(batch)
if images_loaded == 0:
raise LoadError('no images loaded from input file')
logger.debug('%s images loaded' % images_loaded)
if images_written == 0:
raise WriteError('no images written to database')
logger.info('%s images written to database' % images_written)
if compute_mean:
_save_means(image_sum, images_written, mean_files)
db.close()
def _fill_load_queue(filename, queue, shuffle):
"""
Fill the queue with data from the input file
Print the category distribution
Returns the number of lines added to the queue
NOTE: This can be slow on a large input file, but we need the total image
count in order to report the progress, so we might as well read it all
"""
total_lines = 0
valid_lines = 0
distribution = Counter()
with open(filename) as infile:
if shuffle:
lines = infile.readlines() # less memory efficient
random.shuffle(lines)
for line in lines:
total_lines += 1
try:
result = _parse_line(line, distribution)
valid_lines += 1
queue.put(result)
except ParseLineError:
pass
else:
for line in infile: # more memory efficient
total_lines += 1
try:
result = _parse_line(line, distribution)
valid_lines += 1
queue.put(result)
except ParseLineError:
pass
logger.debug('%s total lines in file' % total_lines)
if valid_lines == 0:
raise BadInputFileError('No valid lines in input file')
logger.info('%s valid lines in file' % valid_lines)
for key in sorted(distribution):
logger.debug('Category %s has %d images.' % (key, distribution[key]))
return valid_lines
def _parse_line(line, distribution):
"""
Parse a line in the input file into (path, label)
"""
line = line.strip()
if not line:
raise ParseLineError
# Expect format - [/]path/to/file.jpg 123
match = re.match(r'(.+)\s+(\d+)\s*$', line)
if match is None:
raise ParseLineError
path = match.group(1)
label = int(match.group(2))
distribution[label] += 1
return path, label
def _calculate_batch_size(image_count):
"""
Calculates an appropriate batch size for creating this database
"""
return min(100, image_count)
def _calculate_num_threads(batch_size, shuffle):
"""
Calculates an appropriate number of threads for creating this database
"""
if shuffle:
return min(10, int(round(math.sqrt(batch_size))))
else:
#XXX This is the only way to preserve order for now
# This obviously hurts performance considerably
return 1
def _load_thread(load_queue, write_queue, summary_queue,
image_width, image_height, image_channels,
resize_mode, image_folder, compute_mean):
"""
Consumes items in load_queue
Produces items to write_queue
Stores cumulative results in summary_queue
"""
images_added = 0
if compute_mean:
image_sum = _initial_image_sum(image_width, image_height, image_channels)
else:
image_sum = None
while not load_queue.empty():
try:
path, label = load_queue.get(True, 0.05)
except Queue.Empty:
continue
# prepend path with image_folder, if appropriate
if not utils.is_url(path) and image_folder and not os.path.isabs(path):
path = os.path.join(image_folder, path)
try:
image = utils.image.load_image(path)
        except utils.errors.LoadImageError as e:
logger.warning('[%s] %s: %s' % (path, type(e).__name__, e) )
continue
image = utils.image.resize_image(image,
image_height, image_width,
channels = image_channels,
resize_mode = resize_mode,
)
if compute_mean:
image_sum += image
write_queue.put((image, label))
images_added += 1
summary_queue.put((images_added, image_sum))
def _initial_image_sum(width, height, channels):
"""
Returns an array of zeros that will be used to store the accumulated sum of images
"""
if channels == 1:
return np.zeros((height, width), np.float64)
else:
return np.zeros((height, width, channels), np.float64)
def _array_to_datum(image, label, encoding):
"""
Create a caffe Datum from a numpy.ndarray
"""
if not encoding:
# Transform to caffe's format requirements
if image.ndim == 3:
# Transpose to (channels, height, width)
image = image.transpose((2,0,1))
if image.shape[0] == 3:
# channel swap
# XXX see issue #59
image = image[[2,1,0],...]
elif image.ndim == 2:
# Add a channels axis
image = image[np.newaxis,:,:]
else:
raise Exception('Image has unrecognized shape: "%s"' % image.shape)
datum = caffe.io.array_to_datum(image, label)
else:
datum = caffe_pb2.Datum()
if image.ndim == 3:
datum.channels = image.shape[2]
else:
datum.channels = 1
datum.height = image.shape[0]
datum.width = image.shape[1]
datum.label = label
s = StringIO()
if encoding == 'png':
PIL.Image.fromarray(image).save(s, format='PNG')
elif encoding == 'jpg':
PIL.Image.fromarray(image).save(s, format='JPEG', quality=90)
else:
raise ValueError('Invalid encoding type')
datum.data = s.getvalue()
datum.encoded = True
return datum
def _write_batch_lmdb(db, batch, image_count):
"""
Write a batch to an LMDB database
"""
try:
with db.begin(write=True) as lmdb_txn:
for i, datum in enumerate(batch):
key = '%08d_%d' % (image_count + i, datum.label)
lmdb_txn.put(key, datum.SerializeToString())
except lmdb.MapFullError:
# double the map_size
curr_limit = db.info()['map_size']
new_limit = curr_limit*2
logger.debug('Doubling LMDB map size to %sMB ...' % (new_limit>>20,))
try:
db.set_mapsize(new_limit) # double it
except AttributeError as e:
version = tuple(int(x) for x in lmdb.__version__.split('.'))
if version < (0,87):
raise Error('py-lmdb is out of date (%s vs 0.87)' % lmdb.__version__)
else:
raise e
# try again
_write_batch_lmdb(db, batch, image_count)
def _write_batch_hdf5(batch, data_dset, label_dset):
"""
Write a batch to an HDF5 database
"""
if batch[0][0].ndim == 2:
data_batch = np.array([i[0][...,np.newaxis] for i in batch])
else:
data_batch = np.array([i[0] for i in batch])
# Transpose to (channels, height, width)
data_batch = data_batch.transpose((0,3,1,2))
label_batch = np.array([i[1] for i in batch])
# resize dataset
if data_dset.len() == 0:
data_dset.resize(data_batch.shape)
label_dset.resize(label_batch.shape)
else:
data_dset.resize(data_dset.len()+len(batch),axis=0)
label_dset.resize(label_dset.len()+len(batch),axis=0)
data_dset[-len(batch):] = data_batch
label_dset[-len(batch):] = label_batch
def _save_means(image_sum, image_count, mean_files):
"""
Save mean[s] to file
"""
mean = np.around(image_sum / image_count).astype(np.uint8)
for mean_file in mean_files:
if mean_file.lower().endswith('.npy'):
np.save(mean_file, mean)
elif mean_file.lower().endswith('.binaryproto'):
data = mean
# Transform to caffe's format requirements
if data.ndim == 3:
# Transpose to (channels, height, width)
data = data.transpose((2,0,1))
if data.shape[0] == 3:
# channel swap
# XXX see issue #59
data = data[[2,1,0],...]
elif mean.ndim == 2:
# Add a channels axis
data = data[np.newaxis,:,:]
blob = caffe_pb2.BlobProto()
blob.num = 1
blob.channels, blob.height, blob.width = data.shape
blob.data.extend(data.astype(float).flat)
with open(mean_file, 'wb') as outfile:
outfile.write(blob.SerializeToString())
elif mean_file.lower().endswith(('.jpg', '.jpeg', '.png')):
image = PIL.Image.fromarray(mean)
image.save(mean_file)
else:
logger.warning('Unrecognized file extension for mean file: "%s"' % mean_file)
continue
logger.info('Mean saved at "%s"' % mean_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create-Db tool - DIGITS')
### Positional arguments
parser.add_argument('input_file',
help='An input file of labeled images')
parser.add_argument('output_file',
help='Path to the output database')
parser.add_argument('width',
type=int,
help='width of resized images'
)
parser.add_argument('height',
type=int,
help='height of resized images'
)
### Optional arguments
parser.add_argument('-c', '--channels',
type=int,
default=3,
help='channels of resized images (1 for grayscale, 3 for color [default])'
)
parser.add_argument('-r', '--resize_mode',
help='resize mode for images (must be "crop", "squash" [default], "fill" or "half_crop")'
)
parser.add_argument('-m', '--mean_file', action='append',
help="location to output the image mean (doesn't save mean if not specified)")
parser.add_argument('-f', '--image_folder',
help='folder containing the images (if the paths in input_file are not absolute)')
parser.add_argument('-s', '--shuffle',
action='store_true',
help='Shuffle images before saving'
)
parser.add_argument('-e', '--encoding',
help = 'Image encoding format (jpg/png)'
)
parser.add_argument('-C', '--compression',
help = 'Database compression format (gzip)'
)
parser.add_argument('-b', '--backend',
default='lmdb',
help = 'The database backend - lmdb[default] or hdf5')
parser.add_argument('--lmdb_map_size',
type=int,
help = 'The initial map size for LMDB (in MB)')
args = vars(parser.parse_args())
if args['lmdb_map_size']:
# convert from MB to B
args['lmdb_map_size'] <<= 20
try:
create_db(args['input_file'], args['output_file'],
args['width'], args['height'], args['channels'],
args['backend'],
resize_mode = args['resize_mode'],
image_folder = args['image_folder'],
shuffle = args['shuffle'],
mean_files = args['mean_file'],
encoding = args['encoding'],
compression = args['compression'],
lmdb_map_size = args['lmdb_map_size']
)
except Exception as e:
logger.error('%s: %s' % (type(e).__name__, e.message))
raise
|
bsd-3-clause
| -4,733,648,372,469,106,000
| 31.492212
| 143
| 0.573058
| false
| 3.97409
| false
| false
| false
|
bgaultier/laboitepro
|
boites/migrations/0009_pushbutton.py
|
1
|
1086
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-09-06 07:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('boites', '0008_auto_20170801_1406'),
]
operations = [
migrations.CreateModel(
name='PushButton',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('event_name', models.SlugField(help_text="Veuillez saisir ici le nom de l'\xe9v\xe9nement IFTTT", verbose_name='IFTTT event name')),
('api_key', models.SlugField(help_text='Veuillez saisir ici votre cl\xe9 IFTTT', verbose_name="IFTTT cl\xe9 d'API")),
('last_triggered', models.DateTimeField(null=True, verbose_name='Derni\xe8re activit\xe9')),
('boite', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='boites.Boite', verbose_name='Bo\xeete')),
],
),
]
|
agpl-3.0
| 1,413,793,480,327,889,000
| 40.769231
| 149
| 0.627993
| false
| 3.537459
| false
| false
| false
|
krother/maze_run
|
leftovers/part2_before_pylint.py
|
1
|
11490
|
# TODO: fix and check all command-line arguments
from util import debug_print
from pygame import image, Rect, Surface
from pygame.locals import KEYDOWN, KEYUP, USEREVENT
import pygame
import sys
import random
import json
import os
from collections import namedtuple
from functools import partial
import argparse
import logging
#logging.basicConfig(filename='random_levels.log', level=logging.INFO)
log = logging.getLogger('moves')
log.addHandler(logging.FileHandler('moves.log', mode='w'))
log.setLevel(logging.INFO)
eventlog = logging.getLogger('events')
eventlog.addHandler(logging.StreamHandler(sys.stderr))
#fmt='%(asctime)s %(message)s'
#eventlog.addFormatter(logging.Formatter(fmt), datefmt='%m/%d/%Y %I:%M:%S %p')
eventlog.setLevel(logging.WARNING)
Position = namedtuple("Position", ["x", "y"])
# ------------ CONSTANTS ----------------
CONFIG_PATH = os.path.split(__file__)[0]
TILE_POSITION_FILE = CONFIG_PATH + 'tiles.json'
TILE_IMAGE_FILE = CONFIG_PATH + '../images/tiles.xpm'
SIZE = 32
SPEED = 4
LEFT = Position(-1, 0)
RIGHT = Position(1, 0)
UP = Position(0, -1)
DOWN = Position(0, 1)
DIRECTIONS = {
276: LEFT, 275: RIGHT,
273: UP, 274: DOWN
}
KEY_REPEAT_TIME = 250
DRAW_REPEAT_TIME = 100
UPDATE_REPEAT_TIME = 20
MOVE_GHOST_TIME = 250
KEY_REPEATED, DRAW, UPDATE, MOVE_GHOST, EXIT = range(USEREVENT + 1, USEREVENT + 6)
# ------------- LOADING TILES -----------
def get_tile_rect(pos):
"""Converts tile indices to a pygame.Rect"""
return Rect(pos.x*SIZE, pos.y*SIZE, SIZE, SIZE)
def load_tiles(json_fn):
"""Loads tile positions from a JSON file name"""
tiles = {}
jd = json.loads(open(json_fn).read())
for tile in jd.values():
abbrev = tile["abbrev"]
pos = Position(tile["x"], tile["y"])
rect = get_tile_rect(pos)
tiles[abbrev] = rect
return tiles
# ------------- GENERATING MAZES ------------
class MazeGenerator:
"""Generates two-dimensional mazes consisting of walls and dots."""
@staticmethod
def create_grid_string(dots, xsize, ysize):
grid = ""
for y in range(ysize):
for x in range(xsize):
grid += "." if Position(x, y) in dots else "#"
grid += "\n"
return grid
@staticmethod
def get_all_dot_positions(xsize, ysize):
return [Position(x, y) for x in range(1, xsize-1) for y in range(1, ysize-1)]
@staticmethod
def get_neighbors(pos):
return [
Position(pos.x , pos.y-1), Position(pos.x , pos.y+1),
Position(pos.x-1, pos.y ), Position(pos.x+1, pos.y ),
Position(pos.x-1, pos.y-1), Position(pos.x+1, pos.y-1),
Position(pos.x-1, pos.y+1), Position(pos.x+1, pos.y+1)
]
@staticmethod
def generate_dot_positions(xsize, ysize):
positions = MazeGenerator.get_all_dot_positions(xsize, ysize)
dots = set()
while positions != []:
pos = random.choice(positions)
neighbors = MazeGenerator.get_neighbors(pos)
free = [nb in dots for nb in neighbors]
if free.count(True) < 5:
dots.add(pos)
positions.remove(pos)
return dots
@staticmethod
def create_maze(size):
"""Returns a size.x * size.y maze as a string"""
dots = MazeGenerator.generate_dot_positions(size.x, size.y)
maze = MazeGenerator.create_grid_string(dots, size.x, size.y)
return maze
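# Illustrative only -- not part of the original module.  create_maze() returns
# a plain string of '#' and '.' characters, one row per line, which TileGrid
# below can parse directly; the 12x7 size matches the argparse defaults.
def _print_example_maze():
    """Hypothetical demo: generate and print one small random maze."""
    maze_string = MazeGenerator.create_maze(Position(12, 7))
    print(maze_string)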
# ------------- DRAWING GRIDS --------------
class TileGrid:
def __init__(self, data):
self._grid = self.parse_grid(data)
def __repr__(self):
return "\n".join(["".join(row) for row in self._grid])
def parse_grid(self, data):
"""Parses the string representation into a nested list"""
return [list(row) for row in data.strip().split("\n")]
@property
def rows(self):
return self._grid
@property
def xsize(self):
return len(self.rows[0])
@property
def ysize(self):
return len(self.rows)
def __getitem__(self, pos):
return self._grid[pos.y][pos.x]
def __setitem__(self, pos, value):
self._grid[pos.y][pos.x] = value
def __iter__(self):
"""Iterate over all grid tiles"""
for y, row in enumerate(self.rows):
for x, char in enumerate(row):
pos = Position(x, y)
yield pos, char
def find_tile(self, query='*'):
"""Returns a Position tuple for the given char on the level"""
for pos, char in self:
if char == query:
return pos
def draw_grid(self, tile_img, tiles):
"""Returns an image of a tile-based grid"""
#debug_print("drawing level", data)
img = Surface((self.xsize * SIZE, self.ysize * SIZE))
for pos, char in self:
rect = get_tile_rect(pos)
img.blit(tile_img, rect, tiles[char])
return img
# ------------- SPRITES --------------
class Sprite:
def __init__(self, maze, tile, startpos):
self.maze = maze
self.tile = tile
self.pos = startpos
self.anim_direction = None
self.anim_offset = Position(0, 0)
def move(self, direction):
"""Handles moves on a level"""
if not self.is_moving():
old = self.pos
new = Position(old.x + direction.x, old.y + direction.y)
if self.maze[new] in [" ", ".", "x"]:
self.pos = new
self.anim_direction = direction
self.anim_offset = Position(-direction.x * SIZE, -direction.y * SIZE)
def is_moving(self):
return self.anim_direction
def arrives_on_new_tile(self):
pass
def draw(self, img, tile_img, tiles):
"""Returns an image of a tile-based grid"""
rect = get_tile_rect(self.pos)
rect = Rect([rect.x + self.anim_offset.x, rect.y + self.anim_offset.y, rect.w, rect.h])
img.blit(tile_img, rect, tiles[self.tile])
def animate(self):
if self.anim_direction:
ofs_x = self.anim_offset.x + self.anim_direction.x * SPEED
ofs_y = self.anim_offset.y + self.anim_direction.y * SPEED
self.anim_offset = Position(ofs_x, ofs_y)
if ofs_x == 0 and ofs_y == 0:
self.arrives_on_new_tile()
self.anim_direction = None
class Ghost(Sprite):
def random_move(self, event):
direction = random.choice([LEFT, RIGHT, UP, DOWN])
self.move(direction)
class Player(Sprite):
def arrives_on_new_tile(self):
tile = self.maze[self.pos]
if tile == '.':
self.maze[self.pos] = ' ' # eats dot
elif tile == 'x':
exit_game()
def handle_key(self, key):
"""Handles key events in the game"""
direction = DIRECTIONS.get(key)
if direction:
self.move(direction)
# ------------- EVENT LOOP --------------
def event_loop(callbacks, delay=10, repeat=KEY_REPEAT_TIME):
"""Processes events and updates callbacks."""
repeat_key = None
running = True
while running:
pygame.event.pump()
event = pygame.event.poll()
action = callbacks.get(event.type)
if action:
action(event)
elif event.type == EXIT:
running = False
eventlog.critical('exit event received: ' + str(event))
else:
eventlog.info('unhandled event: ' + str(event))
pygame.time.delay(delay)
# ------------- GAME MECHANICS --------------
def exit_game():
eve = pygame.event.Event(EXIT)
pygame.event.post(eve)
# ------------- MAIN GAME --------------
class MazeRun:
def create_display(self):
pygame.init()
pygame.display.set_mode((800, 600))
self.display = pygame.display.get_surface()
def create_tiles(self):
self.tile_img = image.load(TILE_IMAGE_FILE)
self.tiles = load_tiles(TILE_POSITION_FILE)
def load_level(self, fn):
data = open(fn).read()
self.maze = TileGrid(data)
def create_random_maze(self, size):
maze_data = MazeGenerator.create_maze(size)
self.maze = TileGrid(maze_data)
self.maze[Position(size.x-2, size.y-2)] = 'x'
log.info("random level created\n" + str(self.maze))
def create_sprites(self, size):
self.player = Player(self.maze, '*', Position(1, 1))
self.ghost = Ghost(self.maze, 'g', Position(size.x-2, 1))
def draw(self, event):
img = self.maze.draw_grid(self.tile_img, self.tiles)
self.player.draw(img, self.tile_img, self.tiles)
self.ghost.draw(img, self.tile_img, self.tiles)
rect = Rect((0, 0, self.maze.xsize*SIZE, self.maze.ysize*SIZE))
self.display.blit(img, rect, rect)
pygame.display.update()
def handle_key(self, event):
"""Handles key events in the game"""
direction = DIRECTIONS.get(event.key)
if direction:
self.player.move(direction)
self.check_collision()
def check_collision(self):
if self.player.pos == self.ghost.pos:
exit_game()
def update(self, event):
"""Manages recurring checks in the game"""
self.check_collision()
self.player.animate()
self.ghost.animate()
def start_game(self):
callbacks = {
KEYDOWN: self.handle_key,
DRAW: self.draw,
UPDATE: self.update,
MOVE_GHOST: self.ghost.random_move
}
pygame.time.set_timer(DRAW, DRAW_REPEAT_TIME)
pygame.time.set_timer(UPDATE, UPDATE_REPEAT_TIME)
pygame.time.set_timer(MOVE_GHOST, MOVE_GHOST_TIME)
event_loop(callbacks)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Start the MazeRun game.')
parser.add_argument('--x', type=int, default=12,
help='x size of random maze')
parser.add_argument('--y', type=int, default=7,
help='y size of random maze')
parser.add_argument('--ghost',
#dest="MOVE_GHOST_TIME", action="store_const",
type=int, default=500,
help='ghost speed (moves every .. milliseconds)')
parser.add_argument('--load', type=str, default=None,
help='load maze from text file')
parser.add_argument('--replay', type=str, default=None,
help='log file to replay from')
parser.add_argument('-v', '--verbose', action="store_true",
help='print debugging information')
#parser.add_argument('words', type=str, nargs='+',
# help='the word for which characters are counted')
#parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1, 2],
# positional arguments: without dashes
# optional: with --
# g = parser.add_mutually_exclusive_group()
# g.add_argument(...)
# g.add_argument(...)
# -d delay=50 game speed
# -g ghost speed
# -x, -y size of the grid
# -r replay from logfile
# -l load level from file
# optional arguments
# --verbose
# --help info
args = parser.parse_args()
size = Position(args.x, args.y)
mr = MazeRun()
mr.create_display()
mr.create_tiles()
mr.create_random_maze(size)
mr.create_sprites(size)
mr.start_game()
#mr.load_level(LEVEL_FILE)
|
mit
| -7,573,846,476,509,047,000
| 29.15748
| 95
| 0.573716
| false
| 3.513761
| false
| false
| false
|
neozerosv/ciberues
|
agente-servidor.py
|
1
|
2142
|
#!/usr/bin/env python
# ------------------------------
# imports
# ------------------------------
import socket, sys, time, ConfigParser
def poner_mensaje( tipo , mensaje ):
# -----------------------------
    # Print messages with a consistent format
    # and a timestamp
# -----------------------------
print time.strftime('%Y-%m-%d-%X') + " " + tipo + ": " + mensaje
def activar_configuracion():
# ------------------------------
    # Server settings loaded from
    # a configuration file
# ------------------------------
configuracion = "./configuracion/agente-servidor.cfg"
global direccion
global puerto
global clave
try:
cfg = ConfigParser.ConfigParser()
cfg.read([configuracion])
puerto = int(cfg.get('servidor','puerto'))
clave = cfg.get('servidor','clave')
clientes = cfg.get('servidor','clientes')
except:
poner_mensaje( 'ERROR' , "No se pudo leer el archivo de configuracion " + configuracion )
poner_mensaje( 'AVISO' , "Se tomaran los valores por omision: 6470 root" )
puerto = 6470
clave = 'root'
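# Illustrative only: activar_configuracion() expects an INI-style file at
# ./configuracion/agente-servidor.cfg with a [servidor] section, e.g.
#   [servidor]
#   puerto = 6470
#   clave = root
#   clientes = 127.0.0.1
# The values above are examples (6470/root are also the built-in defaults);
# 'clientes' is read but not otherwise used in this script.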
if __name__ == "__main__":
activar_configuracion()
# ------------------------------
    # parameters to use
# ------------------------------
if( len(sys.argv) == 3 ):
continuar = True
direccion = sys.argv[1]
comando = sys.argv[2]
agente = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
try:
agente.connect( ( direccion, puerto ) )
except:
poner_mensaje ( 'ERROR' , "No se pudo establecer la conexion en la direccion: "+ direccion +" con el puerto: " + str(puerto) )
continuar = False
if ( continuar == True ):
data, server = agente.recvfrom( 100 )
poner_mensaje ( 'MENSAJE' , data )
agente.send( clave )
data, server = agente.recvfrom( 100 )
poner_mensaje ( 'MENSAJE' , data )
agente.send( comando )
data, server = agente.recvfrom( 100 )
poner_mensaje ( 'MENSAJE' , data )
agente.close()
else:
print "--------------------------------------------------------------"
print " Tiene que mandar cuatro parametros"
print " agente-servidor.py <direccion> <comando>"
print "--------------------------------------------------------------"
|
gpl-2.0
| 6,846,871,219,183,194,000
| 31.953846
| 129
| 0.546218
| false
| 2.906377
| true
| false
| false
|
urq/awssql
|
shellsql.py
|
1
|
2032
|
import os
import pickle
import sys
import click
from shellsql import aws
from shellsql.models import Dataset
from shellsql.cache import TTLCache, FileCache
from shellsql import utils
@click.group()
def cli():
pass
@cli.command()
def hello():
click.echo("hello world!")
@cli.command()
@click.argument('columns', nargs=-1)
def select(columns):
data = Dataset.from_file(sys.stdin)
data.select(columns).write(sys.stdout)
@cli.command()
def headers():
print '\n'.join(sorted(Dataset.from_file(sys.stdin).headers))
@cli.command()
@click.option('--region', default='us-east-1')
@click.option('--cache-dir', default='~/.shellsql-cache')
@click.option('--cache-ttl', default=60)
@click.argument('profiles', nargs=-1)
def get(profiles, cache_dir, cache_ttl, region):
# if not os.path.exists(os.path.expanduser(cache_dir)):
# os.mkdir(os.path.expanduser(cache_dir))
# cache = TTLCache(FileCache(os.path.expanduser(cache_dir)))
# data = cache.get(profile)
# if data is None:
# data = aws.get_instances_as_table(profile, region)
# print 'inserting into cache'
# cache.insert(profile, pickle.dumps(data))
# else:
# data = pickle.loads(data)
data = reduce(lambda x, y: x.merge(y),
[aws.get_instances_as_table(profile, region)
for profile in profiles])
data.write(sys.stdout)
#@cli.command()
#@click.argument('predicate')
#def map(columns, predicate):
#data = Dataset.from_file(sys.stdin)
#def func(row): exec predicate + '; return row' in globals(), locals()
#data = data.map(func)
#data.write(sys.stdout)
@cli.command()
@click.argument('predicate')
def filter(predicate):
data = Dataset.from_file(sys.stdin)
utils.import_execution_env()
print "predicate: {}".format(predicate)
func_str = """filter_func = lambda row: {}""".format(predicate)
print func_str
exec(func_str)
data = data.filter(filter_func)
data.write(sys.stdout)
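# Illustrative usage only (the pipeline below is an assumption, not taken from
# the repository): the filter subcommand builds a lambda from the predicate
# string and applies it to each row read from stdin, so one might run
#   python shellsql.py get some-profile | python shellsql.py filter "row['state'] == 'running'"
# where 'some-profile' and the 'state' column are hypothetical.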
if __name__ == '__main__':
cli()
|
apache-2.0
| -1,326,841,644,984,561,400
| 27.222222
| 74
| 0.656004
| false
| 3.288026
| false
| false
| false
|
vijayendrabvs/ssl-neutron
|
neutron/plugins/nec/ofc_driver_base.py
|
1
|
5143
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
# @author: Akihiro MOTOKI
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class OFCDriverBase(object):
"""OpenFlow Controller (OFC) Driver Specification.
OFCDriverBase defines the minimum set of methods required by this plugin.
    Implementing the other methods, such as update_*, is recommended as well.
"""
@abstractmethod
def create_tenant(self, description, tenant_id=None):
"""Create a new tenant at OpenFlow Controller.
:param description: A description of this tenant.
:param tenant_id: A hint of OFC tenant ID.
            A driver may use this ID as the OFC ID or ignore it.
:returns: ID of the tenant created at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def delete_tenant(self, ofc_tenant_id):
"""Delete a tenant at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def create_network(self, ofc_tenant_id, description, network_id=None):
"""Create a new network on specified OFC tenant at OpenFlow Controller.
        :param ofc_tenant_id: an OFC tenant ID to which the new network belongs.
:param description: A description of this network.
:param network_id: A hint of an ID of OFC network.
:returns: ID of the network created at OpenFlow Controller.
ID returned must be unique in the OpenFlow Controller.
If a network is identified in conjunction with other information
such as a tenant ID, such information should be included in the ID.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def delete_network(self, ofc_network_id):
"""Delete a netwrok at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def create_port(self, ofc_network_id, portinfo,
port_id=None, filters=None):
"""Create a new port on specified network at OFC.
        :param ofc_network_id: an OFC network ID to which the new port belongs.
        :param portinfo: OpenFlow information about this port.
            {'datapath_id': ID of the switch the port is connected to.
             'port_no': Port number of the port on that switch.
             'vlan_id': VLAN ID with which the port is tagged.
             'mac': MAC address.
}
:param port_id: A hint of an ID of OFC port.
ID returned must be unique in the OpenFlow Controller.
If a port is identified in combination with a network or
a tenant, such information should be included in the ID.
:param filters: A list of packet filter associated with the port.
Each element is a tuple (neutron ID, OFC ID)
:returns: ID of the port created at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def delete_port(self, ofc_port_id):
"""Delete a port at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def convert_ofc_tenant_id(self, context, ofc_tenant_id):
"""Convert old-style ofc tenand id to new-style one.
:param context: neutron context object
:param ofc_tenant_id: ofc_tenant_id to be converted
"""
pass
@abstractmethod
def convert_ofc_network_id(self, context, ofc_network_id,
tenant_id):
"""Convert old-style ofc network id to new-style one.
:param context: neutron context object
:param ofc_network_id: ofc_network_id to be converted
:param tenant_id: neutron tenant_id of the network
"""
pass
@abstractmethod
def convert_ofc_port_id(self, context, ofc_port_id,
tenant_id, network_id):
"""Convert old-style ofc port id to new-style one.
:param context: neutron context object
:param ofc_port_id: ofc_port_id to be converted
:param tenant_id: neutron tenant_id of the port
:param network_id: neutron network_id of the port
"""
pass
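# Illustrative sketch only -- not a real OFC driver.  It shows the shape of a
# concrete OFCDriverBase implementation; the class name and the fabricated IDs
# below are assumptions made for the example and do not exist in neutron.
class _ExampleNullOFCDriver(OFCDriverBase):
    """A do-nothing driver that fabricates IDs instead of calling an OFC."""
    def create_tenant(self, description, tenant_id=None):
        return tenant_id or "example-tenant-id"
    def delete_tenant(self, ofc_tenant_id):
        pass
    def create_network(self, ofc_tenant_id, description, network_id=None):
        # embed the tenant ID so the returned ID is unique per controller
        return "%s/%s" % (ofc_tenant_id, network_id or "example-network-id")
    def delete_network(self, ofc_network_id):
        pass
    def create_port(self, ofc_network_id, portinfo, port_id=None, filters=None):
        return "%s/%s" % (ofc_network_id, port_id or "example-port-id")
    def delete_port(self, ofc_port_id):
        pass
    def convert_ofc_tenant_id(self, context, ofc_tenant_id):
        return ofc_tenant_id
    def convert_ofc_network_id(self, context, ofc_network_id, tenant_id):
        return ofc_network_id
    def convert_ofc_port_id(self, context, ofc_port_id, tenant_id, network_id):
        return ofc_port_id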
|
apache-2.0
| 7,191,177,591,923,439,000
| 36.268116
| 79
| 0.642815
| false
| 4.236409
| false
| false
| false
|
Lujeni/matterllo
|
core/models.py
|
1
|
3323
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ast import literal_eval
from django.db import models
from django.conf import settings
class Board(models.Model):
name = models.CharField(max_length=100)
webhook_activate = models.BooleanField(default=False)
trello_board_id = models.CharField(max_length=100)
trello_token = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ["name"]
verbose_name_plural = "boards"
def __str__(self):
return self.name
def __unicode__(self):
return "{}".format(self.name)
class Webhook(models.Model):
name = models.CharField(max_length=50)
incoming_webhook_url = models.CharField(max_length=300, unique=True)
icon_url = models.CharField(
max_length=250,
default="http://maffrigby.com/wp-content/uploads/2015/05/trello-icon.png",
)
username = models.CharField(max_length=30, default="Matterllo")
board = models.ManyToManyField(Board)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ["name"]
verbose_name_plural = "webhooks"
def __str__(self):
return "{} :: {}".format(self.name, self.incoming_webhook_url)
def __unicode__(self):
return "{} :: {}".format(self.name, self.incoming_webhook_url)
class Bridge(models.Model):
EVENT_CHOICES = (
# card
("addAttachmentToCard", "addAttachmentToCard"),
("addLabelToCard", "addLabelToCard"),
("addMemberToCard", "addMemberToCard"),
("commentCard", "commentCard"),
("copyCard", "copyCard"),
("createCard", "createCard"),
("emailCard", "emailCard"),
("moveCardFromBoard", "moveCardFromBoard"),
("moveCardToBoard", "moveCardToBoard"),
("removeLabelFromCard", "removeLabelFromCard"),
("removeMemberFromCard", "removeMemberFromCard"),
(
"updateCard",
"updateCard (include moveCardToList, renameCard, renameCardDesc, updateCardDueDate, removeCardDueDate, archiveCard, unarchiveCard)",
),
# checklist
("addChecklistToCard", "addChecklistToCard"),
("createCheckItem", "createCheckItem"),
("updateCheckItemStateOnCard", "updateCheckItemStateOnCard"),
# list
("archiveList", "archiveList"),
("createList", "createList"),
("moveListFromBoard", "moveCardFromBoard"),
("moveListToBoard", "moveListToBoard"),
("renameList", "renameList"),
("updateList", "updateList"),
)
webhook = models.ForeignKey(Webhook, on_delete=models.CASCADE)
board = models.ForeignKey(Board, on_delete=models.CASCADE)
events = models.CharField(max_length=700)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name_plural = "bridges"
def __str__(self):
return "{}::{}".format(self.board, self.webhook)
def __unicode__(self):
return "{}::{}".format(self.board, self.webhook)
def events_as_list(self):
return literal_eval(self.events)
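# Illustrative note, not part of the original model: Bridge.events is a plain
# CharField holding the string form of a Python list, so a stored value such
# as "['createCard', 'commentCard']" (example data) is turned back into the
# list ['createCard', 'commentCard'] by events_as_list() via literal_eval,
# imported from ast above.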
|
mit
| 9,003,355,092,251,951,000
| 30.647619
| 144
| 0.63858
| false
| 3.754802
| false
| false
| false
|
solus-project/evolve-sc
|
solus_sc/main_window.py
|
2
|
8096
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of solus-sc
#
# Copyright © 2013-2018 Ikey Doherty <ikey@solus-project.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
from .appsystem import AppSystem
from .groups import ScGroupsView
from .installed_view import ScInstalledView
from .sidebar import ScSidebar
from .updates_view import ScUpdatesView
from .basket import BasketView
from .search import ScSearchView
from .thirdparty import ThirdPartyView
from .settings_view import ScSettingsView
from gi.repository import Gtk, Gdk, GLib, Gio
import sys
import threading
class ScPlaceholderBox(Gtk.VBox):
""" So we don't show empty boxes :) """
def __init__(self):
Gtk.VBox.__init__(self)
lab = Gtk.Label("Sorry, this page is not yet implemented.")
self.add(lab)
class ScMainWindow(Gtk.ApplicationWindow):
groups_view = None
main_layout = None
sidebar = None
stack = None
sidebar_revealer = None
appsystem = None
# Pages
installed_view = None
updates_view = None
search_view = None
third_party = None
settings_view = None
prev_button = None
app = None
# Default open mode
mode_open = None
action_bar = None
did_map_once = False
def show_updates(self):
""" Switch to updates view """
self.sidebar.preselect_row("updates")
def show_search(self):
""" Switch to search view """
self.sidebar.preselect_row("search")
def do_delete_event(self, event, udata=None):
""" For now just propagate the event """
return False
def do_back(self):
""" Handle back navigation """
nom = self.stack.get_visible_child_name()
if nom == "installed":
self.installed_view.handle_back()
elif nom == "home":
self.groups_view.handle_back()
elif nom == "search":
self.search_view.handle_back()
else:
print("Shouldn't be happening boss")
def handle_back(self, btn, udata=None):
self.do_back()
def set_can_back(self, can_back):
self.prev_button.set_sensitive(can_back)
def update_back(self, nom):
""" Update back navigation """
sensitive = False
if nom == "installed":
sensitive = self.installed_view.can_back()
elif nom == "home":
sensitive = self.groups_view.can_back()
elif nom == "search":
sensitive = self.search_view.can_back()
self.set_can_back(sensitive)
def init_children(self):
self.installed_view.init_view()
# If we're not allowed to refresh on metered connections, only
# show the cached results on startup
settings = Gio.Settings.new("com.solus-project.software-center")
mon = Gio.NetworkMonitor.get_default()
if mon is not None:
can_net = settings.get_boolean("update-on-metered")
if not can_net and mon.get_network_metered():
self.updates_view.init_view()
return
GLib.idle_add(self.updates_view.external_refresh)
def init_view(self):
""" Our first ever show """
self.sidebar_revealer.set_reveal_child(True)
self.sidebar.preselect_row(self.mode_open)
self.stack.set_visible_child_name(self.mode_open)
return False
def on_mapped(self, w, udata=None):
if self.did_map_once:
return
self.did_map_once = True
GLib.timeout_add(200, self.init_view)
def on_button_press_event(self, widget, event):
if event.button == 8: # Back button
self.do_back()
def on_key_press_event(self, widget, event):
# check event modifiers
ctrl = (event.state & Gdk.ModifierType.CONTROL_MASK)
# check if search view hotkey was pressed
if ctrl and event.keyval == Gdk.keyval_from_name('f'):
self.show_search()
def __init__(self, app):
Gtk.ApplicationWindow.__init__(self, application=app)
self.app = app
self.mode_open = "home"
self.appsystem = AppSystem()
self.set_icon_name("system-software-install")
# Set up the headerbar. Because GNOME n stuff.
headerbar = Gtk.HeaderBar()
headerbar.set_show_close_button(True)
self.set_titlebar(headerbar)
self.prev_button = Gtk.Button.new_from_icon_name(
"go-previous-symbolic", Gtk.IconSize.BUTTON)
headerbar.pack_start(self.prev_button)
self.prev_button.connect("clicked", self.handle_back)
# Window title
self.set_title(_("Software Center"))
self.get_style_context().add_class("solus-sc")
self.set_position(Gtk.WindowPosition.CENTER)
self.set_default_size(950, 650)
self.stack = Gtk.Stack()
self.stack.get_style_context().add_class("main-view")
self.set_can_back(False)
# We'll add view switching later
try:
self.init_first()
except Exception as e:
print(e)
sys.exit(1)
def init_first(self):
self.basket = BasketView(self)
self.groups_view = ScGroupsView(self)
# Main horizontal layout (Sidebar|VIEW)
self.main_layout = Gtk.HBox(0)
self.add(self.main_layout)
self.sidebar = ScSidebar(self, self.stack)
self.sidebar_revealer = Gtk.Revealer()
self.sidebar_revealer.add(self.sidebar)
self.sidebar_revealer.set_reveal_child(False)
self.main_layout.pack_start(self.sidebar_revealer, False, False, 0)
sep = Gtk.Separator()
sep.set_orientation(Gtk.Orientation.VERTICAL)
sep.get_style_context().add_class("sidebar-separator")
self.main_layout.pack_start(sep, False, False, 0)
tmpvbox = Gtk.VBox(0)
tmpvbox.pack_start(self.stack, True, True, 0)
tmpvbox.pack_start(self.basket, False, False, 0)
self.main_layout.pack_start(tmpvbox, True, True, 0)
# Dummy view for first time showing the application
self.dummy_widget = Gtk.EventBox()
# Supported views
self.stack.add_titled(self.dummy_widget, "empty", "empty")
# Main view, primary view, when opening the software center
self.stack.add_titled(self.groups_view, "home", _("Home"))
self.updates_view = ScUpdatesView(self.basket, self.appsystem)
        # The page where updates are displayed
self.stack.add_titled(self.updates_view, "updates", _("Updates"))
# Package view for installed page
self.installed_view = ScInstalledView(self, self.basket, self.appsystem)
        # This page shows the locally installed items
self.stack.add_titled(self.installed_view, "installed", _("Installed"))
self.third_party = ThirdPartyView(self)
# Software made available from outside the Solus software repos
self.stack.add_titled(self.third_party, "3rd-party", _("Third Party"))
# Search view
self.search_view = ScSearchView(self)
# The search page
self.stack.add_titled(self.search_view, "search", _("Search"))
self.settings_view = ScSettingsView(self)
# The settings page
self.stack.add_titled(self.settings_view, "settings", _("Settings"))
# set up intro animation
self.stack.set_visible_child_name("empty")
self.stack.set_transition_type(Gtk.StackTransitionType.SLIDE_UP)
revel = Gtk.RevealerTransitionType.SLIDE_RIGHT
self.sidebar_revealer.set_transition_type(revel)
self.connect("map-event", self.on_mapped)
self.connect("button-press-event", self.on_button_press_event)
self.connect("key-press-event", self.on_key_press_event)
t = threading.Thread(target=self.init_children)
t.start()
self.show_all()
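# Illustrative only: ScMainWindow is meant to be created by the application
# object passed in as 'app', so the wiring looks roughly like the sketch below.
# The MyScApplication name is hypothetical; the real Gtk.Application subclass
# lives elsewhere in solus-sc.
#   class MyScApplication(Gtk.Application):
#       def do_activate(self):
#           win = ScMainWindow(self)
#           win.present()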
|
gpl-2.0
| -6,569,056,492,320,678,000
| 32.17623
| 80
| 0.627548
| false
| 3.713303
| false
| false
| false
|
bundgus/python-playground
|
sqlalchemy-playground/sqlalchemy-playground.py
|
1
|
4317
|
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
from sqlalchemy.sql import select
from sqlalchemy.sql import and_, or_, not_
from sqlalchemy.sql import text
from sqlalchemy import func, desc
print(sqlalchemy.__version__)
engine = create_engine('sqlite:///:memory:', echo=True)
metadata = MetaData()
users = Table('users', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50)),
Column('fullname', String(50)),
)
addresses = Table('addresses', metadata,
Column('id', Integer, primary_key=True),
Column('user_id', None, ForeignKey('users.id')),
Column('email_address', String, nullable=False)
)
metadata.create_all(engine)
conn = engine.connect()
ins = users.insert()
conn.execute(ins, name='jack', fullname='Jack Jones')
conn.execute(ins, id=2, name='wendy', fullname='Wendy Williams')
conn.execute(addresses.insert(), [
{'user_id': 1, 'email_address' : 'jack@yahoo.com'},
{'user_id': 1, 'email_address' : 'jack@msn.com'},
{'user_id': 2, 'email_address' : 'www@www.org'},
{'user_id': 2, 'email_address' : 'wendy@aol.com'},
])
'''
s = select([users])
result = conn.execute(s)
for row in result:
print(row)
result = conn.execute(s)
row = result.fetchone()
print("name:", row['name'], "; fullname:", row['fullname'])
print("name:", row[1], "; fullname:", row[2])
for row in conn.execute(s):
print("name:", row[users.c.name], "; fullname:", row[users.c.fullname])
s = select([users.c.name, users.c.fullname])
result = conn.execute(s)
for row in result:
print (row)
for row in conn.execute(select([users, addresses]).where(users.c.id == addresses.c.user_id)):
print (row)
print(and_(
users.c.name.like('j%'), users.c.id == addresses.c.user_id, \
or_(
addresses.c.email_address == 'wendy@aol.com',
addresses.c.email_address == 'jack@yahoo.com'
), \
not_(users.c.id > 5)))
s = select([(users.c.fullname +
", " + addresses.c.email_address).
label('title')]).\
where(
and_(
users.c.id == addresses.c.user_id,
users.c.name.between('m', 'z'),
or_(
addresses.c.email_address.like('%@aol.com'),
addresses.c.email_address.like('%@msn.com')
)
)
)
result = conn.execute(s)
for row in result:
print (row)
s = text(
"SELECT users.fullname || ', ' || addresses.email_address AS title "
"FROM users, addresses "
"WHERE users.id = addresses.user_id "
"AND users.name BETWEEN :x AND :y "
"AND (addresses.email_address LIKE :e1 "
"OR addresses.email_address LIKE :e2)")
result = conn.execute(s, x='m', y='z', e1='%@aol.com', e2='%@msn.com')
for row in result:
print (row)
stmt = select([
addresses.c.user_id,
func.count(addresses.c.id).label('num_addresses')]).\
order_by(desc("num_addresses"))
result = conn.execute(stmt)
for row in result:
print (row)
print (users.join(addresses))
print(users.join(addresses))
print(users.join(addresses,
addresses.c.email_address.like(users.c.name + '%')
))
s = select([users.c.fullname]).select_from(
users.join(addresses,
addresses.c.email_address.like(users.c.name + '%'))
)
result = conn.execute(s)
for row in result:
print(row)
s = select([users.c.fullname]).select_from(users.outerjoin(addresses))
result = conn.execute(s)
for row in result:
print(row)
stmt = select([users.c.name]).order_by(users.c.name)
result = conn.execute(stmt)
for row in result:
print(row)
stmt = select([users.c.name]).order_by(users.c.name.desc())
result = conn.execute(stmt)
for row in result:
print(row)
stmt = select([users.c.name, func.count(addresses.c.id)]).\
select_from(users.join(addresses)).\
group_by(users.c.name)
result = conn.execute(stmt)
for row in result:
print(row)
'''
stmt = users.update().\
values(fullname="Fullname: " + users.c.name)
result = conn.execute(stmt)
conn.execute(users.delete().where(users.c.name > 'm'))
# result.close()
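# Illustrative check, not in the original script: re-select the table to see
# the effect of the update() and delete() above; only rows whose name does not
# sort after 'm' remain (jack), with fullname rewritten by the update.
for row in conn.execute(select([users])):
    print(row)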
|
mit
| -1,070,303,591,381,381,500
| 26.496815
| 93
| 0.604587
| false
| 3.245865
| false
| false
| false
|
Horniman/Horniman_Hack
|
satalite/models.py
|
1
|
1056
|
"""Satalite date models"""
from __future__ import division, absolute_import, print_function, unicode_literals
from django.db import models
from db import models as db_models
class LogTempLive(models.Model):
"""Messages from Buoy"""
sensor = models.ForeignKey(db_models.Sensor)
processed = models.BooleanField(default = False)
data = models.CharField(max_length = 48)
time = models.DateTimeField()
#class LogTempBackup(models.Model):
# """Backup data"""
# sensor = models.ForeignKey(db_models.Sensor)
# byyy = models.CharField(max_length=4)
# bm = models.CharField(max_length=2)
# bd = models.CharField(max_length=2)
# bh = models.CharField(max_length=2)
# eyyy = models.CharField(max_length=4)
# em = models.CharField(max_length=2)
# ed = models.CharField(max_length=2)
# eh = models.CharField(max_length=2)
# sst = models.FloatField()
# sstanom = models.FloatField()
# hotspot = models.FloatField()
# dhw = models.FloatField()
# lat = models.FloatField()
# long = models.FloatField()
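# Illustrative only -- the query below is an example and does not appear in
# this app: unprocessed buoy messages would typically be fetched with
#   LogTempLive.objects.filter(processed=False).order_by('time')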
|
gpl-2.0
| 3,972,958,697,875,750,400
| 34.2
| 82
| 0.686553
| false
| 3.259259
| false
| false
| false
|
masaohamanaka/mbed
|
workspace_tools/targets.py
|
1
|
59025
|
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
CORE_LABELS = {
"ARM7TDMI-S": ["ARM7"],
"Cortex-M0" : ["M0", "CORTEX_M"],
"Cortex-M0+": ["M0P", "CORTEX_M"],
"Cortex-M1" : ["M1", "CORTEX_M"],
"Cortex-M3" : ["M3", "CORTEX_M"],
"Cortex-M4" : ["M4", "CORTEX_M"],
"Cortex-M4F" : ["M4", "CORTEX_M"],
"Cortex-M7" : ["M7", "CORTEX_M"],
"Cortex-M7F" : ["M7", "CORTEX_M"],
"Cortex-A9" : ["A9", "CORTEX_A"]
}
import os
import binascii
import struct
import shutil
from workspace_tools.patch import patch
from paths import TOOLS_BOOTLOADERS
class Target:
def __init__(self):
# ARM Core
self.core = None
# Is the disk provided by the interface chip of this board virtual?
self.is_disk_virtual = False
# list of toolchains that are supported by the mbed SDK for this target
self.supported_toolchains = None
# list of extra specific labels
self.extra_labels = []
# list of macros (-D)
self.macros = []
# Default online compiler:
self.default_toolchain = "ARM"
self.name = self.__class__.__name__
# Code used to determine devices' platform
        # This code is the prefix of the URL provided in mbed.htm (on the mbed disk)
self.detect_code = []
def program_cycle_s(self):
return 4 if self.is_disk_virtual else 1.5
def get_labels(self):
return [self.name] + CORE_LABELS[self.core] + self.extra_labels
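    # For example (values taken from the LPC1768 definition further below):
    #   LPC1768().get_labels() == ['LPC1768', 'M3', 'CORTEX_M',
    #                              'NXP', 'LPC176X', 'MBED_LPC1768']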
def init_hooks(self, hook, toolchain_name):
pass
### MCU Support ###
class CM4_UARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.supported_toolchains = ["uARM"]
self.default_toolchain = "uARM"
class CM4_ARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class CM4F_UARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.supported_toolchains = ["uARM"]
self.default_toolchain = "uARM"
class CM4F_ARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
### NXP ###
# This class implements the post-link patching step needed by LPC targets
class LPCTarget(Target):
def __init__(self):
Target.__init__(self)
def init_hooks(self, hook, toolchain_name):
hook.hook_add_binary("post", self.lpc_patch)
@staticmethod
def lpc_patch(t_self, resources, elf, binf):
t_self.debug("LPC Patch: %s" % os.path.split(binf)[1])
patch(binf)
class LPC11C24(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11XX_11CXX', 'LPC11CXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
class LPC1114(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11XX_11CXX', 'LPC11XX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPC11U24(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'LPC11U24_401']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.detect_code = ["1040"]
class OC_MBUINO(LPC11U24):
def __init__(self):
LPC11U24.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.macros = ['TARGET_LPC11U24']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
class LPC11U24_301(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
class LPC11U34_421(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class MICRONFCBOARD(LPC11U34_421):
def __init__(self):
LPC11U34_421.__init__(self)
self.macros = ['LPC11U34_421', 'APPNEARME_MICRONFCBOARD']
self.extra_labels = ['NXP', 'LPC11UXX', 'APPNEARME_MICRONFCBOARD']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class LPC11U35_401(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_501(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_501_IBDAP(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class XADOW_M0(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_Y5_MBUG(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class LPC11U37_501(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPCCAPPUCCINO(LPC11U37_501):
def __init__(self):
LPC11U37_501.__init__(self)
class ARCH_GPRS(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'LPC11U37_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
class LPC11U68(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC11U6X']
self.supported_toolchains = ["ARM", "uARM", "GCC_CR", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.detect_code = ["1168"]
class LPC1347(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC13XX']
self.supported_toolchains = ["ARM", "GCC_ARM","IAR"]
class LPC1549(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC15XX']
self.supported_toolchains = ["uARM", "GCC_CR", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.detect_code = ["1549"]
class LPC1768(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X', 'MBED_LPC1768']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.detect_code = ["1010"]
class ARCH_PRO(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.supported_form_factors = ["ARDUINO"]
class UBLOX_C027(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.supported_form_factors = ["ARDUINO"]
class XBED_LPC1768(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X', 'XBED_LPC1768']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.detect_code = ["1010"]
class LPC2368(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "ARM7TDMI-S"
self.extra_labels = ['NXP', 'LPC23XX']
self.supported_toolchains = ["ARM", "GCC_ARM", "GCC_CR"]
class LPC2460(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "ARM7TDMI-S"
self.extra_labels = ['NXP', 'LPC2460']
self.supported_toolchains = ["GCC_ARM"]
class LPC810(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC81X']
self.supported_toolchains = ["uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.is_disk_virtual = True
class LPC812(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC81X']
self.supported_toolchains = ["uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["1050"]
class LPC824(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC82X']
self.supported_toolchains = ["uARM", "GCC_ARM","GCC_CR", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class SSCI824(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC82X']
self.supported_toolchains = ["uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
self.is_disk_virtual = True
class LPC4088(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC408X']
self.supported_toolchains = ["ARM", "GCC_CR", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
if not os.path.isdir(binf):
# Regular binary file, nothing to do
LPCTarget.lpc_patch(t_self, resources, elf, binf)
return
outbin = open(binf + ".temp", "wb")
partf = open(os.path.join(binf, "ER_IROM1"), "rb")
        # Pad the first part (internal flash) with 0xFF to 512k
data = partf.read()
outbin.write(data)
outbin.write('\xFF' * (512*1024 - len(data)))
partf.close()
# Read and append the second part (external flash) in chunks of fixed size
chunksize = 128 * 1024
partf = open(os.path.join(binf, "ER_IROM2"), "rb")
while True:
data = partf.read(chunksize)
outbin.write(data)
if len(data) < chunksize:
break
partf.close()
outbin.close()
# Remove the directory with the binary parts and rename the temporary
# file to 'binf'
shutil.rmtree(binf, True)
os.rename(binf + '.temp', binf)
t_self.debug("Generated custom binary file (internal flash + SPIFI)")
LPCTarget.lpc_patch(t_self, resources, elf, binf)
class LPC4088_DM(LPC4088):
pass
class LPC4330_M4(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4330']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR", "GCC_ARM"]
class LPC4330_M0(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4330']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR"]
class LPC4337(LPCTarget):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4337']
self.supported_toolchains = ["ARM"]
class LPC1800(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC43XX']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR"]
class LPC11U37H_401(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
### Freescale ###
class KL05Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL25Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM", "GCC_CW_EWL", "GCC_CW_NEWLIB", "GCC_ARM","IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0200"]
class KL26Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM","GCC_ARM","IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL43Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["GCC_ARM", "ARM"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL46Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["GCC_ARM", "ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0220"]
class K20D50M(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['Freescale', 'K20XX']
self.supported_toolchains = ["GCC_ARM", "ARM", "IAR"]
self.is_disk_virtual = True
self.detect_code = ["0230"]
class TEENSY3_1(Target):
OUTPUT_EXT = 'hex'
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['Freescale', 'K20XX', 'K20DX256']
self.supported_toolchains = ["GCC_ARM", "ARM"]
self.is_disk_virtual = True
self.detect_code = ["0230"]
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO', 'GCC_ARM']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
from intelhex import IntelHex
binh = IntelHex()
binh.loadbin(binf, offset = 0)
with open(binf.replace(".bin", ".hex"), "w") as f:
binh.tofile(f, format='hex')
class K22F(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE']
self.macros = ["CPU_MK22FN512VLH12", "FSL_RTOS_MBED"]
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0201"]
class K64F(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE', 'MCU_K64F', 'FRDM']
self.macros = ["CPU_MK64FN1M0VMD12", "FSL_RTOS_MBED"]
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
self.detect_code = ["0240"]
class MTS_GAMBIT(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE', 'MCU_K64F']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.macros = ["CPU_MK64FN1M0VMD12", "FSL_RTOS_MBED", "TARGET_K64F"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
### STMicro ###
class NUCLEO_F030R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F030R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0725"]
class NUCLEO_F070RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F070RB']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0755"]
class NUCLEO_F072RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F072RB']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0730"]
class NUCLEO_F091RC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F091RC']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0750"]
class NUCLEO_F103RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32F1', 'STM32F103RB']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0700"]
class NUCLEO_F302R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F302R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0705"]
class NUCLEO_F303RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F303RE']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0745"]
class NUCLEO_F334R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F334R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0735"]
class NUCLEO_F401RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F401RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0720"]
class NUCLEO_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0740"]
class NUCLEO_F446RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F446RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0777"]
class NUCLEO_L053R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L053R8']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0715"]
class NUCLEO_L073RZ(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L073RZ']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0760"]
class NUCLEO_L152RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32L1', 'STM32L152RE']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0710"]
class STM32F3XX(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['STM', 'STM32F3XX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class STM32F407(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F4XX']
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
class ARCH_MAX(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F407', 'STM32F407VG']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
self.macros = ['LSI_VALUE=32000']
def program_cycle_s(self):
return 2
class DISCO_F051R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F051', 'STM32F051R8']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F100RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32F1', 'STM32F100RB']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F303VC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F303', 'STM32F303VC']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F334C8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F334C8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.detect_code = ["0810"]
class DISCO_F407VG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F407', 'STM32F407VG']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
class DISCO_F429ZI(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F429', 'STM32F429ZI']
self.supported_toolchains = ["GCC_ARM", "IAR"]
self.default_toolchain = "GCC_ARM"
class DISCO_L053C8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L053C8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F746NG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M7"
self.extra_labels = ['STM', 'STM32F7', 'STM32F746', 'STM32F746NG']
self.supported_toolchains = ["ARM", "uARM", "IAR"]
self.default_toolchain = "uARM"
self.detect_code = ["0815"]
class DISCO_L476VG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32L4', 'STM32L476VG']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.detect_code = ["0820"]
class MTS_MDOT_F405RG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F405RG']
self.macros = ['HSE_VALUE=26000000', 'OS_CLOCK=48000000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
class MTS_MDOT_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.macros = ['HSE_VALUE=26000000', 'OS_CLOCK=96000000', 'USE_PLL_HSE_EXTC=0', 'VECT_TAB_OFFSET=0x00010000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "ARM"
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['GCC_ARM', 'ARM_STD', 'ARM_MICRO']:
hook.hook_add_binary("post", self.combine_bins)
# combine application binary with bootloader
# bootloader + padding to 64kB + application + crc32 checksum (4 bytes)
@staticmethod
def combine_bins(t_self, resources, elf, binf):
loader = os.path.join(TOOLS_BOOTLOADERS, "MTS_MDOT_F411RE", "bootloader.bin")
target = binf + ".tmp"
if not os.path.exists(loader):
print "Can't find bootloader binary: " + loader
return
outbin = open(target, 'w+b')
part = open(loader, 'rb')
data = part.read()
outbin.write(data)
outbin.write('\xFF' * (64*1024 - len(data)))
part.close()
part = open(binf, 'rb')
data = part.read()
outbin.write(data)
part.close()
outbin.seek(0, 0)
data = outbin.read()
outbin.seek(0, 1)
crc = struct.pack('<I', binascii.crc32(data) & 0xFFFFFFFF)
outbin.write(crc)
outbin.close()
os.remove(binf)
os.rename(target, binf)
class MTS_DRAGONFLY_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.macros = ['HSE_VALUE=26000000', 'VECT_TAB_OFFSET=0x08010000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "ARM"
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['GCC_ARM', 'ARM_STD', 'ARM_MICRO']:
hook.hook_add_binary("post", self.combine_bins)
# combine application binary with bootloader
# bootloader + padding to 64kB + application + crc32 checksum (4 bytes)
@staticmethod
def combine_bins(t_self, resources, elf, binf):
loader = os.path.join(TOOLS_BOOTLOADERS, "MTS_DRAGONFLY_F411RE", "bootloader.bin")
target = binf + ".tmp"
if not os.path.exists(loader):
print "Can't find bootloader binary: " + loader
return
outbin = open(target, 'w+b')
part = open(loader, 'rb')
data = part.read()
outbin.write(data)
outbin.write('\xFF' * (64*1024 - len(data)))
part.close()
part = open(binf, 'rb')
data = part.read()
outbin.write(data)
part.close()
outbin.seek(0, 0)
data = outbin.read()
outbin.seek(0, 1)
crc = struct.pack('<I', binascii.crc32(data) & 0xFFFFFFFF)
outbin.write(crc)
outbin.close()
os.remove(binf)
os.rename(target, binf)
class MOTE_L152RC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32L1', 'STM32L152RC']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.detect_code = ["4100"]
class DISCO_F401VC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F401', 'STM32F401VC']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "GCC_ARM"
class UBLOX_C029(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F439', 'STM32F439ZI']
self.macros = ['HSE_VALUE=24000000', 'HSE_STARTUP_TIMEOUT=5000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
class NZ32SC151(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32L1', 'STM32L151RC']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
# After flashing the device, how long to delay before we assume the program is running
def program_cycle_s(self):
return 1.5
### Nordic ###
class MCU_NRF51(Target):
# the following is a list of possible Nordic softdevices in decreasing order
# of preference.
EXPECTED_SOFTDEVICES_WITH_OFFSETS = [
{
'name' : 's130_nrf51_1.0.0_softdevice.hex',
'boot' : 's130_nrf51_1.0.0_bootloader.hex',
'offset' : 0x1C000
},
{
'name' : 's110_nrf51822_8.0.0_softdevice.hex',
'boot' : 's110_nrf51822_8.0.0_bootloader.hex',
'offset' : 0x18000
},
{
'name' : 's110_nrf51822_7.1.0_softdevice.hex',
'boot' : 's110_nrf51822_7.1.0_bootloader.hex',
'offset' : 0x16000
},
{
'name' : 's110_nrf51822_7.0.0_softdevice.hex',
'boot' : 's110_nrf51822_7.0.0_bootloader.hex',
'offset' : 0x16000
},
{
'name' : 's110_nrf51822_6.0.0_softdevice.hex',
'boot' : 's110_nrf51822_6.0.0_bootloader.hex',
'offset' : 0x14000
}
]
OVERRIDE_BOOTLOADER_FILENAME = "nrf51822_bootloader.hex"
OUTPUT_EXT = 'hex'
MERGE_SOFT_DEVICE = True
MERGE_BOOTLOADER = False
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ["NORDIC", "MCU_NRF51", "MCU_NRF51822"]
self.macros = ['NRF51', 'TARGET_NRF51822']
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
self.detect_code = ["1070"]
def program_cycle_s(self):
return 6
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO', 'GCC_ARM', 'IAR']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
# Scan to find the actual path of the SoftDevice hex file
sdf = None
for softdeviceAndOffsetEntry in t_self.target.EXPECTED_SOFTDEVICES_WITH_OFFSETS:
for hexf in resources.hex_files:
if hexf.find(softdeviceAndOffsetEntry['name']) != -1:
t_self.debug("SoftDevice file found %s." % softdeviceAndOffsetEntry['name'])
sdf = hexf
if sdf is not None: break
if sdf is not None: break
if sdf is None:
t_self.debug("Hex file not found. Aborting.")
return
# Look for bootloader file that matches this soft device or bootloader override image
blf = None
if t_self.target.MERGE_BOOTLOADER is True:
for hexf in resources.hex_files:
if hexf.find(t_self.target.OVERRIDE_BOOTLOADER_FILENAME) != -1:
t_self.debug("Bootloader file found %s." % t_self.target.OVERRIDE_BOOTLOADER_FILENAME)
blf = hexf
break
elif hexf.find(softdeviceAndOffsetEntry['boot']) != -1:
t_self.debug("Bootloader file found %s." % softdeviceAndOffsetEntry['boot'])
blf = hexf
break
# Merge user code with softdevice
from intelhex import IntelHex
binh = IntelHex()
binh.loadbin(binf, offset=softdeviceAndOffsetEntry['offset'])
if t_self.target.MERGE_SOFT_DEVICE is True:
t_self.debug("Merge SoftDevice file %s" % softdeviceAndOffsetEntry['name'])
sdh = IntelHex(sdf)
binh.merge(sdh)
if t_self.target.MERGE_BOOTLOADER is True and blf is not None:
t_self.debug("Merge BootLoader file %s" % blf)
blh = IntelHex(blf)
binh.merge(blh)
with open(binf.replace(".bin", ".hex"), "w") as f:
binh.tofile(f, format='hex')
# 16KB Nordic targets are tight on SRAM using S130 (default) so we
# introduce two possible options:
# 1) Use S130 (default) - for this derive from MCU_NRF51_16K
# 2) Use S110 - for this derive from MCU_NRF51_16K_S110
# Note that the 'default' option will track the default choice
# for other Nordic targets, and so can take advantage of other
# future SoftDevice improvements
# The *_BASE targets should *not* be inherited from, as they do not
# specify enough for building a target
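# Illustration of option 2) above (hypothetical board, not part of the target
# table in this file): a 16KB design that explicitly opts into the S110
# SoftDevice derives from MCU_NRF51_16K_S110 rather than MCU_NRF51_16K, e.g.
#
#     class MY_S110_BOARD(MCU_NRF51_16K_S110):
#         def __init__(self):
#             MCU_NRF51_16K_S110.__init__(self)
#
# NRF51_MICROBIT further below follows exactly this pattern.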
# 16KB MCU version, e.g. Nordic nRF51822, Seeed Arch BLE, etc.
class MCU_NRF51_16K_BASE(MCU_NRF51):
def __init__(self):
MCU_NRF51.__init__(self)
self.extra_labels += ['MCU_NORDIC_16K', 'MCU_NRF51_16K']
self.macros += ['TARGET_MCU_NORDIC_16K', 'TARGET_MCU_NRF51_16K']
# derivative class used to create softdevice+bootloader enabled images
class MCU_NRF51_16K_BOOT_BASE(MCU_NRF51_16K_BASE):
def __init__(self):
MCU_NRF51_16K_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_BOOT']
self.macros += ['TARGET_MCU_NRF51_16K_BOOT', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = True
self.MERGE_BOOTLOADER = True
# derivative class used to create program only images for use with FOTA
class MCU_NRF51_16K_OTA_BASE(MCU_NRF51_16K_BASE):
def __init__(self):
MCU_NRF51_16K_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_OTA']
self.macros += ['TARGET_MCU_NRF51_16K_OTA', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = False
class MCU_NRF51_16K(MCU_NRF51_16K_BASE):
def __init__(self):
MCU_NRF51_16K_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S130']
self.macros += ['TARGET_MCU_NRF51_16K_S130']
class MCU_NRF51_16K_S110(MCU_NRF51_16K_BASE):
def __init__(self):
MCU_NRF51_16K_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S110']
self.macros += ['TARGET_MCU_NRF51_16K_S110']
class MCU_NRF51_16K_BOOT(MCU_NRF51_16K_BOOT_BASE):
def __init__(self):
MCU_NRF51_16K_BOOT_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S130']
self.macros += ['TARGET_MCU_NRF51_16K_S130']
class MCU_NRF51_16K_BOOT_S110(MCU_NRF51_16K_BOOT_BASE):
def __init__(self):
MCU_NRF51_16K_BOOT_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S110']
self.macros += ['TARGET_MCU_NRF51_16K_S110']
class MCU_NRF51_16K_OTA(MCU_NRF51_16K_OTA_BASE):
def __init__(self):
MCU_NRF51_16K_OTA_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S130']
self.macros += ['TARGET_MCU_NRF51_16K_S130']
class MCU_NRF51_16K_OTA_S110(MCU_NRF51_16K_OTA_BASE):
def __init__(self):
MCU_NRF51_16K_OTA_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S110']
self.macros += ['TARGET_MCU_NRF51_16K_S110']
# 32KB MCU version, e.g. Nordic nRF51-DK, nRF51-Dongle, etc.
class MCU_NRF51_32K(MCU_NRF51):
def __init__(self):
MCU_NRF51.__init__(self)
self.extra_labels += ['MCU_NORDIC_32K', 'MCU_NRF51_32K']
self.macros += ['TARGET_MCU_NORDIC_32K', 'TARGET_MCU_NRF51_32K']
class MCU_NRF51_32K_BOOT(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
self.extra_labels += ['MCU_NRF51_32K_BOOT']
self.macros += ['TARGET_MCU_NRF51_32K_BOOT', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = True
self.MERGE_BOOTLOADER = True
class MCU_NRF51_32K_OTA(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
self.extra_labels += ['MCU_NRF51_32K_OTA']
self.macros += ['TARGET_MCU_NRF51_32K_OTA', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = False
#
# nRF51 based development kits
#
# This one is special for legacy reasons
class NRF51822(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.extra_labels += ['NRF51822', 'NRF51822_MKIT']
self.macros += ['TARGET_NRF51822_MKIT']
class NRF51822_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['NRF51822', 'NRF51822_MKIT']
self.macros += ['TARGET_NRF51822_MKIT']
class NRF51822_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['NRF51822', 'NRF51822_MKIT']
self.macros += ['TARGET_NRF51822_MKIT']
class ARCH_BLE(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.supported_form_factors = ["ARDUINO"]
class ARCH_BLE_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['ARCH_BLE']
self.macros += ['TARGET_ARCH_BLE']
self.supported_form_factors = ["ARDUINO"]
class ARCH_BLE_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['ARCH_BLE']
self.macros += ['TARGET_ARCH_BLE']
self.supported_form_factors = ["ARDUINO"]
class ARCH_LINK(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.extra_labels += ['ARCH_BLE']
self.macros += ['TARGET_ARCH_BLE']
self.supported_form_factors = ["ARDUINO"]
class ARCH_LINK_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['ARCH_BLE', 'ARCH_LINK']
self.macros += ['TARGET_ARCH_BLE', 'TARGET_ARCH_LINK']
self.supported_form_factors = ["ARDUINO"]
class ARCH_LINK_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['ARCH_BLE', 'ARCH_LINK']
self.macros += ['TARGET_ARCH_BLE', 'TARGET_ARCH_LINK']
self.supported_form_factors = ["ARDUINO"]
class SEEED_TINY_BLE(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
class SEEED_TINY_BLE_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['SEEED_TINY_BLE']
self.macros += ['TARGET_SEEED_TINY_BLE']
class SEEED_TINY_BLE_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['SEEED_TINY_BLE']
self.macros += ['TARGET_SEEED_TINY_BLE']
class HRM1017(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.macros += ['TARGET_NRF_LFCLK_RC']
class HRM1017_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['HRM1017']
self.macros += ['TARGET_HRM1017', 'TARGET_NRF_LFCLK_RC']
class HRM1017_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['HRM1017']
self.macros += ['TARGET_HRM1017', 'TARGET_NRF_LFCLK_RC']
class RBLAB_NRF51822(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.supported_form_factors = ["ARDUINO"]
class RBLAB_NRF51822_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['RBLAB_NRF51822']
self.macros += ['TARGET_RBLAB_NRF51822']
self.supported_form_factors = ["ARDUINO"]
class RBLAB_NRF51822_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['RBLAB_NRF51822']
self.macros += ['TARGET_RBLAB_NRF51822']
self.supported_form_factors = ["ARDUINO"]
class RBLAB_BLENANO(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
class RBLAB_BLENANO_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['RBLAB_BLENANO']
self.macros += ['TARGET_RBLAB_BLENANO']
class RBLAB_BLENANO_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['RBLAB_BLENANO']
self.macros += ['TARGET_RBLAB_BLENANO']
class NRF51822_Y5_MBUG(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
class WALLBOT_BLE(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
class WALLBOT_BLE_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['WALLBOT_BLE']
self.macros += ['TARGET_WALLBOT_BLE']
class WALLBOT_BLE_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['WALLBOT_BLE']
self.macros += ['TARGET_WALLBOT_BLE']
class DELTA_DFCM_NNN40(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.macros += ['TARGET_NRF_LFCLK_RC']
def program_cycle_s(self):
return 10
class DELTA_DFCM_NNN40_BOOT(MCU_NRF51_32K_BOOT):
def __init__(self):
MCU_NRF51_32K_BOOT.__init__(self)
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.extra_labels += ['DELTA_DFCM_NNN40']
self.macros += ['TARGET_DELTA_DFCM_NNN40', 'TARGET_NRF_LFCLK_RC']
def program_cycle_s(self):
return 10
class DELTA_DFCM_NNN40_OTA(MCU_NRF51_32K_OTA):
def __init__(self):
MCU_NRF51_32K_OTA.__init__(self)
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.extra_labels += ['DELTA_DFCM_NNN40']
self.macros += ['TARGET_DELTA_DFCM_NNN40', 'TARGET_NRF_LFCLK_RC']
def program_cycle_s(self):
return 10
class NRF51_DK(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
class NRF51_DK_BOOT(MCU_NRF51_32K_BOOT):
def __init__(self):
MCU_NRF51_32K_BOOT.__init__(self)
self.extra_labels = ['NRF51_DK']
self.macros += ['TARGET_NRF51_DK']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
class NRF51_DK_OTA(MCU_NRF51_32K_OTA):
def __init__(self):
MCU_NRF51_32K_OTA.__init__(self)
self.extra_labels = ['NRF51_DK']
self.macros += ['TARGET_NRF51_DK']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
class NRF51_DONGLE(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
class NRF51_DONGLE_BOOT(MCU_NRF51_32K_BOOT):
def __init__(self):
MCU_NRF51_32K_BOOT.__init__(self)
self.extra_labels = ['NRF51_DONGLE']
self.macros += ['TARGET_NRF51_DONGLE']
class NRF51_DONGLE_OTA(MCU_NRF51_32K_OTA):
def __init__(self):
MCU_NRF51_32K_OTA.__init__(self)
self.extra_labels = ['NRF51_DONGLE']
self.macros += ['TARGET_NRF51_DONGLE']
class NRF51_MICROBIT(MCU_NRF51_16K_S110):
def __init__(self):
MCU_NRF51_16K_S110.__init__(self)
self.EXPECTED_SOFTDEVICES_WITH_OFFSETS = [
{
'name' : 's110_nrf51822_8.0.0_softdevice.hex',
'boot' : 's110_nrf51822_8.0.0_bootloader.hex',
'offset' : 0x18000
},
{
'name' : 's110_nrf51822_7.1.0_softdevice.hex',
'boot' : 's110_nrf51822_7.1.0_bootloader.hex',
'offset' : 0x16000
}
]
self.macros += ['TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_BOOT(MCU_NRF51_16K_BOOT_S110):
def __init__(self):
MCU_NRF51_16K_BOOT_S110.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_OTA(MCU_NRF51_16K_OTA_S110):
def __init__(self):
MCU_NRF51_16K_OTA_S110.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_B(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_B_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_B_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
### ARM ###
class ARM_MPS2_Target(Target):
def __init__(self):
Target.__init__(self)
self.OUTPUT_EXT = 'axf'
def init_hooks(self, hook, toolchain_name):
hook.hook_add_binary("replace", self.output_axf)
@staticmethod
def output_axf(t_self, resources, elf, bin):
shutil.copy(elf, bin)
t_self.debug("Passing ELF file %s" % bin)
class ARM_MPS2_M0(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M0']
self.macros = ['CMSDK_CM0']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M0P(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M0P']
self.macros = ['CMSDK_CM0plus']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M1(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M1"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M1']
self.macros = ['CMSDK_CM1']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M3(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M3']
self.macros = ['CMSDK_CM3']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M4(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M4']
self.macros = ['CMSDK_CM4']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M7(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M7']
self.macros = ['CMSDK_CM7']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2(ARM_MPS2_M4):
pass
### Renesas ###
class RZ_A1H(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-A9"
self.extra_labels = ['RENESAS', 'MBRZA1H']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
self.default_toolchain = "ARM"
def program_cycle_s(self):
return 2
### Maxim Integrated ###
class MAXWSNENV(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Maxim', 'MAX32610']
self.macros = ['__SYSTEM_HFX=24000000']
self.supported_toolchains = ["GCC_ARM", "IAR", "ARM"]
self.default_toolchain = "ARM"
class MAX32600MBED(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Maxim', 'MAX32600']
self.macros = ['__SYSTEM_HFX=24000000']
self.supported_toolchains = ["GCC_ARM", "IAR", "ARM"]
self.default_toolchain = "ARM"
### Silicon Labs ###
class EFM32GG_STK3700(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32GG990F1024']
self.supported_toolchains = ["GCC_ARM", "ARM", "uARM"]
self.default_toolchain = "ARM"
class EFM32LG_STK3600(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32LG990F256']
self.supported_toolchains = ["GCC_ARM", "ARM", "uARM"]
self.default_toolchain = "ARM"
class EFM32WG_STK3800(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32WG990F256']
self.supported_toolchains = ["GCC_ARM", "ARM", "uARM"]
self.default_toolchain = "ARM"
class EFM32ZG_STK3200(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32ZG222F32']
self.supported_toolchains = ["GCC_ARM", "uARM"]
self.default_toolchain = "uARM"
class EFM32HG_STK3400(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32HG322F64']
self.supported_toolchains = ["GCC_ARM", "uARM"]
self.default_toolchain = "uARM"
##WIZnet
class WIZWIKI_W7500(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['WIZNET', 'W7500x', 'WIZwiki_W7500']
self.supported_toolchains = ["uARM", "ARM"]
self.default_toolchain = "ARM"
self.supported_form_factors = ["ARDUINO"]
class SAMR21G18A(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Atmel', 'SAM_CortexM0+', 'SAMR21']
self.macros = ['__SAMR21G18A__', 'I2C_MASTER_CALLBACK_MODE=true', 'EXTINT_CALLBACK_MODE=true', 'USART_CALLBACK_MODE=true', 'TC_ASYNC=true']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "GCC_ARM"
# Get a single instance for each target
TARGETS = [
### NXP ###
LPC11C24(),
LPC11U24(),
OC_MBUINO(), # LPC11U24
LPC11U24_301(),
LPC11U34_421(),
MICRONFCBOARD(), # LPC11U34_421
LPC11U35_401(),
LPC11U35_501(), # LPC11U35_501
LPC11U35_501_IBDAP(), # LPC11U35_501
XADOW_M0(), # LPC11U35_501
LPC11U35_Y5_MBUG(), # LPC11U35_501
LPC11U37_501(),
LPCCAPPUCCINO(), # LPC11U37_501
ARCH_GPRS(), # LPC11U37_501
LPC11U68(),
LPC1114(),
LPC1347(),
LPC1549(),
LPC1768(), # LPC1768
ARCH_PRO(), # LPC1768
UBLOX_C027(), # LPC1768
XBED_LPC1768(), # LPC1768
LPC2368(),
LPC2460(),
LPC810(),
LPC812(),
LPC824(),
SSCI824(), # LPC824
LPC4088(),
LPC4088_DM(),
LPC4330_M4(),
LPC4330_M0(),
LPC4337(),
LPC11U37H_401(),
### Freescale ###
KL05Z(),
KL25Z(),
KL26Z(),
KL43Z(),
KL46Z(),
K20D50M(),
TEENSY3_1(),
K22F(),
K64F(),
MTS_GAMBIT(), # FRDM K64F
### STMicro ###
NUCLEO_F030R8(),
NUCLEO_F070RB(),
NUCLEO_F072RB(),
NUCLEO_F091RC(),
NUCLEO_F103RB(),
NUCLEO_F302R8(),
NUCLEO_F303RE(),
NUCLEO_F334R8(),
NUCLEO_F401RE(),
NUCLEO_F411RE(),
NUCLEO_F446RE(),
NUCLEO_L053R8(),
NUCLEO_L073RZ(),
NUCLEO_L152RE(),
STM32F3XX(),
STM32F407(),
DISCO_F051R8(),
DISCO_F100RB(),
DISCO_F303VC(),
DISCO_F334C8(),
DISCO_F746NG(),
DISCO_F407VG(), # STM32F407
ARCH_MAX(), # STM32F407
DISCO_F429ZI(),
DISCO_L053C8(),
DISCO_L476VG(),
MTS_MDOT_F405RG(),
MTS_MDOT_F411RE(),
MOTE_L152RC(),
MTS_DRAGONFLY_F411RE(),
DISCO_F401VC(),
UBLOX_C029(), # STM32F439
NZ32SC151(), # STM32L151
### Nordic ###
NRF51822(), # nRF51_16K
NRF51822_BOOT(), # nRF51_16K
NRF51822_OTA(), # nRF51_16K
ARCH_BLE(), # nRF51_16K
ARCH_BLE_BOOT(), # nRF51_16K
ARCH_BLE_OTA(), # nRF51_16K
ARCH_LINK(), # nRF51_16K
ARCH_LINK_BOOT(), # nRF51_16K
ARCH_LINK_OTA(), # nRF51_16K
SEEED_TINY_BLE(), # nRF51_16K
SEEED_TINY_BLE_BOOT(), # nRF51_16K
SEEED_TINY_BLE_OTA(), # nRF51_16K
HRM1017(), # nRF51_16K
HRM1017_BOOT(), # nRF51_16K
HRM1017_OTA(), # nRF51_16K
RBLAB_NRF51822(), # nRF51_16K
RBLAB_NRF51822_BOOT(), # nRF51_16K
RBLAB_NRF51822_OTA(), # nRF51_16K
RBLAB_BLENANO(), # nRF51_16K
RBLAB_BLENANO_BOOT(), # nRF51_16K
RBLAB_BLENANO_OTA(), # nRF51_16K
NRF51822_Y5_MBUG(), # nRF51_16K
WALLBOT_BLE(), # nRF51_16K
WALLBOT_BLE_BOOT(), # nRF51_16K
WALLBOT_BLE_OTA(), # nRF51_16K
DELTA_DFCM_NNN40(), # nRF51_16K
DELTA_DFCM_NNN40_BOOT(),# nRF51_16K
DELTA_DFCM_NNN40_OTA(), # nRF51_16K
NRF51_DK(), # nRF51_32K
NRF51_DK_BOOT(), # nRF51_32K
NRF51_DK_OTA(), # nRF51_32K
NRF51_DONGLE(), # nRF51_32K
NRF51_DONGLE_BOOT(), # nRF51_32K
NRF51_DONGLE_OTA(), # nRF51_32K
NRF51_MICROBIT(), # nRF51_16K - S110
NRF51_MICROBIT_B(), # nRF51_16K - default
### ARM ###
ARM_MPS2_M0(),
ARM_MPS2_M0P(),
ARM_MPS2_M1(),
ARM_MPS2_M3(),
ARM_MPS2_M4(),
ARM_MPS2_M7(),
ARM_MPS2(),
### Renesas ###
RZ_A1H(),
### Maxim Integrated ###
MAXWSNENV(),
MAX32600MBED(),
### Silicon Labs ###
EFM32GG_STK3700(),
EFM32LG_STK3600(),
EFM32WG_STK3800(),
EFM32ZG_STK3200(),
EFM32HG_STK3400(),
### WIZnet ###
WIZWIKI_W7500(),
SAMR21G18A(),
]
# Map each target name to its unique instance
TARGET_MAP = {}
for t in TARGETS:
TARGET_MAP[t.name] = t
TARGET_NAMES = TARGET_MAP.keys()
# Some targets with different name have the same exporters
EXPORT_MAP = { }
# Detection APIs
def get_target_detect_codes():
""" Returns dictionary mapping detect_code -> platform_name
"""
result = {}
for target in TARGETS:
for detect_code in target.detect_code:
result[detect_code] = target.name
return result
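# Illustrative use of the detection API (assuming Target.name matches the
# class name, as TARGET_MAP above relies on; the concrete codes come from the
# target table, e.g. NUCLEO_L152RE declares detect_code "0710"):
#
#     codes = get_target_detect_codes()
#     codes.get("0710")  # -> "NUCLEO_L152RE"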
|
apache-2.0
| 8,006,120,062,554,410,000
| 32.902929
| 147
| 0.573469
| false
| 2.88659
| false
| false
| false
|
myarjunar/QGIS
|
tests/src/python/test_qgsproject.py
|
1
|
5476
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsProject.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import chr
from builtins import range
__author__ = 'Sebastian Dietrich'
__date__ = '19/11/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import qgis # NOQA
from qgis.core import QgsProject, QgsApplication, QgsUnitTypes, QgsCoordinateReferenceSystem
from qgis.testing import start_app, unittest
from utilities import (unitTestDataPath)
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsProject(unittest.TestCase):
def __init__(self, methodName):
"""Run once on class initialization."""
unittest.TestCase.__init__(self, methodName)
self.messageCaught = False
def test_makeKeyTokens_(self):
# see http://www.w3.org/TR/REC-xml/#d0e804 for a list of valid characters
invalidTokens = []
validTokens = []
# all test tokens will be generated by prepending or inserting characters to this token
validBase = "valid"
# some invalid characters, not allowed anywhere in a token
# note that '/' must not be added here because it is taken as a separator by makeKeyTokens_()
invalidChars = "+*,;<>|!$%()=?#\x01"
# generate the characters that are allowed at the start of a token (and at every other position)
validStartChars = ":_"
charRanges = [
(ord('a'), ord('z')),
(ord('A'), ord('Z')),
(0x00F8, 0x02FF),
(0x0370, 0x037D),
(0x037F, 0x1FFF),
(0x200C, 0x200D),
(0x2070, 0x218F),
(0x2C00, 0x2FEF),
(0x3001, 0xD7FF),
(0xF900, 0xFDCF),
(0xFDF0, 0xFFFD),
# (0x10000, 0xEFFFF), while actually valid, these are not yet accepted by makeKeyTokens_()
]
for r in charRanges:
for c in range(r[0], r[1]):
validStartChars += chr(c)
# generate the characters that are only allowed inside a token, not at the start
validInlineChars = "-.\xB7"
charRanges = [
(ord('0'), ord('9')),
(0x0300, 0x036F),
(0x203F, 0x2040),
]
for r in charRanges:
for c in range(r[0], r[1]):
validInlineChars += chr(c)
# test forbidden start characters
for c in invalidChars + validInlineChars:
invalidTokens.append(c + validBase)
# test forbidden inline characters
for c in invalidChars:
invalidTokens.append(validBase[:4] + c + validBase[4:])
# test each allowed start character
for c in validStartChars:
validTokens.append(c + validBase)
# test each allowed inline character
for c in validInlineChars:
validTokens.append(validBase[:4] + c + validBase[4:])
logger = QgsApplication.messageLog()
logger.messageReceived.connect(self.catchMessage)
prj = QgsProject.instance()
for token in validTokens:
self.messageCaught = False
prj.readEntry("test", token)
myMessage = "valid token '%s' not accepted" % (token)
assert not self.messageCaught, myMessage
for token in invalidTokens:
self.messageCaught = False
prj.readEntry("test", token)
myMessage = "invalid token '%s' accepted" % (token)
assert self.messageCaught, myMessage
logger.messageReceived.disconnect(self.catchMessage)
def catchMessage(self):
self.messageCaught = True
def testCrs(self):
prj = QgsProject.instance()
prj.clear()
self.assertFalse(prj.crs().isValid())
prj.setCrs(QgsCoordinateReferenceSystem.fromOgcWmsCrs('EPSG:3111'))
self.assertEqual(prj.crs().authid(), 'EPSG:3111')
def testEllipsoid(self):
prj = QgsProject.instance()
prj.clear()
prj.setCrs(QgsCoordinateReferenceSystem.fromOgcWmsCrs('EPSG:3111'))
prj.setEllipsoid('WGS84')
self.assertEqual(prj.ellipsoid(), 'WGS84')
# if project has NO crs, then ellipsoid should always be none
prj.setCrs(QgsCoordinateReferenceSystem())
self.assertEqual(prj.ellipsoid(), 'NONE')
def testDistanceUnits(self):
prj = QgsProject.instance()
prj.clear()
prj.setDistanceUnits(QgsUnitTypes.DistanceFeet)
self.assertEqual(prj.distanceUnits(), QgsUnitTypes.DistanceFeet)
def testAreaUnits(self):
prj = QgsProject.instance()
prj.clear()
prj.setAreaUnits(QgsUnitTypes.AreaSquareFeet)
self.assertEqual(prj.areaUnits(), QgsUnitTypes.AreaSquareFeet)
def testReadEntry(self):
prj = QgsProject.instance()
prj.read(os.path.join(TEST_DATA_DIR, 'labeling/test-labeling.qgs'))
# valid key, valid int value
self.assertEqual(prj.readNumEntry("SpatialRefSys", "/ProjectionsEnabled", -1)[0], 0)
# invalid key
self.assertEqual(prj.readNumEntry("SpatialRefSys", "/InvalidKey", -1)[0], -1)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| 3,873,158,816,957,129,700
| 32.595092
| 104
| 0.620161
| false
| 3.818689
| true
| false
| false
|
eirnym/aiopg
|
examples/notify.py
|
1
|
1061
|
import asyncio
import aiopg
dsn = 'dbname=aiopg user=aiopg password=passwd host=127.0.0.1'
async def notify(conn):
async with conn.cursor() as cur:
for i in range(5):
msg = "message {}".format(i)
print('Send ->', msg)
await cur.execute("NOTIFY channel, '{}'".format(msg))
await cur.execute("NOTIFY channel, 'finish'")
async def listen(conn):
async with conn.cursor() as cur:
await cur.execute("LISTEN channel")
while True:
msg = await conn.notifies.get()
if msg.payload == 'finish':
return
else:
print('Receive <-', msg.payload)
async def main():
async with aiopg.create_pool(dsn) as pool:
async with pool.acquire() as conn1:
listener = listen(conn1)
async with pool.acquire() as conn2:
notifier = notify(conn2)
await asyncio.gather(listener, notifier)
print("ALL DONE")
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
bsd-2-clause
| 5,568,833,685,149,267,000
| 26.205128
| 65
| 0.572102
| false
| 3.858182
| false
| false
| false
|
loveisbug/liveshow-sh
|
live.py
|
1
|
3864
|
# -*- coding: utf-8 -*-
import urllib2
import HTMLParser
from bs4 import BeautifulSoup
import sys
from datetime import *
import re
import smtplib
from email.MIMEText import MIMEText
def sendmail(subject, content):
email_host = 'smtp host'
email_user = 'sender email'
email_pwd = 'sender pwd'
maillist = ['example@123.com']
me = email_user
msg = MIMEText(content, 'html', 'utf-8')
msg['Subject'] = subject
msg['From'] = me
msg['To'] = ', '.join(maillist)
try:
smtp = smtplib.SMTP(email_host)
smtp.login(email_user, email_pwd)
smtp.sendmail(me, maillist, msg.as_string())
smtp.quit()
print 'email send success.'
except Exception, e:
print e
print 'email send failed.'
livedict = {
'MAO' : 'maosh/1441569/1', # https://site.douban.com/maosh/widget/events/1441569/
'YYT' : 'yuyintang_h/1217192/1',
'QSW' : '187956/11298220/1', # https://site.douban.com/187956/widget/events/11298220/
'OST' : '176416/10189365/1',
'JZC' : 'jzclub/1357869/1',
'HAL' : '273062/191469274/1', # https://site.douban.com/273062/widget/events/191469274/
'MSL' : '290170/192970720/2', # https://site.douban.com/290170/widget/events/192970720/
'696' : 'livebar696/1381481/1', # https://site.douban.com/livebar696/widget/events/1381481/
'YGS' : 'yugongyishan/1431074/2', # https://site.douban.com/yugongyishan/widget/events/1431074/
'MOG' : 'moguspace/191972683/1', # https://site.douban.com/moguspace/widget/events/191972683/
'DDC' : '237627/16619636/2' # https://site.douban.com/237627/widget/events/16619636/
}
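# Each value above encodes "<douban site path>/<events widget id>/<list style>";
# e.g. the MAO entry expands to the URL noted in its comment,
# https://site.douban.com/maosh/widget/events/1441569/, and is parsed with
# list style 1 (see the liststyle handling in fetchliveshow below).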
def fetchliveshow(livehouse):
baseurl = 'https://site.douban.com/' + livedict[livehouse].split('/')[0] + '/widget/events/' + livedict[livehouse].split('/')[1] + '/?start='
liststyle = int(livedict[livehouse].split('/')[2])
pagedepth = 10
pagecnt = 0
urlrequest = urllib2.Request(baseurl + str(pagecnt))
html_src = urllib2.urlopen(urlrequest).read()
parser = BeautifulSoup(html_src, "html.parser")
try:
eventcnt = re.findall(r'\d+', parser.find('span', 'count').text)
except:
eventcnt = ['0']
if len(eventcnt):
pagecnt = int(eventcnt[0]) / pagedepth + 1
print pagecnt
text = ''
for i in range(0, pagecnt):
urlrequest = urllib2.Request(baseurl + str(i * pagedepth))
html_src = urllib2.urlopen(urlrequest).read()
parser = BeautifulSoup(html_src, "html.parser")
# liststyle 1: 'events-list-s', 'class':'item close' and 'class':'item '
# liststyle 2: 'events-list', 'class':'item'
if liststyle == 1:
elist = parser.find('div', {'class' : 'events-list-s'}).findAll('li', {'class' : 'item '})
elif liststyle == 2:
elist = parser.find('div', {'class' : 'events-list'}).findAll('li', {'class' : 'item'})
else:
elist = []
print len(elist), i
for event in elist:
if event.findNext('span').text.find(u'已结束') != -1:
elist = []
break
eventurl = event.findNext('a')['href']
urlrequest = urllib2.Request(eventurl)
html_src = urllib2.urlopen(urlrequest).read()
parser = BeautifulSoup(html_src, "html.parser")
title = parser.find('h1', {'itemprop' : 'summary'}).contents[0].strip()
try:
datetime = parser.find('li', 'calendar-str-item').text.strip()
except AttributeError:
datetime = next(parser.find('ul', 'calendar-strs ').findNext('li').children).strip()
except:
datetime = ''
prices = parser.findAll('span', 'tickets-info-price')
price = prices[-1].text.strip() if len(prices) else ' '
text += '<b>' + datetime + ' ' + price + '</b><br>' + '<a href="' + eventurl + '">' + title + '</a><br><br>'
if len(elist) < pagedepth:
break
sendmail(livehouse + ' Liveshow - ' + str(date.today()), text)
def main(argv):
if len(argv) > 1:
fetchliveshow(argv[1])
return 0
else:
print "Please input the livehouse: MAO, YYT, QSW, OST."
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
mit
| 4,368,484,991,240,511,500
| 35.742857
| 142
| 0.653447
| false
| 2.65337
| false
| false
| false
|
SamuelToh/pixelated-user-agent
|
service/test/unit/test_welcome_mail.py
|
1
|
2330
|
#
# Copyright (c) 2014 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
from mockito import verify, mock
from mockito.matchers import Matcher
from email import message_from_file
from pixelated.config.leap import add_welcome_mail
from pixelated.adapter.model.mail import InputMail
class TestWelcomeMail(unittest.TestCase):
def test_add_welcome_mail(self):
mail_store = mock()
input_mail = self._get_welcome_mail()
add_welcome_mail(mail_store)
capture = WelcomeMailCapture()
verify(mail_store).add_mail('INBOX', capture)
capture.assert_mail(input_mail.raw)
def _get_welcome_mail(self):
current_path = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(current_path,
'..',
'..',
'pixelated',
'assets',
'welcome.mail')) as mail_template_file:
mail_template = message_from_file(mail_template_file)
return InputMail.from_python_mail(mail_template)
class WelcomeMailCapture(Matcher):
def matches(self, arg):
self.value = arg
return True
def assert_mail(self, mail):
captured_mail = self._format(self.value)
expected_mail = self._format(mail)
assert captured_mail == expected_mail
def _format(self, mail):
splitter = '\n'
arr = mail.split(splitter)
arr = self._remove_variable_value(arr)
return splitter.join(arr)
def _remove_variable_value(self, arr):
arr.pop(0)
arr.pop(6)
arr.pop(44)
return arr
|
agpl-3.0
| 6,749,108,892,892,384,000
| 30.917808
| 77
| 0.639056
| false
| 4.02418
| false
| false
| false
|
zhlinh/leetcode
|
0057.Insert Interval/solution.py
|
1
|
1441
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: zhlinhng@gmail.com
Version: 0.0.1
Created Time: 2016-02-22
Last_modify: 2016-02-22
******************************************
'''
'''
Given a set of non-overlapping intervals,
insert a new interval into the intervals (merge if necessary).
You may assume that the intervals were initially sorted
according to their start times.
Example 1:
Given intervals [1,3],[6,9], insert and merge [2,5] in as [1,5],[6,9].
Example 2:
Given [1,2],[3,5],[6,7],[8,10],[12,16],
insert and merge [4,9] in as [1,2],[3,10],[12,16].
This is because the new interval [4,9] overlaps with [3,5],[6,7],[8,10].
'''
# Definition for an interval.
class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution(object):
def insert(self, intervals, newInterval):
"""
:type intervals: List[Interval]
:type newInterval: Interval
:rtype: List[Interval]
"""
ns, ne = newInterval.start, newInterval.end
left, right = [], []
for x in intervals:
if x.end < ns:
left.append(x)
elif x.start > ne:
right.append(x)
else:
ns = min(x.start, ns)
ne = max(x.end, ne)
return left + [Interval(ns, ne)] + right
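# Illustrative usage, mirroring Example 1 from the docstring above:
#
#     s = Solution()
#     result = s.insert([Interval(1, 3), Interval(6, 9)], Interval(2, 5))
#     # result -> two intervals covering [1, 5] and [6, 9]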
|
apache-2.0
| -6,815,767,327,999,001,000
| 26.711538
| 72
| 0.529493
| false
| 3.398585
| false
| false
| false
|
shootstar/novatest
|
nova/api/openstack/compute/plugins/v3/cells.py
|
1
|
14865
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo.config import cfg
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.cells import rpc_driver
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute
from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
ALIAS = "os-cells"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def make_cell(elem):
elem.set('name')
elem.set('username')
elem.set('type')
elem.set('rpc_host')
elem.set('rpc_port')
caps = xmlutil.SubTemplateElement(elem, 'capabilities',
selector='capabilities')
cap = xmlutil.SubTemplateElement(caps, xmlutil.Selector(0),
selector=xmlutil.get_items)
cap.text = 1
make_capacity(elem)
def make_capacity(cell):
def get_units_by_mb(capacity_info):
return capacity_info['units_by_mb'].items()
capacity = xmlutil.SubTemplateElement(cell, 'capacities',
selector='capacities')
ram_free = xmlutil.SubTemplateElement(capacity, 'ram_free',
selector='ram_free')
ram_free.set('total_mb', 'total_mb')
unit_by_mb = xmlutil.SubTemplateElement(ram_free, 'unit_by_mb',
selector=get_units_by_mb)
unit_by_mb.set('mb', 0)
unit_by_mb.set('unit', 1)
disk_free = xmlutil.SubTemplateElement(capacity, 'disk_free',
selector='disk_free')
disk_free.set('total_mb', 'total_mb')
unit_by_mb = xmlutil.SubTemplateElement(disk_free, 'unit_by_mb',
selector=get_units_by_mb)
unit_by_mb.set('mb', 0)
unit_by_mb.set('unit', 1)
cell_nsmap = {None: wsgi.XMLNS_V10}
class CellTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('cell', selector='cell')
make_cell(root)
return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
class CellsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('cells')
elem = xmlutil.SubTemplateElement(root, 'cell', selector='cells')
make_cell(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
class CellDeserializer(wsgi.XMLDeserializer):
"""Deserializer to handle xml-formatted cell create requests."""
def _extract_capabilities(self, cap_node):
caps = {}
for cap in cap_node.childNodes:
cap_name = cap.tagName
caps[cap_name] = self.extract_text(cap)
return caps
def _extract_cell(self, node):
cell = {}
cell_node = self.find_first_child_named(node, 'cell')
extract_fns = {
'capabilities': self._extract_capabilities,
'rpc_port': lambda child: int(self.extract_text(child)),
}
for child in cell_node.childNodes:
name = child.tagName
extract_fn = extract_fns.get(name, self.extract_text)
cell[name] = extract_fn(child)
return cell
def default(self, string):
"""Deserialize an xml-formatted cell create request."""
node = xmlutil.safe_minidom_parse_string(string)
return {'body': {'cell': self._extract_cell(node)}}
def _filter_keys(item, keys):
"""
Filters all model attributes except for keys
item is a dict
"""
return dict((k, v) for k, v in item.iteritems() if k in keys)
def _fixup_cell_info(cell_info, keys):
"""
If the transport_url is present in the cell, derive username,
rpc_host, and rpc_port from it.
"""
if 'transport_url' not in cell_info:
return
# Disassemble the transport URL
transport_url = cell_info.pop('transport_url')
try:
transport = rpc_driver.parse_transport_url(transport_url)
except ValueError:
# Just go with None's
for key in keys:
cell_info.setdefault(key, None)
return cell_info
transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'}
for key in keys:
if key in cell_info:
continue
transport_field = transport_field_map.get(key, key)
cell_info[key] = transport[transport_field]
def _scrub_cell(cell, detail=False):
keys = ['name', 'username', 'rpc_host', 'rpc_port']
if detail:
keys.append('capabilities')
cell_info = _filter_keys(cell, keys + ['transport_url'])
_fixup_cell_info(cell_info, keys)
cell_info['type'] = 'parent' if cell['is_parent'] else 'child'
return cell_info
class CellsController(object):
"""Controller for Cell resources."""
def __init__(self):
self.compute_api = compute.API()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _get_cells(self, ctxt, req, detail=False):
"""Return all cells."""
# Ask the CellsManager for the most recent data
items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
items = common.limited(items, req)
items = [_scrub_cell(item, detail=detail) for item in items]
return dict(cells=items)
@wsgi.serializers(xml=CellsTemplate)
def index(self, req):
"""Return all cells in brief."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req)
@wsgi.serializers(xml=CellsTemplate)
def detail(self, req):
"""Return all cells in detail."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req, detail=True)
@wsgi.serializers(xml=CellTemplate)
def info(self, req):
"""Return name and capabilities for this cell."""
context = req.environ['nova.context']
authorize(context)
cell_capabs = {}
my_caps = CONF.cells.capabilities
for cap in my_caps:
key, value = cap.split('=')
cell_capabs[key] = value
cell = {'name': CONF.cells.name,
'type': 'self',
'rpc_host': None,
'rpc_port': 0,
'username': None,
'capabilities': cell_capabs}
return dict(cell=cell)
@wsgi.serializers(xml=CellTemplate)
def capacities(self, req, id=None):
"""Return capacities for a given cell or all cells."""
# TODO(kaushikc): return capacities as a part of cell info and
# cells detail calls in v3, along with capabilities
context = req.environ['nova.context']
authorize(context)
try:
capacities = self.cells_rpcapi.get_capacities(context,
cell_name=id)
except exception.CellNotFound:
msg = (_("Cell %(id)s not found.") % {'id': id})
raise exc.HTTPNotFound(explanation=msg)
return dict(cell={"capacities": capacities})
@wsgi.serializers(xml=CellTemplate)
def show(self, req, id):
"""Return data about the given cell name. 'id' is a cell name."""
context = req.environ['nova.context']
authorize(context)
try:
cell = db.cell_get(context, id)
except exception.CellNotFound:
raise exc.HTTPNotFound()
return dict(cell=_scrub_cell(cell))
def delete(self, req, id):
"""Delete a child or parent cell entry. 'id' is a cell name."""
context = req.environ['nova.context']
authorize(context)
num_deleted = db.cell_delete(context, id)
if num_deleted == 0:
raise exc.HTTPNotFound()
return {}
def _validate_cell_name(self, cell_name):
"""Validate cell name is not empty and doesn't contain '!' or '.'."""
if not cell_name:
msg = _("Cell name cannot be empty")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
if '!' in cell_name or '.' in cell_name:
msg = _("Cell name cannot contain '!' or '.'")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _validate_cell_type(self, cell_type):
"""Validate cell_type is 'parent' or 'child'."""
if cell_type not in ['parent', 'child']:
msg = _("Cell type must be 'parent' or 'child'")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _normalize_cell(self, cell, existing=None):
"""
Normalize input cell data. Normalizations include:
* Converting cell['type'] to is_parent boolean.
* Merging existing transport URL with transport information.
"""
# Start with the cell type conversion
if 'type' in cell:
self._validate_cell_type(cell['type'])
cell['is_parent'] = cell['type'] == 'parent'
del cell['type']
else:
cell['is_parent'] = False
# Now we disassemble the existing transport URL...
transport = {}
if existing and 'transport_url' in existing:
transport = rpc_driver.parse_transport_url(
existing['transport_url'])
# Copy over the input fields
transport_field_map = {
'username': 'username',
'password': 'password',
'hostname': 'rpc_host',
'port': 'rpc_port',
'virtual_host': 'rpc_virtual_host',
}
for key, input_field in transport_field_map.items():
# Set the default value of the field; using setdefault()
# lets us avoid overriding the existing transport URL
transport.setdefault(key, None)
# Only override the value if we're given an override
if input_field in cell:
transport[key] = cell.pop(input_field)
# Now set the transport URL
cell['transport_url'] = rpc_driver.unparse_transport_url(transport)
@wsgi.serializers(xml=CellTemplate)
@wsgi.deserializers(xml=CellDeserializer)
def create(self, req, body):
"""Create a child cell entry."""
context = req.environ['nova.context']
authorize(context)
if 'cell' not in body:
msg = _("No cell information in request")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
cell = body['cell']
if 'name' not in cell:
msg = _("No cell name in request")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
self._validate_cell_name(cell['name'])
self._normalize_cell(cell)
cell = db.cell_create(context, cell)
return dict(cell=_scrub_cell(cell))
@wsgi.serializers(xml=CellTemplate)
@wsgi.deserializers(xml=CellDeserializer)
def update(self, req, id, body):
"""Update a child cell entry. 'id' is the cell name to update."""
context = req.environ['nova.context']
authorize(context)
if 'cell' not in body:
msg = _("No cell information in request")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
cell = body['cell']
cell.pop('id', None)
if 'name' in cell:
self._validate_cell_name(cell['name'])
try:
# NOTE(Vek): There is a race condition here if multiple
# callers are trying to update the cell
# information simultaneously. Since this
# operation is administrative in nature, and
# will be going away in the future, I don't see
# it as much of a problem...
existing = db.cell_get(context, id)
except exception.CellNotFound:
raise exc.HTTPNotFound()
self._normalize_cell(cell, existing)
try:
cell = db.cell_update(context, id, cell)
except exception.CellNotFound:
raise exc.HTTPNotFound()
return dict(cell=_scrub_cell(cell))
def sync_instances(self, req, body):
"""Tell all cells to sync instance info."""
context = req.environ['nova.context']
authorize(context)
project_id = body.pop('project_id', None)
deleted = body.pop('deleted', False)
updated_since = body.pop('updated_since', None)
if body:
msg = _("Only 'updated_since' and 'project_id' are understood.")
raise exc.HTTPBadRequest(explanation=msg)
if updated_since:
try:
timeutils.parse_isotime(updated_since)
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
self.cells_rpcapi.sync_instances(context, project_id=project_id,
updated_since=updated_since, deleted=deleted)
class Cells(extensions.V3APIExtensionBase):
"""Enables cells-related functionality such as adding neighbor cells,
listing neighbor cells, and getting the capabilities of the local cell.
"""
name = "Cells"
alias = ALIAS
namespace = "http://docs.openstack.org/compute/ext/cells/api/v3"
version = 1
def get_resources(self):
coll_actions = {
'detail': 'GET',
'info': 'GET',
'sync_instances': 'POST',
'capacities': 'GET',
}
memb_actions = {
'capacities': 'GET',
}
res = extensions.ResourceExtension(ALIAS, CellsController(),
collection_actions=coll_actions,
member_actions=memb_actions)
return [res]
def get_controller_extensions(self):
return []
|
apache-2.0
| 837,730,049,796,058,100
| 34.141844
| 78
| 0.592398
| false
| 4.072603
| false
| false
| false
|
Upstream-Research/csv-tools
|
csv_tools/csv_prepend.py
|
1
|
11652
|
## Copyright (c) 2016-2017 Upstream Research, Inc. All Rights Reserved. ##
## Subject to an 'MIT' License. See LICENSE file in top-level directory ##
## #python-3.x
## python 2 does not work, mostly due to issues with the csv and io modules when handling unicode data
help_text = (
"CSV-PREPEND tool version 20170918\n"
"Insert a header row into a CSV stream\n"
"\n"
"csv-prepend [OPTIONS] ColumnValueList [InputFile]\n"
"\n"
"OPTIONS\n"
" -E {E} Input file text encoding (e.g. 'utf-8', 'windows-1252')\n"
" -e {E} Output file text encoding (e.g. 'utf-8', 'windows-1252')\n"
" -K {N} Number of rows to skip from the input (default=0)\n"
" -N {N} Maximum number of rows to read (default=ALL)\n"
" -n {N} Maximum number of rows to write (default=ALL)\n"
" -o {F} Output file name\n"
" -S {S} Input file field delimiter (default ',')\n"
" -s {S} Output file field delimiter (default ',')\n"
"\n"
"ColumnValueList is a comma separated list of values to be inserted as \n"
"the first row.\n"
"It is possible to replace the header row using the -K option.\n"
)
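# Example invocations (illustrative; output goes to stdout unless -o is given):
#   csv-prepend "id,name,value" data.csv -o out.csv
#       writes "id,name,value" as a new first row ahead of the rows of data.csv
#   csv-prepend -K 1 "id,name,value" data.csv
#       skips the original first row, effectively replacing the header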
import sys
import csv
import io
from ._csv_helpers import (
decode_delimiter_name
,decode_charset_name
,decode_newline
)
def main(arg_list, stdin, stdout, stderr):
in_io = stdin
out_io = stdout
err_io = stderr
show_help = False
input_file_name = None
output_file_name = None
input_delimiter = ','
output_delimiter = ','
# 'std' will be translated to the standard line break decided by csv_helpers.decode_newline
input_row_terminator = 'std'
output_row_terminator = 'std'
input_charset_name = 'utf_8_sig'
output_charset_name = 'utf_8'
output_charset_error_mode = 'strict' # 'strict' | 'ignore' | 'replace' | 'backslashreplace'
input_charset_error_mode = 'strict' # 'strict' | 'ignore' | 'replace' | 'backslashreplace'
csv_cell_width_limit = 4*1024*1024 # python default is 131072 = 0x00020000
input_row_start_offset = 0
input_row_count_max = None
output_row_count_max = None
head_row_str = None
# [20160916 [db] I avoided using argparse in order to retain some flexibility for command syntax]
arg_count = len(arg_list)
arg_index = 1
while (arg_index < arg_count):
arg = arg_list[arg_index]
if (arg == "--help"
or arg == "-?"
):
show_help = True
elif (arg == "-o"
or arg == "--output"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
output_file_name = arg
elif (arg == "-E"
or arg == "--charset-in"
or arg == "--encoding-in"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
input_charset_name = arg
elif (arg == "-e"
or arg == "--charset-out"
or arg == "--encoding-out"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
output_charset_name = arg
elif (arg == "--charset-in-error-mode"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
input_charset_error_mode = arg
elif (arg == "--charset-out-error-mode"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
output_charset_error_mode = arg
elif (arg == "--charset-error-mode"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
input_charset_error_mode = arg
output_charset_error_mode = arg
elif (arg == "-S"
or arg == "--separator-in"
or arg == "--delimiter-in"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
input_delimiter = arg
elif (arg == "-s"
or arg == "--separator-out"
or arg == "--delimiter-out"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
output_delimiter = arg
elif (arg == "-W"
or arg == "--terminator-in"
or arg == "--newline-in"
or arg == "--endline-in"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
input_row_terminator = arg
elif (arg == "-w"
or arg == "--terminator-out"
or arg == "--newline-out"
or arg == "--endline-out"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
output_row_terminator = arg
elif (arg == "--cell-width-limit"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
csv_cell_width_limit = int(arg)
elif (arg == "-K"
or arg == "--row-offset-in"
or arg == "--offset"
or arg == "--skip"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
input_row_start_offset = int(arg)
elif (arg == "-N"
or arg == "--row-count-in"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
if ('ALL' == arg.upper()):
input_row_count_max = None
else:
input_row_count_max = int(arg)
elif (arg == "-n"
or arg == "--row-count-out"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
if ('ALL' == arg.upper()):
output_row_count_max = None
else:
output_row_count_max = int(arg)
elif (None != arg
and 0 < len(arg)
):
if (None == head_row_str):
head_row_str = arg
elif (None == input_file_name):
input_file_name = arg
arg_index += 1
head_row = None
if (None != head_row_str):
head_row = head_row_str.split(',')
if (None == head_row):
show_help = True
if (show_help):
out_io.write(help_text)
else:
input_charset_name = decode_charset_name(input_charset_name)
output_charset_name = decode_charset_name(output_charset_name)
input_row_terminator = decode_newline(input_row_terminator)
output_row_terminator = decode_newline(output_row_terminator)
input_delimiter = decode_delimiter_name(input_delimiter)
output_delimiter = decode_delimiter_name(output_delimiter)
in_file = None
out_file = None
try:
read_text_io_mode = 'rt'
#in_newline_mode = '' # don't translate newline chars
in_newline_mode = input_row_terminator
in_file_id = input_file_name
should_close_in_file = True
if (None == in_file_id):
in_file_id = in_io.fileno()
should_close_in_file = False
in_io = io.open(
in_file_id
,mode=read_text_io_mode
,encoding=input_charset_name
,newline=in_newline_mode
,errors=input_charset_error_mode
,closefd=should_close_in_file
)
if (should_close_in_file):
in_file = in_io
write_text_io_mode = 'wt'
out_newline_mode='' # don't translate newline chars
#out_newline_mode = output_row_terminator
out_file_id = output_file_name
should_close_out_file = True
if (None == out_file_id):
out_file_id = out_io.fileno()
should_close_out_file = False
out_io = io.open(
out_file_id
,mode=write_text_io_mode
,encoding=output_charset_name
,newline=out_newline_mode
,errors=output_charset_error_mode
,closefd=should_close_out_file
)
if (should_close_out_file):
out_file = out_io
in_csv = csv.reader(
in_io
,delimiter=input_delimiter
,lineterminator=input_row_terminator
)
out_csv = csv.writer(
out_io
,delimiter=output_delimiter
,lineterminator=output_row_terminator
)
execute(
in_csv
,out_csv
,input_row_terminator
,output_row_terminator
,input_row_start_offset
,input_row_count_max
,output_row_count_max
,head_row
)
except BrokenPipeError:
pass
finally:
if (None != in_file):
in_file.close()
in_file = None
if (None != out_file):
out_file.close()
out_file = None
def execute(
in_csv
,out_csv
,input_row_terminator
,output_row_terminator
,in_row_offset_start
,in_row_count_max
,out_row_count_max
,new_head_row
):
# first write the new header row
out_csv.writerow(new_head_row)
# then write the output using the csv-translate code
# [20170918 [db] This is just a copy of the code from -csv-translate;
# it is a bit overkill to include all of this here]
end_row = None
cr_newline = '\r'
lf_newline = '\n'
crlf_newline = '\r\n'
out_newline = output_row_terminator
in_row_count = 0
out_row_count = 0
in_row = next(in_csv, end_row)
while (end_row != in_row
and (None == in_row_count_max or in_row_count < in_row_count_max)
and (None == out_row_count_max or out_row_count < out_row_count_max)
):
in_row_count += 1
if (in_row_offset_start < in_row_count):
out_row = list(in_row)
column_count = len(out_row)
column_position = 0
while (column_position < column_count):
cell_value = out_row[column_position]
# fix newline characters in the data
# (some tools - like postgres - can't handle mixed newline chars)
if (None != cell_value):
# replace crlf with lf, then we will replace lf's with the output newline,
# this prevents us from turning a crlf into a double newline
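# e.g. with out_newline == '\n': 'a\r\nb\rc' -> 'a\nb\nc'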
cell_value = cell_value.replace(crlf_newline, lf_newline)
cell_value = cell_value.replace(cr_newline, lf_newline)
cell_value = cell_value.replace(lf_newline, out_newline)
out_row[column_position] = cell_value
column_position += 1
out_csv.writerow(out_row)
out_row_count += 1
in_row = next(in_csv, end_row)
def console_main():
main(sys.argv, sys.stdin, sys.stdout, sys.stderr)
if __name__ == "__main__":
console_main()
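# Illustrative invocation (the script name is a placeholder; the argument shapes are
# inferred from the option parsing above): prepend a header row to a CSV file:
#   python csv_prepend.py "id,name,email" input.csv -o output.csv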
|
mit
| -3,902,079,733,962,294,300
| 34.202417
| 101
| 0.495795
| false
| 3.827858
| false
| false
| false
|
Oleh-Hrebchuk/OpenVPN-TryFalse
|
vpn/models.py
|
1
|
3592
|
from django.db import models
# Create your models here.
class General(models.Model):
class Meta:
db_table = "general"
permissions = (('admins', "admins manage all settings openvpn"),)
general_vpn_name = models.TextField(max_length=200)
general_project_name = models.TextField(max_length=200)
general_server_ip = models.GenericIPAddressField()
general_server_port = models.IntegerField()
general_project_status = models.TextField(max_length=20)
def __unicode__(self):
return '%s %s' % (self.general_server_port, self.general_vpn_name)
class PathsVPN(models.Model):
class Meta:
db_table = "pathsvpn"
pathsvpn_vpn_path = models.TextField(max_length=200)
pathsvpn_general = models.ForeignKey(General)
def __unicode__(self):
return self.pathsvpn_vpn_path
class Certs(models.Model):
class Meta:
db_table = 'certs'
certs_user_name = models.TextField(max_length=200)
certs_general = models.ForeignKey(General)
class Revoke(models.Model):
class Meta:
db_table = 'revoke'
certs_revoke_name = models.TextField(max_length=200)
certs_revoke_status = models.TextField(max_length=200)
certs_general = models.ForeignKey(General)
class ProgressBarCheckReq(models.Model):
class Meta:
db_table = 'progress_bar_check_req'
progress_percents = models.TextField(max_length=100,default=0)
class ProgressBarInstall(models.Model):
class Meta:
db_table = 'progress_bar_install'
progress_percents = models.TextField(max_length=100,default=0)
class MailConfiguration(models.Model):
class Meta:
db_table = 'mail_configuration'
mail_name = models.TextField(max_length=30)
mail_smtp_server = models.TextField(max_length=30)
mail_smtp_helo = models.TextField(max_length=30)
mail_smtp_email = models.TextField(max_length=30)
mail_port = models.IntegerField()
mail_tls = models.TextField(max_length=30)
mail_pass = models.TextField(max_length=30)
class RouteVPN(models.Model):
class Meta:
db_table = 'route_vpn'
route_name = models.TextField(max_length=100)
status = models.TextField(max_length=100)
user = models.ForeignKey('Users',related_name='route')
class Users(models.Model):
class Meta:
db_table = 'users'
user_routes = models.TextField(max_length=100)
class RouteAccessList(models.Model):
class Meta:
db_table = 'route_access_list'
route_name = models.TextField(max_length=100)
route_general = models.ForeignKey(General)
access = models.TextField(max_length=100)
user = models.ForeignKey('Revoke',related_name='route')
user_name = models.TextField(max_length=100)
class RouteList(models.Model):
class Meta:
db_table = 'route_list'
route_name = models.TextField(max_length=100)
route_general = models.ForeignKey(General)
class Groups(models.Model):
class Meta:
db_table = 'groups'
name_group = models.TextField(max_length=100)
route_general = models.ForeignKey(General)
class GroupsAcl(models.Model):
class Meta:
db_table = 'groups_acl'
acl_name = models.TextField(max_length=100)
acl_general = models.ForeignKey(General)
acl_group = models.ForeignKey(Groups)
class GroupsUsersAcl(models.Model):
class Meta:
db_table = 'groups_users_acl'
group_name = models.TextField(max_length=100)
group_general = models.ForeignKey(General)
group_user_name = models.TextField(max_length=100)
|
gpl-3.0
| 8,188,553,040,697,901,000
| 23.772414
| 74
| 0.679287
| false
| 3.563492
| false
| false
| false
|
jhh/puka
|
puka/middleware/debug.py
|
1
|
1769
|
import os
from django.conf import settings
from django.db import connection
def terminal_width():
"""
Function to compute the terminal width.
WARNING: This is not my code, but I've been using it forever and
I don't remember where it came from.
"""
width = 0
try:
import fcntl
import struct
import termios
s = struct.pack("HHHH", 0, 0, 0, 0)
x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
width = struct.unpack("HHHH", x)[1]
except:
pass
if width <= 0:
try:
width = int(os.environ["COLUMNS"])
except:
pass
if width <= 0:
width = 80
return width
class SqlPrintingMiddleware:
"""
Middleware which prints out a list of all SQL queries done
for each view that is processed. This is only useful for debugging.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
indentation = 2
pad = " " * indentation
if len(connection.queries) > 0 and settings.DEBUG:
width = terminal_width()
total_time = 0.0
for query in connection.queries:
nice_sql = query["sql"].replace('"', "")
sql = f"\033[1;31m[{query['time']}]\033[0m {nice_sql}"
total_time = total_time + float(query["time"])
while len(sql) > width - indentation:
print(f"{pad}{sql[: width - indentation]}")
sql = sql[width - indentation :]
print(f"{pad}{sql}\n")
print(f"{pad}\033[1;32m[TOTAL TIME: {str(total_time)} seconds]\033[0m")
return response
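# Illustrative configuration (module path assumed from this file's location):
# enable the middleware only while developing, e.g. in settings.py:
#   if DEBUG:
#       MIDDLEWARE += ["puka.middleware.debug.SqlPrintingMiddleware"]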
|
mit
| 3,804,612,522,956,817,400
| 28.983051
| 83
| 0.549463
| false
| 4.020455
| false
| false
| false
|
ashtonteng/squad_exp
|
SelfMatchingLayer.py
|
1
|
6757
|
import tensorflow as tf
from tensorflow.contrib import rnn
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]="2"
class SelfMatchingLayer():
def __init__(self, args, inputs, scope):
print("building self-matching layer", scope)
batch_size = args.batch_size
vocab_size = args.vocab_size
hidden_size = args.SelfMatchingLayer_size
model = args.model
num_layers = args.num_layers
training = args.training
# NOTE: input_keep_prob/output_keep_prob are used below but were not defined in the
# original file; they are assumed to come from args (defaulting to 1.0 = no dropout).
input_keep_prob = getattr(args, "input_keep_prob", 1.0)
output_keep_prob = getattr(args, "output_keep_prob", 1.0)
#inputs = #batch_size x seq_length x hidden_size
max_seq_length = tf.shape(inputs)[1]
def compute_lengths(inputs):
used = tf.sign(tf.reduce_max(tf.abs(inputs), reduction_indices=2))
lengths = tf.reduce_sum(used, reduction_indices=1)
lengths = tf.cast(lengths, tf.int32) #lengths must be integers
return lengths
seq_lengths = compute_lengths(inputs)
# dropout beta testing: double check which one should affect next line
#if training and output_keep_prob < 1.0:
# inputs = tf.nn.dropout(inputs, output_keep_prob)
if model == 'rnn':
cell_fn = rnn.BasicRNNCell
elif model == 'gru':
cell_fn = rnn.GRUCell
elif model == 'lstm':
cell_fn = rnn.BasicLSTMCell
elif model == 'nas':
cell_fn = rnn.NASCell
else:
raise Exception("model type not supported: {}".format(model))
"""
1) W_vP * v_jP = how important is the jth p word to the t'th p word
2) W_vP2 * v_tP = how important is the t'th p word just by itself
"""
with tf.variable_scope(scope): #variables unsed in the pointerRNN
W_vP = tf.get_variable("W_vP", [hidden_size, hidden_size])#, initializer=tf.random_normal_initializer)
W_vP2 = tf.get_variable("W_vP2", [hidden_size, hidden_size])#, initializer=tf.random_normal_initializer)
W_g = tf.get_variable("W_g", [2*hidden_size, 2*hidden_size])#, initializer=tf.random_normal_initializer)
v = tf.get_variable("v", [hidden_size, 1])
tf.summary.histogram("W_vP_self", W_vP)
tf.summary.histogram("W_g_self", W_g)
tf.summary.histogram("W_vP2_self", W_vP)
tf.summary.histogram("v_self", v)
W_vP_tiled = tf.tile(tf.expand_dims(W_vP, 0), [batch_size, 1, 1]) #batch_size x hidden_size x hidden_size
W_vP2_tiled = tf.tile(tf.expand_dims(W_vP2, 0), [batch_size, 1, 1]) #batch_size x hidden_size x hidden_size
v_tiled = tf.tile(tf.expand_dims(v, 0), [batch_size, 1, 1]) #batch_size x hidden_size x 1
weighted_inputs = tf.matmul(inputs, W_vP_tiled) #batch_size x seq_length x hidden_size
weighted_inputs2 = tf.matmul(inputs, W_vP2_tiled) #batch_size x seq_length x hidden_size
#weighted_inputs2_tiled = tf.tile(tf.expand_dims(weighted_inputs2, 1), [1, max_seq_length, 1, 1]) #batch_size x seq_length x seq_length x hidden_size
#tf.matmul(tf.tanh(tf.add(tf.expand_dims(weighted_inputs, 1), weighted_inputs2_tiled)), v_tiled) #batch_size x seq_length x
#create TensorArray of length seq_length, containing tensors of size batch_size x 2*hidden_size, to be populated by tf.while_loop
initial_ta = tf.TensorArray(tf.float32, size=max_seq_length)
def condition(time, input_ta):
#elements_finished = (time >= seq_lengths) #this operation produces boolean tensor of [batch_size] defining if corresponding sequence has ended
#finished = tf.reduce_all(elements_finished) #AND operation over all batches. True if all batches finished.
return tf.less(time, max_seq_length)
def body(time, input_ta):
time_index = tf.stack([tf.constant(0, dtype=tf.int32), time, tf.constant(0, dtype=tf.int32)], axis=0)
inputs_slice = tf.slice(inputs, time_index, [-1, 1, -1]) #batch_size x 1 x hidden_size
weighted_inputs_slice = tf.matmul(inputs_slice, W_vP2_tiled) #batch_size x 1 x hidden_size
#time_index = tf.stack([tf.constant(0, dtype=tf.int32), time, tf.constant(0, dtype=tf.int32)], axis=0)
#weighted_inputs2_slice = tf.slice(weighted_inputs2, time_index, [-1, 1, -1]) #batch_size x 1 x hidden_size
logits = tf.matmul(tf.tanh(tf.add(weighted_inputs, weighted_inputs_slice)), v_tiled) #batch_size x seq_length x hidden_size * batch_size x hidden_size x 1 = #batch_size x seq_length x 1
attention_over_passage = tf.nn.softmax(logits, dim=1) # batch_size x seq_length x 1
weighted_passage = tf.reduce_sum(tf.multiply(attention_over_passage, inputs), axis=1) #batch_size x hidden_size
weighted_passage_with_inputs = tf.concat([tf.squeeze(inputs_slice, axis=1), weighted_passage], axis=1)
gate = tf.sigmoid(tf.matmul(weighted_passage_with_inputs, W_g)) #batch_size x hidden_size
output_ta = input_ta.write(time, tf.multiply(gate, weighted_passage_with_inputs))
return time + 1, output_ta
time = tf.constant(0)
time, output_ta = tf.while_loop(condition, body, [time, initial_ta])
BiRNN_inputs_stacked = tf.reshape(output_ta.stack(), [batch_size, max_seq_length, 2*hidden_size])
def compute_lengths(inputs):
used = tf.sign(tf.reduce_max(tf.abs(inputs), reduction_indices=2))
lengths = tf.reduce_sum(used, reduction_indices=1)
lengths = tf.cast(lengths, tf.int32) #lengths must be integers
return lengths
seq_lengths = compute_lengths(inputs)
cells_fw = []
for _ in range(num_layers):
cell = cell_fn(2*hidden_size)
if training and (output_keep_prob < 1.0 or input_keep_prob < 1.0):
cell = rnn.DropoutWrapper(cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob)
cells_fw.append(cell) #cells is num_layers of cell stacked together
cells_bw = []
for _ in range(num_layers):
cell = cell_fn(2*hidden_size)
if training and (output_keep_prob < 1.0 or input_keep_prob < 1.0):
cell = rnn.DropoutWrapper(cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob)
cells_bw.append(cell)
initial_states_fw = [cells_fw[i].zero_state(batch_size, tf.float32) for i in range(num_layers)]
initial_states_bw = [cells_bw[i].zero_state(batch_size, tf.float32) for i in range(num_layers)]
outputs, output_states_fw, output_states_bw = rnn.stack_bidirectional_dynamic_rnn(cells_fw, cells_bw, BiRNN_inputs_stacked, initial_states_fw, initial_states_bw, dtype=tf.float32, sequence_length=seq_lengths, scope=scope)
self.outputs = outputs
|
mit
| -9,153,670,270,058,485,000
| 57.258621
| 229
| 0.631789
| false
| 3.335143
| false
| false
| false
|
google/nitroml
|
examples/config.py
|
1
|
1213
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""NitroML config.
This file defines environments for nitroml.
"""
import os
USE_KUBEFLOW = True
PIPELINE_NAME = 'examples'
GCS_BUCKET_NAME = 'artifacts.nitroml-brain-xgcp.appspot.com'
PIPELINE_ROOT = os.path.join('gs://', GCS_BUCKET_NAME, PIPELINE_NAME)
TF_DOWNLOAD_DIR = os.path.join('gs://', GCS_BUCKET_NAME, 'tensorflow-datasets')
OTHER_DOWNLOAD_DIR = os.path.join('gs://', GCS_BUCKET_NAME, 'other-datasets')
ENDPOINT = '38070e0315a0e15-dot-us-east1.pipelines.googleusercontent.com'
TFX_IMAGE = 'tensorflow/tfx:0.23.0.dev20200716'
|
apache-2.0
| 6,816,562,970,263,589,000
| 38.129032
| 79
| 0.700742
| false
| 3.436261
| false
| false
| false
|
scopenco/hagent
|
lib/modules/ip/IpAddr.py
|
1
|
3971
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Set of functions/classes for ip administration
Author: Andrey Scopenco andrey@scopenco.net
'''
import sys
sys.path.insert(0, '/usr/local/hagent/lib')
import logging
from cli import Output
from hagent_db import add_record, del_record, update_record, \
get_record_attr, get_account_resources
class IpAddr(object):
'''Base class for ip administration '''
def __init__(self, options, ip):
self.options = options
self.ip = ip
self.db = self.options.get('db_file')
self.output = {'status': 0}
self.service_attr = {}
if not self.ip:
self.output['status'] = 1
self.output['status_msg'] = 'argument <ip> not specified'
def create(self, account, shared):
'''Create ip and assign to user.'''
try:
if self.output['status']:
raise Output
# check if some ip exist
check_attr = get_record_attr(self.db, 'Ip', self.ip)
if not check_attr['status']:
self.output['status'] = 1
self.output['status_msg'] = 'Ip %s exist' % self.ip
raise Output
if shared == 'on':
self.service_attr['shared'] = shared
else:
if account:
self.service_attr['account'] = account
self.output.update(add_record(self.db, 'Ip',
self.ip, self.service_attr))
raise Output
except Output:
return self.output
def delete(self):
'''Delete ip address.'''
try:
if self.output['status']:
raise Output
#TODO
# add check if ip assigned to one of users
# if so show error
self.output.update(del_record(self.db, 'Ip', self.ip))
raise Output
except Output:
return self.output
def update(self, account, shared='off', free='off', restart=1):
'''Change account for ip'''
restart = str(restart)
if restart != str(0):
restart = 1
try:
if not account and shared == 'off' and free == 'off':
self.output['status'] = 1
self.output['status_msg'] = 'argument <account> not specified'
if self.output['status']:
raise Output
# check if ip exist
check_attr = get_record_attr(self.db, 'Ip', self.ip)
if check_attr['status']:
self.output.update(check_attr)
raise Output
self.service_attr.update(check_attr)
del(self.service_attr['status'])
if free == 'on':
# TODO
# remove ip from all domains
if 'account' in self.service_attr:
del(self.service_attr['account'])
if 'shared' in self.service_attr:
del(self.service_attr['shared'])
else:
if shared == 'on':
# TODO
# remove ip from account and assign to shared
self.service_attr['shared'] = shared
if 'account' in self.service_attr:
del(self.service_attr['account'])
else:
# TODO
# remove from shared and assign to account
# if shared add is only one, show error
self.service_attr['account'] = account
if 'shared' in self.service_attr:
del(self.service_attr['shared'])
self.output.update(update_record(
self.db, 'Ip', self.ip, self.service_attr, remove_attr=True))
raise Output
except Output:
return self.output
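# Illustrative usage (db path and address are placeholders):
#   ip = IpAddr({'db_file': '/path/to/hagent.db'}, '192.0.2.10')
#   result = ip.update('someaccount')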
if __name__ == "__main__":
print __doc__
|
gpl-2.0
| 5,762,475,317,647,996,000
| 30.267717
| 78
| 0.498363
| false
| 4.422049
| false
| false
| false
|
KBNLresearch/iromlab
|
iromlab/kbapi/sru.py
|
1
|
12625
|
#! /usr/bin/env python
"""
Python API for KB SRU
"""
import sys
import urllib
import requests
from lxml import etree
SRU_BASEURL = 'http://jsru.kb.nl/sru/sru'
SRU_BASEURL += '?version=1.2&maximumRecords=%i'
SRU_BASEURL += '&operation=searchRetrieve'
SRU_BASEURL += '&startRecord=%i'
SRU_BASEURL += '&recordSchema=%s'
SRU_BASEURL += '&x-collection=%s&query=%s'
SETS = {'ANP': {'collection': 'ANP',
'description_en': 'Radio Bulletins ANP Press Agency',
'description_nl': 'ANP Radiobulletins Digitaal',
'metadataPrefix': 'didl',
'recordschema': 'dcx',
'setname': 'anp',
'time_period': [1937, 1989]},
'DPO': {'collection': 'DPO_boekdeel',
'description_en': 'Early Dutch Books Online',
'description_nl': 'Early Dutch Books Online',
'metadataPrefix': 'didl',
'recordschema': 'ddd',
'setname': 'DPO',
'time_period': [1781, 1800]},
'BYVANCK': {'description_en': 'Medieval Illuminated Manuscripts',
'description_nl': 'Middeleeuwse Verluchte Handschriften',
'metadataPrefix': 'dcx',
'setname': 'BYVANCK',
'time_period': [500, 1500]},
'SGD': {'description_en': 'States General Digital',
'description_nl': 'Staten-Generaal Digitaal',
'metadataPrefix': 'dcx',
'setname': 'sgd:register',
'time_period': [1962, 1994]},
'GGC': {'collection': 'GGC',
'description_en': 'General Catalogue KB',
'description_nl': 'Algemene Catalogus KB',
'metadataPrefix': 'dcx',
'recordschema': 'dcx',
'setname': 'ggc',
'time_period': [1937, 2021]}} # No idea what to use here?
# Name spaces in GGC records
srw_ns = 'http://www.loc.gov/zing/srw/'
tel_ns = 'http://krait.kb.nl/coop/tel/handbook/telterms.html'
xsi_ns = 'http://www.w3.org/2001/XMLSchema-instance'
dc_ns = 'http://purl.org/dc/elements/1.1/'
dcterms_ns = 'http://purl.org/dc/terms/'
dcx_ns = 'http://krait.kb.nl/coop/tel/handbook/telterms.html'
NSMAPGGC = {"srw": srw_ns,
"tel": tel_ns,
"xsi": xsi_ns,
"dc": dc_ns,
"dcterms": dcterms_ns,
"dcx": dcx_ns}
class response():
def __init__(self, record_data, sru):
self.record_data = record_data
self.sru = sru
def getElementText(self, tagName, attributeName, attributeValue):
# Returns text content of all elements for which tag matches tagName,
# and attribute value equals attributeValue. Set attributeName to empty
# string to get all tagName matches.
textFields = []
for r in self.record_data.iter():
if r.tag == tagName:
if attributeName != '':
try:
if r.attrib[attributeName] == attributeValue:
textFields.append(r.text)
except KeyError:
pass
else:
textFields.append(r.text)
return textFields
@property
def records(self):
if self.sru.nr_of_records == 0:
record_data = "<xml></xml>"
else:
ns = {'zs': 'http://www.loc.gov/zing/srw/'}
record_data = self.record_data.xpath("zs:records/zs:record",
namespaces=ns)[0]
return record(record_data, self.sru)
# Below property functions all return a list with all instances that satisfy
# criteria
@property
def typesDutch(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}type',
'{http://www.w3.org/XML/1998/namespace}lang',
'nl'))
@property
def typesDCMI(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}type',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'DCMIType'))
@property
def identifiersISBN(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:ISBN'))
@property
def identifiersBrinkman(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:Brinkman'))
@property
def identifiersURI(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:URI'))
@property
def identifiersOCLC(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'OCLC'))
@property
def languagesDutch(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/XML/1998/namespace}lang',
'nl'))
@property
def languagesEnglish(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/XML/1998/namespace}lang',
'en'))
@property
def languagesFrench(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/XML/1998/namespace}lang',
'fr'))
@property
def languagesISO639(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:ISO639-2'))
@property
def dates(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}date',
'',
''))
@property
def extents(self):
return(self.getElementText('{http://purl.org/dc/terms/}extent',
'',
''))
@property
def creators(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}creator',
'',
''))
@property
def contributors(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}contributor',
'',
''))
@property
def titles(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}title',
'',
''))
@property
def titlesMain(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}title',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:maintitle'))
@property
def titlesIntermediate(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}title',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:intermediatetitle'))
@property
def publishers(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}publisher',
'',
''))
@property
def countries(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}country',
'',
''))
@property
def subjectsBrinkman(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}subject',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:Brinkman'))
@property
def subjectsISO9707(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}subject',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'ISO_9707_[Brinkman]'))
@property
def subjectsUNESCO(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}subject',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'UNESCO'))
@property
def collectionIdentifiers(self):
return(self.getElementText('{http://purl.org/dc/terms/}isPartOf',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:collectionIdentifier'))
@property
def recordIdentifiersURI(self):
return(self.getElementText('{http://krait.kb.nl/coop/tel/handbook/telterms.html}recordIdentifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:URI'))
@property
def annotations(self):
# Note that annotations sometimes contain language or itemID attributes;
# ignored for now (collect everything).
return(self.getElementText('{http://krait.kb.nl/coop/tel/handbook/telterms.html}annotation',
'',
''))
class record():
def __init__(self, record_data, sru):
self.record_data = record_data
self.sru = sru
def __iter__(self):
return self
# This works under Python 2.7
def next(self):
if self.sru.nr_of_records == 0:
raise StopIteration
if self.sru.startrecord < self.sru.nr_of_records + 1:
record_data = self.sru.run_query()
self.sru.startrecord += 1
return response(record_data, self.sru)
else:
raise StopIteration
# This works under Python 3
def __next__(self):
if self.sru.nr_of_records == 0:
raise StopIteration
if self.sru.startrecord < self.sru.nr_of_records + 1:
record_data = self.sru.run_query()
self.sru.startrecord += 1
return response(record_data, self.sru)
else:
raise StopIteration
class sru():
DEBUG = False
collection = False
maximumrecords = 50
nr_of_records = 0
query = ""
recordschema = False
sru_collections = SETS
startrecord = 0
def search(self, query, collection=False,
startrecord=1, maximumrecords=1, recordschema=False):
self.maximumrecords = maximumrecords
if sys.version.startswith('3'):
self.query = urllib.parse.quote_plus(query)
elif sys.version.startswith('2'):
self.query = urllib.quote_plus(query)
self.startrecord = startrecord
if collection not in self.sru_collections:
raise Exception('Unknown collection')
self.collection = self.sru_collections[collection]['collection']
if not self.collection:
raise Exception('Error, no collection specified')
if not recordschema:
self.recordschema = self.sru_collections[collection]['recordschema']
else:
self.recordschema = recordschema
record_data = self.run_query()
nr_of_records = [i.text for i in record_data.iter() if
i.tag.endswith('numberOfRecords')][0]
self.nr_of_records = int(nr_of_records)
if self.nr_of_records > 0:
return response(record_data, self)
return False
def run_query(self):
url = SRU_BASEURL % (self.maximumrecords, self.startrecord,
self.recordschema, self.collection, self.query)
if self.DEBUG:
sys.stdout.write(url)
r = requests.get(url)
if not r.status_code == 200:
raise Exception('Error while getting data from %s' % url)
record_data = etree.fromstring(r.content)
return record_data
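# Illustrative usage (collection and query are placeholders):
#   client = sru()
#   resp = client.search('dc.title any "example"', collection='GGC', maximumrecords=10)
#   if resp:
#       for r in resp.records:
#           print(r.titles)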
|
apache-2.0
| -5,763,885,260,459,784,000
| 35.594203
| 106
| 0.515644
| false
| 4.009209
| false
| false
| false
|
klen/fquest
|
base/fquest/celery.py
|
1
|
2029
|
from __future__ import absolute_import
from sqlalchemy.exc import IntegrityError, DataError
from datetime import datetime, timedelta
from celery import Celery
from celery.utils.log import get_task_logger
from flask import current_app as app
from ..app import create_app
logger = get_task_logger('fquest')
if not app:
app = create_app()
ctx = app.test_request_context()
ctx.push()
celery = Celery('fquest')
celery.config_from_object(dict(
BROKER_URL=app.config.get('BROKER_URL'),
CELERYBEAT_SCHEDULE={
'fquest-beat': {
'task': 'base.fquest.celery.beat',
'schedule': app.config.get('BEAT_SCHEDULE'),
},
}
))
@celery.task(ignore_result=True)
def beat():
" Fetch character progress. "
from .models import Character, db, Event
from ..ext import cache
last_synced = cache.get('fquest.last_synced')
logger.info('BEAT')
if last_synced:
characters = Character.query.filter(Character.facebook_synced <= last_synced - timedelta(minutes=10)).limit(10).all()
else:
characters = [Character.query.order_by(Character.facebook_synced.desc()).first()]
cache.set('fquest.last_synced', datetime.now(), timeout=300)
for character in characters:
try:
if Event.fire(character):
db.session.commit()
except (IntegrityError, DataError):
db.session.rollback()
@celery.task
def publish(token, level, ignore_result=True):
" Async action publush. "
from facepy import GraphAPI, FacepyError
graph = GraphAPI(token)
try:
logger.info(level, token)
graph.session.request('POST', '%s/me/fquest-klen:raised' % graph.url, data=dict(
access_token=token,
level="http://fquest.node42.org%s" % level
))
# graph.post('/me/fquest-klen:raised', data=dict(
# level="http://fquest.node42.org%s" % level
# ))
except FacepyError, e:
logger.error(str(e))
# pymode:lint_ignore=E061
|
bsd-3-clause
| -30,655,614,917,419,030
| 25.012821
| 125
| 0.637753
| false
| 3.498276
| false
| false
| false
|
shubhamjain0594/OthelloReinforcementLearning
|
nn.py
|
1
|
8833
|
import neurolab as nl
import game2
import othello
import ntuplesystematic as nts
import time
import random
class nn:
def __init__(self):
self.x = [[-1,1] for x in range(64)]
self.net = nl.net.newff(self.x,[1])
#self.net.trainf = nl.train.train_gd
self.moveb = 0
self.movew = 0
self.last_vb = 0
self.last_vw = 0
self.fin_v = []
self.fin_val = []
def play_move(self,game,epsilon = 0):
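# Summary of the branches below (descriptive comment, not part of the original file):
# with probability `epsilon` a random legal move is played; otherwise the move that
# maximises (black, player == -1) or minimises (white) the network's board evaluation
# is chosen. (board, TD target) pairs are accumulated in fin_v/fin_val and used to
# train the network when reset() is called.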
moves = game.generate_moves()
num = random.uniform(0,1)
if(num <= epsilon):
temp = game.copy()
if(game.player==-1):
if(self.moveb == 0):
move = random.choice(moves)
temp.play_move(move)
v=[]
for k in range(8):
for l in range(8):
v.append(temp.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
self.moveb = self.moveb+1
self.last_vb = v
return (v1[0][0], move)
else:
if(moves[0]==None):
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(game.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = [v1]
#print 0
#print self.last_vb
self.fin_v.append(self.last_vb)
self.fin_val.append(v1)
self.last_vb = v
return (0,None)
else:
move = random.choice(moves)
reward = 0
temp.play_move(move)
if(temp.terminal_test()):
if(temp.score()>0):
reward=-1
elif(temp.score()<0):
reward = 1
v=[]
for k in range(8):
for l in range(8):
v.append(temp.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = reward + v1
v1 = [v1]
#print 1
#print self.last_vb
self.fin_v.append(self.last_vb)
self.fin_val.append(v1)
self.last_vb = v
return (v1[0],move)
else:
if(self.movew == 0):
move = random.choice(moves)
temp.play_move(move)
v=[]
for k in range(8):
for l in range(8):
v.append(temp.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
self.movew = self.movew+1
self.last_vw = v
return (v1[0][0], move)
else:
if(moves[0]==None):
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(game.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = [v1]
#print 2
#print self.last_vw
self.fin_v.append(self.last_vw)
self.fin_val.append(v1)
self.last_vw = v
return (0,None)
else:
move = random.choice(moves)
reward = 0
temp.play_move(move)
if(temp.terminal_test()):
if(temp.score()>0):
reward=-1
elif(temp.score()<0):
reward = 1
v=[]
for k in range(8):
for l in range(8):
v.append(temp.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = reward + v1
v1 = [v1]
#print 3
#print self.last_vw
self.fin_v.append(self.last_vw)
self.fin_val.append(v1)
self.last_vw = v
return (v1[0],move)
else:
if(game.player == -1):
if(self.moveb==0):
j=0
max1 = 0
best_v = 0
best_move = None
for move in moves:
temp = game.copy()
temp.play_move(move)
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(temp.get_color([k,l]))
#print v
v2 = [v]
v1 = self.net.sim(v2)
if(j==0):
max1 = v1[0][0]
best_v = v
best_move = move
elif(v1[0][0]>max1):
max1 = v1[0][0]
best_move = move
best_v =v
j = j+1
self.moveb = self.moveb+1
self.last_vb = best_v
return (max1, best_move)
else:
if(moves[0]==None):
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(game.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = [v1]
#print 4
#print self.last_vb
self.fin_v.append(self.last_vb)
self.fin_val.append(v1)
self.last_vb = v
return (0,None)
else:
j=0
max1 = 0
best_v = 0
best_move = 0
for move in moves:
temp = game.copy()
temp.play_move(move)
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(temp.get_color([k,l]))
#print v
v2 = [v]
v1 = self.net.sim(v2)
if(j==0):
max1 = v1[0][0]
best_v = v
best_move = move
elif(v1[0][0]>max1):
max1 = v1[0][0]
best_move = move
best_v =v
j = j+1
temp = game.copy()
reward = 0
temp.play_move(best_move)
if(temp.terminal_test()):
if(temp.score()>0):
reward=-1
elif(temp.score()<0):
reward = 1
v2 = [best_v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = reward + v1
v1 = [v1]
#print 5
#print self.last_vw
self.fin_v.append(self.last_vb)
self.fin_val.append(v1)
self.last_vb = best_v
return (max1,best_move)
else:
if(self.movew==0):
j=0
max1 = 0
best_v = 0
best_move = 0
for move in moves:
temp = game.copy()
temp.play_move(move)
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(temp.get_color([k,l]))
#print v
v2 = [v]
v1 = self.net.sim(v2)
if(j==0):
max1 = v1[0][0]
best_v = v
best_move = move
elif(v1[0][0]<max1):
max1 = v1[0][0]
best_move = move
best_v =v
j = j+1
self.movew = self.movew+1
self.last_vw = best_v
return (max1,best_move)
else:
if(moves[0]==None):
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(game.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = [v1]
#print 6
#print self.last_vw
self.fin_v.append(self.last_vw)
self.fin_val.append(v1)
self.last_vw = v
return (0,None)
else:
j=0
max1 = 0
best_v = 0
best_move = 0
for move in moves:
temp = game.copy()
temp.play_move(move)
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(temp.get_color([k,l]))
#print v
v2 = [v]
v1 = self.net.sim(v2)
if(j==0):
max1 = v1[0][0]
best_v = v
best_move = move
elif(v1[0][0]<max1):
max1 = v1[0][0]
best_move = move
best_v =v
j = j+1
temp = game.copy()
reward = 0
temp.play_move(best_move)
if(temp.terminal_test()):
if(temp.score()>0):
reward=-1
elif(temp.score()<0):
reward = 1
v2 = [best_v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = reward + v1
v1 = [v1]
#print 7
#print self.last_vw
self.fin_v.append(self.last_vw)
self.fin_val.append(v1)
self.last_vw = best_v
return (max1,best_move)
def reset(self):
#print self.fin_v
#print self.fin_val
error = self.net.train(self.fin_v,self.fin_val,epochs=5,show=1)
self.moveb = 0
self.movew = 0
self.last_vb = 0
self.last_vw = 0
self.fin_v = []
self.fin_val = []
def reset_without_train(self):
self.moveb = 0
self.movew = 0
self.last_vb = 0
self.last_vw = 0
self.fin_v = []
self.fin_val = []
if __name__ == "__main__":
"""
Creates a main player
"""
playernew = nn()
nTuplesSystematicObject = nts.nTuplesSystematic()
game2.play(othello.game(), game2.player(lambda x: playernew.play_move(x)),game2.player(lambda x: nTuplesSystematicObject.play_next_move(x)), True)
playernew.reset_without_train()
time.sleep(5)
k = 100
for i in range(k):
print(i)
game2.play(othello.game(), game2.player(lambda x: playernew.play_move(x,0.3)),game2.player(lambda x: playernew.play_move(x,0.3)), False)
playernew.reset()
wins = [0, 0]
for i in range(100):
winner = game2.play(othello.game(), game2.player_epsilon(lambda x: playernew.play_move(x)),game2.player_epsilon(lambda x: nTuplesSystematicObject.play_next_move(x)), False)
if winner == 1:
wins[0] += 1
elif winner == 2:
wins[1] += 1
winner = game2.play(othello.game(),game2.player_epsilon(lambda x: nTuplesSystematicObject.play_next_move(x)), game2.player_epsilon(lambda x: playernew.play_move(x)), False)
if winner == 2:
wins[0] += 1
elif winner == 1:
wins[1] += 1
print wins
f = open('results','a')
val = (k,0.001,'epsilon',wins)
val = str(val)
# the original file ends here; presumably the result tuple is appended to the
# 'results' file opened above, e.g.:
f.write(val + '\n')
f.close()
|
gpl-2.0
| -5,274,029,948,232,391,000
| 22.744624
| 174
| 0.510246
| false
| 2.476311
| false
| false
| false
|
cjdinsmore/slack-karma-bot
|
sqlite_helper.py
|
1
|
2956
|
import sqlite3
from models import DbMessage, DbUser, ReactionNames
class SqliteHelper(object):
"""
This class manages interfacing with the SQLite database. It stores DbUser and DbMessage
objects (see: models.py).
"""
def __init__(self, db_file):
self.connection = sqlite3.connect(db_file)
self.cursor = self.connection.cursor()
def add_users(self, users):
"""
Adds users to the database.
"""
query = 'INSERT INTO User VALUES (?, ?, ?, ?)'
users_as_rows = []
for user in users:
users_as_rows.append(user.to_row())
self._execute_many_query(query, users_as_rows)
return self.cursor.fetchall()
def get_votes_for_user(self, user_id):
"""
Fetches a sum of the user's upvotes, returning a tuple (upvotes, downvotes)
"""
query = 'SELECT sum(upvotes), sum(downvotes) FROM Message WHERE user_id=?'
args = (user_id,)
self._execute_query(query, args)
return self.cursor.fetchone()
def get_user_by_id(self, user_id):
"""
Self-explanatory.
"""
query = 'SELECT * FROM User WHERE slack_id=?'
args = (user_id,)
self._execute_query(query, args=args)
# _execute_query does not return rows, so fetch the result from the cursor
row = self.cursor.fetchone()
if row:
return DbUser(row)
def get_messages_for_user(self, user_id):
"""
Fetches all messages in the database for a given user.
Returns an array of DbMessage objects (models.py)
"""
messages = []
args = (user_id,)
query = "SELECT * FROM Message WHERE user_id=?"
self._execute_query(query, args)
rows = self.cursor.fetchall()
for row in rows:
messages.append(DbMessage(row))
return messages
def get_latest_message_timestamp(self):
"""
Gets the timestamp for the most recent message.
"""
query = 'SELECT timestamp FROM Message ORDER BY timestamp DESC'
self._execute_query(query)
return self.cursor.fetchone()[0]
def add_messages(self, messages):
"""
Adds messages to the database.
"""
query = 'INSERT INTO Message VALUES (NULL, ?, ?, ?, ?, ?)'
messages_as_rows = []
for db_message in messages:
messages_as_rows.append(db_message.to_row())
self._execute_many_query(query, messages_as_rows)
return self.cursor.fetchall()
def _execute_query(self, query, args=None):
"""
Protected method that executes a database query.
`args` represents arguments for the WHERE clause, like user_id and such.
"""
if args:
self.cursor.execute(query, args)
else:
self.cursor.execute(query)
def _execute_many_query(self, query, args):
with self.connection:
self.cursor.executemany(query, args)
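# Illustrative usage (file name and Slack user id are placeholders):
#   helper = SqliteHelper('karma.db')
#   upvotes, downvotes = helper.get_votes_for_user('U023BECGF')
#   messages = helper.get_messages_for_user('U023BECGF')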
|
mit
| -4,904,182,541,421,417,000
| 32.213483
| 95
| 0.569012
| false
| 4.094183
| false
| false
| false
|
VertNet/api
|
Download/CountHandler.py
|
1
|
3989
|
# This file is part of VertNet: https://github.com/VertNet/webapp
#
# VertNet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# VertNet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with VertNet. If not, see: http://www.gnu.org/licenses
"""Download service.
Get parameters from request
Get record count from vnsearch.query_rec_counter
Send email to user with result
"""
import os
import json
import logging
from datetime import datetime
from google.appengine.api import search, taskqueue, mail
import webapp2
import Search.search as vnsearch
from config import OPTIMUM_CHUNK_SIZE
LAST_UPDATED = '2016-05-20T12:37:29+CEST'
IS_DEV = os.environ.get('SERVER_SOFTWARE', '').startswith('Development')
if IS_DEV:
QUEUE_NAME = 'default'
else:
QUEUE_NAME = 'apitracker'
class CountHandler(webapp2.RequestHandler):
def post(self):
# Get parameters from request
q = json.loads(self.request.get('q'))
latlon = self.request.get('latlon')
country = self.request.get('country')
user_agent = self.request.get('user_agent')
requesttime = self.request.get('requesttime')
reccount = int(self.request.get('reccount'))
fromapi = self.request.get('fromapi')
source = self.request.get('source')
cursor = self.request.get('cursor')
email = self.request.get('email')
if cursor:
curs = search.Cursor(web_safe_string=cursor)
else:
curs = ''
records, next_cursor = vnsearch.query_rec_counter(
q, OPTIMUM_CHUNK_SIZE, curs=curs
)
logging.info("Got %d records this round" % records)
# Update the total number of records retrieved
reccount = reccount+records
if next_cursor:
curs = next_cursor.web_safe_string
else:
curs = None
if curs:
countparams = dict(
q=self.request.get('q'), cursor=curs, reccount=reccount,
requesttime=requesttime, fromapi=fromapi, source=source,
latlon=latlon, email=email, country=country,
user_agent=user_agent)
logging.info('Record counter. Count: %s Email: %s Query: %s'
' Cursor: %s Version: %s' %
(reccount, email, q, next_cursor, fromapi))
# Keep counting
taskqueue.add(
url='/service/download/count',
params=countparams
)
else:
# Finished counting. Log the results and send email.
apitracker_params = dict(
latlon=latlon,
country=country,
user_agent=user_agent,
query=q,
type='count',
api_version=fromapi,
request_source=source,
count=reccount,
downloader=email
)
taskqueue.add(
url='/apitracker',
payload=json.dumps(apitracker_params),
queue_name=QUEUE_NAME
)
resulttime = datetime.utcnow().isoformat()
mail.send_mail(
sender="VertNet Counts <vertnetinfo@vertnet.org>",
to=email,
subject="Your VertNet count is ready!",
body="""Your query found %s matching records.
Query: %s
Request submitted: %s
Request fulfilled: %s
""" % (reccount, q, requesttime, resulttime))
logging.info("Successfully sent mail to user")
|
gpl-2.0
| -9,069,144,461,149,763,000
| 31.169355
| 72
| 0.601655
| false
| 4.029293
| false
| false
| false
|
UMD-DRASTIC/drastic-web
|
webdav/resources.py
|
1
|
6514
|
from os import path as ospath
from datetime import datetime
from djangodav.fs.resources import BaseFSDavResource
from djangodav.utils import url_join
from drastic.models import Collection, Resource, DataObject
import logging
logging.warn('WEBDAV has been loaded')
CHUNK_SIZE = 1048576
def chunkstring(string, length):
return (string[0+i:length+i] for i in range(0, len(string), length))
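# e.g. list(chunkstring("abcdef", 4)) -> ["abcd", "ef"]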
class DrasticDavResource(BaseFSDavResource):
root = '/'
node = None
notfound = False
def me(self):
if self.node is not None or self.notfound:
return self.node
try:
self.node = Collection.find(self.get_abs_path())
except Exception:
logging.exception('Cannot fetch drastic collection for {}'.format(self.path))
if self.node is None:
try:
self.node = Resource.find(self.get_abs_path())
except Exception:
logging.exception("Cannot find drastic file resource for {}"
.format(self.path))
if self.node is None:
self.notfound = True
return self.node
def get_abs_path(self):
"""Return the absolute path of the resource. Used internally to interface with
an actual file system. If you override all other methods, this one will not
be used."""
return ospath.join(self.root, *self.path)
@property
def getcontentlength(self):
"""Return the size of the resource in bytes."""
if self.is_collection:
return 0
else:
return self.me().get_size()
def get_created(self):
"""Return the create time as datetime object."""
return self.me().get_create_ts()
def get_modified(self):
"""Return the modified time as datetime object."""
return self.me().get_modified_ts()
@property
def is_root(self):
if self.path is None or len(self.path) == 0:
return True
else:
return False
@property
def displayname(self):
if self.is_root:
return '/'
else:
return super(DrasticDavResource, self).displayname
@property
def is_collection(self):
"""Return True if this resource is a directory (collection in WebDAV parlance)."""
return isinstance(self.me(), Collection)
@property
def is_object(self):
"""Return True if this resource is a file (resource in WebDAV parlance)."""
return not self.is_collection
@property
def exists(self):
"""Return True if this resource exists."""
return self.me() is not None
@property
def getetag(self):
return self.me().uuid
def get_children(self):
"""Return an iterator of all direct children of this resource."""
if self.is_collection:
child_c, child_r = self.me().get_child()
child_c = [u"{}/".format(c) for c in child_c]
child_c.extend(child_r)
for child in child_c:
yield self.clone(url_join(*(self.path + [child])))
def read(self):
data = []
for chk in self.me().chunk_content():
data.append(chk)
return data
def write(self, request):
"""Write this data object from HTTP request."""
# Note that all permission checks happen in DAVView
# TODO Can be optimized with Cassandra LWT
# Check if the resource already exists
content = request.body
# md5sum = md5(content).hexdigest()
mimetype = "application/octet-stream"
logging.warn(str(dir(request)))
if hasattr(request, 'content_type'):
tmp = request.content_type.split("; ")
mimetype = tmp[0]
resource = Resource.find(self.get_abs_path())
if resource:
# NOTE For now WEBDAV updates are not supported.
# TODO WEBDAV updates were resulting in empty files. Compare with CDMIResource
raise NotImplementedError()
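# NOTE: everything from here down to the `else` (create) branch is unreachable while
# the NotImplementedError above is raised.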
# Update value
# Delete old blobs
old_meta = resource.get_metadata()
old_acl = resource.get_acl()
create_ts = resource.get_create_ts()
resource.delete_blobs()
uuid = None
seq_num = 0
for chk in chunkstring(content, CHUNK_SIZE):
if uuid is None:
uuid = DataObject.create(chk,
metadata=old_meta,
acl=old_acl,
create_ts=create_ts).uuid
else:
DataObject.append_chunk(uuid, chk, seq_num, False)
seq_num += 1
url = "cassandra://{}".format(uuid)
resource.update(url=url,
mimetype=mimetype)
else: # Create resource
uuid = None
seq_num = 0
create_ts = datetime.now()
for chk in chunkstring(content, CHUNK_SIZE):
if uuid is None:
uuid = DataObject.create(chk, False,
create_ts=create_ts).uuid
else:
DataObject.append_chunk(uuid, chk, seq_num, False)
seq_num += 1
if uuid is None: # Content is null
uuid = self.create_empty_data_object()
url = "cassandra://{}".format(uuid)
resource = Resource.create(name=self.displayname,
container=self.get_parent_path()[:-1],
url=url,
mimetype=mimetype,
size=len(content))
def delete(self):
"""Delete the resource, recursive is implied."""
self.me().delete()
def create_collection(self):
"""Create a directory in the location of this resource."""
# TODO needs checks from CDMIView
container = None
if self.get_parent_path() == '' or self.get_parent_path() == '/':
container = '/'
else:
container = self.get_parent_path()[:-1]
Collection.create(name=self.displayname, container=container)
def copy_object(self, destination, depth=0):
raise NotImplementedError
def move_object(self, destination):
raise NotImplementedError
|
agpl-3.0
| -4,060,378,795,736,630,300
| 32.927083
| 90
| 0.548204
| false
| 4.45859
| false
| false
| false
|
chrisRubiano/django_reportes
|
config/settings/local.py
|
1
|
2241
|
# -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='-og<m&!U(b$2D.+^D-9LvG{,-Bdk%F[pE@Q>@26QB9}0EeTuj`')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]
INSTALLED_APPS += ['debug_toolbar', ]
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# tricks to have debug toolbar when developing with docker
if os.environ.get('USE_DOCKER') == 'yes':
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + '1']
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['django_extensions', ]
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
|
mit
| 205,409,767,345,248,540
| 29.283784
| 99
| 0.481481
| false
| 4.142329
| false
| false
| false
|
roboime/pyroboime
|
roboime/core/skills/orientto.py
|
1
|
2436
|
#
# Copyright (C) 2013-2015 RoboIME
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
from numpy import pi, sign, array
from numpy.linalg import norm
#from ...utils.mathutils import sqrt
from ...utils.pidcontroller import PidController
from .. import Skill
class OrientTo(Skill):
"""
This skill will orient the robot around a given point to look at another given point.
"""
angle_tolerance = 0.5
distance_tolerance = 0.11
walkspeed = 0.1
def __init__(self, robot, lookpoint=None, minpower=0.0, maxpower=1.0, **kwargs):
"""
"""
super(OrientTo, self).__init__(robot, deterministic=True, **kwargs)
self.lookpoint = lookpoint
self.minpower = minpower
self.maxpower = maxpower
self.angle_controller = PidController(kp=1.8, ki=0, kd=0, integ_max=687.55, output_max=360)
self.distance_controller = PidController(kp=1.8, ki=0, kd=0, integ_max=687.55, output_max=360)
@property
def final_target(self):
return self.lookpoint
def good_position(self):
good_distance = self.robot.kicker.distance(self.ball) <= self.distance_tolerance
good_angle = abs(self.delta_angle()) < self.angle_tolerance
return good_distance and good_angle
def delta_angle(self):
delta = self.robot.angle - self.ball.angle_to_point(self.lookpoint)
return (180 + delta) % 360 - 180
def _step(self):
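# Rough idea (summary comment, not original code): run the PID on the angular error,
# clamp the angular speed w to the robot's limit, and command a tangential speed
# v = pi*w*d/180 so the robot circles the ball at distance d while turning to face
# the lookpoint.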
delta_angle = self.delta_angle()
self.angle_controller.input = delta_angle
self.angle_controller.feedback = 0.0
self.angle_controller.step()
#d = self.robot.front_cut + self.ball.radius
d = norm(array(self.robot) - array(self.ball))
r = self.robot.radius
w = self.angle_controller.output
max_w = 180.0 * self.robot.max_speed / r / pi
if abs(w) > max_w:
w = sign(w) * max_w
v = pi * w * d / 180.0
self.robot.action.speeds = (0.0, v, -w)
|
agpl-3.0
| 2,438,896,582,819,734,000
| 33.309859
| 102
| 0.65353
| false
| 3.510086
| false
| false
| false
|
matthagy/Jamenson
|
jamenson/transform/globals.py
|
1
|
4950
|
'''Transform operations on globally scoped symbols into
operations on the symbol_cell mapping.
'''
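# Rough effect of this transform (illustrative summary, not part of the original module):
# reads/writes/deletes of globally scoped symbols become getitem/setitem/delitem calls
# on a hidden symbol-cells map that the rewritten toplevel binds via
# get_symbol_cells_map() before evaluating the original expression.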
from __future__ import absolute_import
from __future__ import with_statement
from ..runtime.symbol import get_symbol_cells_map, gensym
from ..compiler import ir as I
from ..compiler import bind
from ..compiler.walk import IRWalker, propigate_location
from ..compiler.translate import state as translation_state
class GlobalSymbolTransformer(IRWalker):
def __init__(self, symbol_map_sym, top_scope):
IRWalker.__init__(self)
self.symbol_map_sym = symbol_map_sym
self.current_scope = top_scope
@staticmethod
def is_global(binding):
return bind.get_binding_use_type(binding) == bind.BND_GLOBAL
@staticmethod
def replace(old, new, skips=[]):
propigate_location(old, new, skips)
I.replace_child(old, new)
def visit_function(self, func):
for child in func.defaults:
self.visit(child)
old_scope = self.current_scope
self.current_scope = func.scope
self.visit(func.body)
self.current_scope = old_scope
def make_read_map(self):
return I.make_read_binding(self.current_scope.use_symbol(self.symbol_map_sym))
def visit_read_binding(self, rb):
if not self.is_global(rb.binding):
return
self.replace(rb, I.make_getitem(self.make_read_map(),
I.make_constant(rb.binding.symbol)))
def make_set(self, binding, value_ir):
return I.make_setitem(self.make_read_map(),
I.make_constant(binding.symbol),
value_ir)
def visit_write_binding(self, wb):
value = wb.value
if self.is_global(wb.binding):
del value.continuation
self.replace(wb, self.make_set(wb.binding, value),
skips=[value])
self.visit(value)
def visit_delete_binding(self, db):
if not self.is_global(db.binding):
return
self.replace(db, I.make_delitem(self.make_read_map(),
I.make_constant(db.binding.symbol)))
def visit_foriter(self, fi):
itr = fi.iter
if self.is_global(fi.binding):
old_binding = fi.binding
del fi.binding
sym = gensym('foriter-tmp')
self.current_scope.register_local(sym)
del itr.continuation
self.replace(fi, I.make_progn([
I.make_foriter(tag=fi.tag,
binding=self.current_scope.use_symbol(sym),
iter=itr),
self.make_set(old_binding, I.make_read_binding(self.current_scope.use_symbol(sym)))
]),
skips=[itr])
del fi.tag
self.visit(itr)
def visit_unpack_seq(self, us):
new_bindings = []
copies = []
for binding in us.places:
if not self.is_global(binding):
new_bindings.append(binding)
else:
gs = gensym('unpack-tmp')
new_bindings.append(self.current_scope.register_and_use_local(gs))
copies.append([gs, binding])
seq = us.seq
if copies:
del seq.continuation
del us.places
self.replace(us, I.make_progn([
I.make_unpack_seq(seq, new_bindings)
] + [self.make_set(binding, I.make_read_binding(self.current_scope.use_symbol(gs)))
for gs,binding in copies]),
skips=[seq])
self.visit(seq)
def transform_global_symbol_use(top):
assert isinstance(top, I.toplevel)
top_scope = top.scope
assert not top_scope.parent
symbol_map_sym = gensym('symbol-cells-map')
symbol_map_binding = top_scope.register_local(symbol_map_sym)
GlobalSymbolTransformer(symbol_map_sym, top_scope).visit(top.expression)
if not len(symbol_map_binding.uses):
top_scope.unregister_binding(symbol_map_binding)
return top
expression = top.expression
del expression.continuation
when = None
if isinstance(expression, I.evalwhen):
when = expression.when
expression = expression.expression
del expression.continuation
new_ir = I.make_progn([I.make_write_binding(
top_scope.use_symbol(symbol_map_sym),
I.make_call(callee=I.make_constant(get_symbol_cells_map),
args=[], kwd_names=[], kwd_values=[],
star_args=None, star_kwds=None)),
expression])
if when is not None:
new_ir = I.make_evalwhen(when=when, expression=new_ir)
new_top = I.make_toplevel(new_ir, top_scope)
propigate_location(top, new_top, [expression])
return new_top
|
apache-2.0
| -3,855,520,096,124,097,500
| 36.218045
| 99
| 0.573131
| false
| 3.91924
| false
| false
| false
|
ari-zah/gaiasky
|
assets/scripts/tests/camera-path-sync.py
|
1
|
1065
|
# This script tests the synchronous camera file playing.
# Created by Toni Sagrista
import time, os
from py4j.java_gateway import JavaGateway, GatewayParameters
gateway = JavaGateway(gateway_parameters=GatewayParameters(auto_convert=True))
gs = gateway.entry_point
# Prints to both Gaia Sky and Python logs
def printall(string):
# print to gaia sky log
gs.print(string)
# print to python log
print(string)
gs.disableInput()
gs.cameraStop()
gs.minimizeInterfaceWindow()
fname = os.path.abspath("./camera-path-test.gsc")
printall("(1/2) Starting synchronous camera file execution: %s" % fname)
t0 = time.time()
gs.runCameraPath(fname, True)
t1 = time.time()
printall("Sync exec: script regained control after %.4f seconds" % (t1 - t0))
printall("(2/2) Starting asynchronous camera file execution: %s" % fname)
t0 = time.time()
gs.runCameraPath(fname)
t1 = time.time()
printall("Async exec: script regained control after %.4f seconds" % (t1 - t0))
gs.maximizeInterfaceWindow()
gs.enableInput()
printall("Script finishes")
gateway.close()
|
lgpl-3.0
| -7,236,454,278,470,441,000
| 23.204545
| 78
| 0.738028
| false
| 3.160237
| false
| false
| false
|
rob-nn/motus
|
gait_loader.py
|
1
|
2787
|
from numpy import *
class DataLoader(object):
def __init__(self, file_name):
self._data = None
self._file_name = file_name
self._load_data()
self._data_descs =[]
self._generate_data_descs()
    def _load_data(self):
        # Read the file once, skip blank lines and '#' comment lines, and
        # parse each remaining line into a row of floats.
        with open(self._file_name) as f:
            lines = f.readlines()
        data_list = []
        for line in lines:
            if len(line) <= 1 or line[0] == '#':
                continue
            data_list.append([float(word) for word in line.split()])
        self._data = array(data_list)
def _generate_data_descs(self):
self._data_descs.append(self._generate_data_desc(0, 'Left angular velocities'))
self._data_descs.append(self._generate_data_desc(1, 'Right angular velocities'))
self._data_descs.append(self._generate_data_desc(2, 'Left angles'))
self._data_descs.append(self._generate_data_desc(3, 'Right angles'))
self._data_descs.append(self._generate_data_desc(4, 'Left angular accelarations'))
self._data_descs.append(self._generate_data_desc(5, 'Right angular accelerations'))
self._data_descs.append(self._generate_data_desc(6, 'Left x velocities'))
self._data_descs.append(self._generate_data_desc(7, 'Left y velocities'))
self._data_descs.append(self._generate_data_desc(8, 'Left z velocities'))
self._data_descs.append(self._generate_data_desc(9, 'Right x velocities'))
self._data_descs.append(self._generate_data_desc(10, 'Right y velocities'))
self._data_descs.append(self._generate_data_desc(11, 'Right z velocities'))
def _generate_data_desc(self, index, desc):
column = self.data[:, index]
return DataDesc(index, desc, column.min(), column.max())
@property
def data(self):
return self._data
@property
def data_descs(self):
return self._data_descs
def normalize(self, index):
return array((self.data[:, index] - self.data_descs[index].min_val) / \
(self.data_descs[index].max_val - self.data_descs[index].min_val))
def normalize_all(self):
new_data = array([])
for i in range(self.data.shape[1]):
new_data = concatenate((new_data, self.normalize(i)))
return reshape(new_data, self.data.shape)
class DataDesc(object):
def __init__(self, index, desc, min_val, max_val):
self._index = index
self._min_val = min_val
self._max_val = max_val
self._desc = desc
@property
def index(self):
return self._index
@property
def min_val(self):
return self._min_val
@property
def max_val(self):
return self._max_val
@property
def desc(self):
return self._desc
def loadWalk(value):
return DataLoader('./dynamics_data/dynamics_walk' + str(value) + '.mat')
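# Editor's usage sketch (not part of the original module); it assumes the
# dynamics data files referenced by loadWalk() exist on disk:
#
#     loader = loadWalk(1)                  # reads ./dynamics_data/dynamics_walk1.mat
#     print(loader.data.shape)              # raw gait matrix (rows x described columns)
#     print(loader.data_descs[0].desc)      # 'Left angular velocities'
#     normalized = loader.normalize_all()   # every column rescaled to [0, 1]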
|
gpl-2.0
| -6,392,789,390,617,272,000
| 30.314607
| 88
| 0.642268
| false
| 2.921384
| false
| false
| false
|
mariocesar/django-tricks
|
django_tricks/models/abstract.py
|
1
|
2638
|
from uuid import uuid4
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from .mixins import MPAwareModel
treebeard = True
try:
from treebeard.mp_tree import MP_Node
except ImportError:
treebeard = False
class UniqueTokenModel(models.Model):
token = models.CharField(max_length=32, unique=True, blank=True)
class Meta:
abstract = True
def get_token(self):
return str(uuid4().hex)
def save(self, **kwargs):
if not self.token:
self.token = self.get_token()
super().save(**kwargs)
if treebeard:
class MaterializedPathNode(MPAwareModel, MP_Node):
slug = models.SlugField(max_length=255, db_index=True, unique=False, blank=True)
        node_order_by = ['numval', 'strval']
class Meta:
abstract = True
class MutableModelManager(models.QuerySet):
def by_type(self, model_class):
return self.filter(specific_type=ContentType.objects.get_for_model(model_class))
class MutableModel(models.Model):
"""A Model that if inherited from will store the specific class reference in self."""
specific_type = models.ForeignKey(
ContentType,
verbose_name=_('specific type'),
related_name='+',
editable=False,
on_delete=models.PROTECT)
class Meta:
abstract = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.pk and not self.specific_type_id:
# this model is being newly created rather than retrieved from the db;
# set content type to correctly represent the model class that this was
# created as
self.specific_type = ContentType.objects.get_for_model(self)
@cached_property
def specific(self):
"""Return this page in its most specific subclassed form."""
specific_type = ContentType.objects.get_for_id(self.specific_type_id)
model_class = specific_type.model_class()
if model_class is None:
return self
elif isinstance(self, model_class):
return self
else:
return specific_type.get_object_for_this_type(id=self.id)
@cached_property
def specific_class(self):
"""Return the class that this page would be if instantiated in its most specific form."""
specific_type = ContentType.objects.get_for_id(self.specific_type_id)
return specific_type.model_class()
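# Editor's sketch (hypothetical models, not part of this module): subclassing
# MutableModel records the concrete class on creation, so an instance fetched
# through a more generic queryset can be narrowed back to its real type:
#
#     class Page(MutableModel):
#         title = models.CharField(max_length=100)
#
#     class NewsPage(Page):
#         body = models.TextField()
#
#     news = NewsPage.objects.create(title='Hello', body='...')
#     page = Page.objects.get(pk=news.pk)
#     page.specific          # -> the NewsPage instance
#     page.specific_class    # -> NewsPage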
|
isc
| 4,206,543,305,950,345,000
| 29.321839
| 97
| 0.655042
| false
| 4.070988
| false
| false
| false
|
jamielennox/requests-mock
|
requests_mock/adapter.py
|
1
|
10629
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import weakref
from requests.adapters import BaseAdapter
from requests.utils import requote_uri
import six
from six.moves.urllib import parse as urlparse
from requests_mock import exceptions
from requests_mock.request import _RequestObjectProxy
from requests_mock.response import _MatcherResponse
import logging
logger = logging.getLogger(__name__)
try:
import purl
purl_types = (purl.URL,)
except ImportError:
purl = None
purl_types = ()
ANY = object()
class _RequestHistoryTracker(object):
def __init__(self):
self.request_history = []
def _add_to_history(self, request):
self.request_history.append(request)
@property
def last_request(self):
"""Retrieve the latest request sent"""
try:
return self.request_history[-1]
except IndexError:
return None
@property
def called(self):
return self.call_count > 0
@property
def called_once(self):
return self.call_count == 1
@property
def call_count(self):
return len(self.request_history)
def reset(self):
self.request_history = []
class _RunRealHTTP(Exception):
"""A fake exception to jump out of mocking and allow a real request.
This exception is caught at the mocker level and allows it to execute this
request through the real requests mechanism rather than the mocker.
It should never be exposed to a user.
"""
class _Matcher(_RequestHistoryTracker):
"""Contains all the information about a provided URL to match."""
def __init__(self, method, url, responses, complete_qs, request_headers,
additional_matcher, real_http, case_sensitive):
"""
:param bool complete_qs: Match the entire query string. By default URLs
match if all the provided matcher query arguments are matched and
extra query arguments are ignored. Set complete_qs to true to
require that the entire query string needs to match.
"""
super(_Matcher, self).__init__()
self._method = method
self._url = url
self._responses = responses
self._complete_qs = complete_qs
self._request_headers = request_headers
self._real_http = real_http
self._additional_matcher = additional_matcher
# url can be a regex object or ANY so don't always run urlparse
if isinstance(url, six.string_types):
url_parts = urlparse.urlparse(url)
self._scheme = url_parts.scheme.lower()
self._netloc = url_parts.netloc.lower()
self._path = requote_uri(url_parts.path or '/')
self._query = url_parts.query
if not case_sensitive:
self._path = self._path.lower()
self._query = self._query.lower()
elif isinstance(url, purl_types):
self._scheme = url.scheme()
self._netloc = url.netloc()
self._path = url.path()
self._query = url.query()
if not case_sensitive:
self._path = self._path.lower()
self._query = self._query.lower()
else:
self._scheme = None
self._netloc = None
self._path = None
self._query = None
def _match_method(self, request):
if self._method is ANY:
return True
if request.method.lower() == self._method.lower():
return True
return False
def _match_url(self, request):
if self._url is ANY:
return True
# regular expression matching
if hasattr(self._url, 'search'):
return self._url.search(request.url) is not None
# scheme is always matched case insensitive
if self._scheme and request.scheme.lower() != self._scheme:
return False
# netloc is always matched case insensitive
if self._netloc and request.netloc.lower() != self._netloc:
return False
if (request.path or '/') != self._path:
return False
# construct our own qs structure as we remove items from it below
request_qs = urlparse.parse_qs(request.query, keep_blank_values=True)
matcher_qs = urlparse.parse_qs(self._query, keep_blank_values=True)
for k, vals in six.iteritems(matcher_qs):
for v in vals:
try:
request_qs.get(k, []).remove(v)
except ValueError:
return False
if self._complete_qs:
for v in six.itervalues(request_qs):
if v:
return False
return True
def _match_headers(self, request):
for k, vals in six.iteritems(self._request_headers):
try:
header = request.headers[k]
except KeyError:
                # NOTE(jamielennox): This seems to be a requests 1.2/2
                # difference: in 2 they are just whatever the user inputted, in
                # 1 they are bytes. Let's optionally handle both and look at
                # removing this when we depend on requests 2.
if not isinstance(k, six.text_type):
return False
try:
header = request.headers[k.encode('utf-8')]
except KeyError:
return False
if header != vals:
return False
return True
def _match_additional(self, request):
if callable(self._additional_matcher):
return self._additional_matcher(request)
if self._additional_matcher is not None:
raise TypeError("Unexpected format of additional matcher.")
return True
def _match(self, request):
return (self._match_method(request) and
self._match_url(request) and
self._match_headers(request) and
self._match_additional(request))
def __call__(self, request):
if not self._match(request):
return None
# doing this before _add_to_history means real requests are not stored
# in the request history. I'm not sure what is better here.
if self._real_http:
raise _RunRealHTTP()
if len(self._responses) > 1:
response_matcher = self._responses.pop(0)
else:
response_matcher = self._responses[0]
self._add_to_history(request)
return response_matcher.get_response(request)
class Adapter(BaseAdapter, _RequestHistoryTracker):
"""A fake adapter than can return predefined responses.
"""
def __init__(self, case_sensitive=False):
super(Adapter, self).__init__()
self._case_sensitive = case_sensitive
self._matchers = []
def send(self, request, **kwargs):
request = _RequestObjectProxy(request,
case_sensitive=self._case_sensitive,
**kwargs)
self._add_to_history(request)
for matcher in reversed(self._matchers):
try:
resp = matcher(request)
except Exception:
request._matcher = weakref.ref(matcher)
raise
if resp is not None:
request._matcher = weakref.ref(matcher)
resp.connection = self
logger.debug('{} {} {}'.format(request._request.method,
request._request.url,
resp.status_code))
return resp
raise exceptions.NoMockAddress(request)
def close(self):
pass
def register_uri(self, method, url, response_list=None, **kwargs):
"""Register a new URI match and fake response.
:param str method: The HTTP method to match.
:param str url: The URL to match.
"""
complete_qs = kwargs.pop('complete_qs', False)
additional_matcher = kwargs.pop('additional_matcher', None)
request_headers = kwargs.pop('request_headers', {})
real_http = kwargs.pop('_real_http', False)
if response_list and kwargs:
raise RuntimeError('You should specify either a list of '
'responses OR response kwargs. Not both.')
elif real_http and (response_list or kwargs):
raise RuntimeError('You should specify either response data '
'OR real_http. Not both.')
elif not response_list:
response_list = [] if real_http else [kwargs]
        # NOTE(jamielennox): case_sensitive is not present as a kwarg because I
        # think there would be an edge case where the adapter and register_uri
        # had different values.
        # Ideally case_sensitive would be a value passed to match(); however,
        # this would change the contract of matchers, so we pass it to the
        # proxy and the matcher separately.
responses = [_MatcherResponse(**k) for k in response_list]
matcher = _Matcher(method,
url,
responses,
case_sensitive=self._case_sensitive,
complete_qs=complete_qs,
additional_matcher=additional_matcher,
request_headers=request_headers,
real_http=real_http)
self.add_matcher(matcher)
return matcher
def add_matcher(self, matcher):
"""Register a custom matcher.
A matcher is a callable that takes a `requests.Request` and returns a
`requests.Response` if it matches or None if not.
:param callable matcher: The matcher to execute.
"""
self._matchers.append(matcher)
def reset(self):
super(Adapter, self).reset()
for matcher in self._matchers:
matcher.reset()
__all__ = ['Adapter']
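# Editor's usage sketch (not part of the original module): the Adapter can be
# mounted on a plain requests Session under a made-up URL scheme, with
# register_uri() supplying the canned response.
if __name__ == '__main__':
    import requests
    session = requests.Session()
    adapter = Adapter()
    session.mount('mock://', adapter)
    adapter.register_uri('GET', 'mock://example.test/path', text='resp')
    print(session.get('mock://example.test/path').text)  # -> resp
    print(adapter.last_request.path)                     # -> /path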
|
apache-2.0
| -4,611,116,238,172,673,500
| 32.215625
| 79
| 0.581428
| false
| 4.643512
| false
| false
| false
|
netinept/plog
|
plog/storages/settings_s3boto.py
|
1
|
1207
|
# S3Boto storage settings for photologue example project.
import os
DEFAULT_FILE_STORAGE = 'plog.storages.s3utils.MediaS3BotoStorage'
STATICFILES_STORAGE = 'plog.storages.s3utils.StaticS3BotoStorage'
try:
# If you want to test the example_project with S3, you'll have to configure the
# environment variables as specified below.
# (Secret keys are stored in environment variables for security - you don't want to
# accidentally commit and push them to a public repository).
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
except KeyError:
raise KeyError('Need to define AWS environment variables: ' +
'AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and AWS_STORAGE_BUCKET_NAME')
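# Editor's sketch: the variables above would typically be exported in the
# shell before starting Django (all values below are placeholders), e.g.
#   export AWS_ACCESS_KEY_ID=AKIAXXXXXXXXXXXXXXXX
#   export AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#   export AWS_STORAGE_BUCKET_NAME=my-photologue-bucket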
# Default Django Storage API behavior - don't overwrite files with same name
AWS_S3_FILE_OVERWRITE = False
MEDIA_ROOT = '/media/'
MEDIA_URL = 'http://%s.s3.amazonaws.com/media/' % AWS_STORAGE_BUCKET_NAME
STATIC_ROOT = '/static/'
STATIC_URL = 'http://%s.s3.amazonaws.com/static/' % AWS_STORAGE_BUCKET_NAME
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
|
apache-2.0
| -7,196,048,099,957,750,000
| 40.655172
| 91
| 0.727423
| false
| 3.438746
| false
| false
| false
|
Bhare8972/LOFAR-LIM
|
LIM_scripts/utilities.py
|
1
|
11289
|
#!/usr/bin/env python3
##ON APP MACHINE
import sys
from os import listdir, mkdir
from os.path import isdir, dirname, abspath
import os
import subprocess
import weakref
from scipy import fftpack
import numpy as np
## some global variables, this needs to be fixed at some point
default_raw_data_loc = None#"/exp_app2/appexp1/public/raw_data"
default_processed_data_loc = None#"/home/brian/processed_files"
MetaData_directory = dirname(abspath(__file__)) + '/data' ## change this if antenna_response_model is in a folder different from this module
#### constants
C = 299792458.0
RTD = 180.0/3.1415926 ##radians to degrees
n_air = 1.000293
v_air = C/n_air
latlonCS002 = np.array([52.91512249, 6.869837540]) ## latitude and longitude of CS002 in degrees
#### log data to screen and to a file
class logger(object):
class std_writer(object):
def __init__(self, logger):
self.logger_ref = weakref.ref(logger)
def write(self, msg):
logger=self.logger_ref()
logger.out_file.write(msg)
if logger.to_screen:
logger.old_stdout.write(msg)
def flush(self):
logger=self.logger_ref()
logger.out_file.flush()
def __init__(self):
self.has_stderr = False
self.has_stdout = False
self.old_stderr = sys.stderr
self.old_stdout = sys.stdout
self.set("out_log")
def set(self, fname, to_screen=True):
self.out_file = open(fname, 'w')
self.set_to_screen( to_screen )
def __call__(self, *args):
for a in args:
if self.to_screen:
self.old_stdout.write(str(a))
self.old_stdout.write(" ")
self.out_file.write(str(a))
self.out_file.write(" ")
self.out_file.write("\n")
if self.to_screen:
self.old_stdout.write("\n")
self.out_file.flush()
self.old_stdout.flush()
def set_to_screen(self, to_screen=True):
self.to_screen = to_screen
def take_stdout(self):
if not self.has_stdout:
sys.stdout = self.std_writer(self)
self.has_stdout = True
def take_stderr(self):
if not self.has_stderr:
sys.stderr = self.std_writer(self)
self.has_stderr = True
def restore_stdout(self):
if self.has_stdout:
sys.stdout = self.old_stdout
self.has_stdout = False
def restore_stderr(self):
if self.has_stderr:
sys.stderr = self.old_stderr
self.has_stderr = False
def flush(self):
self.out_file.flush()
# def __del__(self):
# self.restore_stderr()
# self.restore_stdout()
#log = logger()
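# Editor's usage sketch (not part of the original module):
#
#     log = logger()
#     log.set("processing_log.txt")   # log to this file and echo to screen
#     log.take_stdout()               # also capture anything written to stdout
#     log("found", 5, "stations")
#     log.restore_stdout()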
def iterate_pairs(list_one, list_two, list_one_avoid=[], list_two_avoid=[]):
"""returns an iterator that loops over all pairs of the two lists"""
for item_one in list_one:
if item_one in list_one_avoid:
continue
for item_two in list_two:
if item_two in list_two_avoid:
continue
yield (item_one, item_two)
import re
natural_regex_pattern = re.compile('([0-9]+)')
def natural_sort( l ):
""" Sort the given iterable in the way that humans expect. Usefull for sorting station names."""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in natural_regex_pattern.split(key) ]
return sorted(l, key = alphanum_key)
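# Editor's example: natural_sort(['RS10', 'RS2', 'CS101']) returns
# ['CS101', 'RS2', 'RS10'] because the numeric parts are compared as integers,
# whereas plain sorted() would give ['CS101', 'RS10', 'RS2'].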
#### some file utils
def Fname_data(Fpath):
""" takes both pulse data file names and h5 file names and returns UTC_time, station_name, Fpath"""
Fname = Fpath.split('/')[-1]
data = Fname.split('_')
timeID = data[1]
station_name = data[2]
if len(data[3][1:])==0:
file_number = 0
else:
file_number = int(data[3][1:])
return timeID, station_name, Fpath, file_number
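# Editor's example, derived from the parsing above: a (hypothetical) name like
# 'pulse_D20130619T094846.507Z_CS002_R012' yields
# ('D20130619T094846.507Z', 'CS002', <the full path>, 12).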
##note that timeID is a string representing the datetime of a LOFAR trigger, such as: D20130619T094846.507Z
## the timeID is used to uniquely identify triggers
def get_timeID(fname):
data=fname.split("_")
return data[1]
def year_from_timeID(timeID):
return timeID[1:5]
def raw_data_dir(timeID, data_loc=None):
"""gives path to the raw data folder for a particular timeID, given location of data structure. Defaults to default_raw_data_loc"""
if data_loc is None:
data_loc = default_raw_data_loc
if default_raw_data_loc is None:
print("ERROR: 'default_raw_data_loc' in utilities is not set.")
quit()
path = data_loc + '/' + year_from_timeID(timeID)+"/"+timeID
return path
def processed_data_dir(timeID, data_loc=None):
"""gives path to the analysis folders for a particular timeID, given location of data structure. Defaults to default_processed_data_loc
makes the directory if it doesn't exist"""
if data_loc is None:
data_loc = default_processed_data_loc
if default_processed_data_loc is None:
print("ERROR: 'default_processed_data_loc' in utilities is not set.")
quit()
path=data_loc + "/" + year_from_timeID(timeID)+"/"+timeID
if not isdir(path):
mkdir(path)
return path
## a python list indexed by station number; the values are the station names
SId_to_Sname = [None]*209 #just to pre-initialize the list, so the syntax below is possible
SId_to_Sname[1] = "CS001"
SId_to_Sname[2] = "CS002"
SId_to_Sname[3] = "CS003"
SId_to_Sname[4] = "CS004"
SId_to_Sname[5] = "CS005"
SId_to_Sname[6] = "CS006"
SId_to_Sname[7] = "CS007"
#SId_to_Sname[8] = "CS008"
#SId_to_Sname[9] = "CS009"
#SId_to_Sname[10] = "CS010"
SId_to_Sname[11] = "CS011"
#SId_to_Sname[12] = "CS012"
SId_to_Sname[13] = "CS013"
#SId_to_Sname[14] = "CS014"
#SId_to_Sname[15] = "CS015"
#SId_to_Sname[16] = "CS016"
SId_to_Sname[17] = "CS017"
#SId_to_Sname[18] = "CS018"
#SId_to_Sname[19] = "CS019"
#SId_to_Sname[20] = "CS020"
SId_to_Sname[21] = "CS021"
#SId_to_Sname[22] = "CS022"
#SId_to_Sname[23] = "CS023"
SId_to_Sname[24] = "CS024"
#SId_to_Sname[25] = "CS025"
SId_to_Sname[26] = "CS026"
#SId_to_Sname[27] = "CS027"
SId_to_Sname[28] = "CS028"
#SId_to_Sname[29] = "CS029"
SId_to_Sname[30] = "CS030"
SId_to_Sname[31] = "CS031"
SId_to_Sname[32] = "CS032"
SId_to_Sname[101] = "CS101"
#SId_to_Sname[102] = "CS102"
SId_to_Sname[103] = "CS103"
SId_to_Sname[121] = "CS201"
SId_to_Sname[141] = "CS301"
SId_to_Sname[142] = "CS302"
SId_to_Sname[161] = "CS401"
SId_to_Sname[181] = "CS501"
#SId_to_Sname[104] = "RS104"
#SId_to_Sname[105] = "RS105"
SId_to_Sname[106] = "RS106"
#SId_to_Sname[107] = "RS107"
#SId_to_Sname[108] = "RS108"
#SId_to_Sname[109] = "RS109"
#SId_to_Sname[122] = "RS202"
#SId_to_Sname[123] = "RS203"
#SId_to_Sname[124] = "RS204"
SId_to_Sname[125] = "RS205"
#SId_to_Sname[126] = "RS206"
#SId_to_Sname[127] = "RS207"
SId_to_Sname[128] = "RS208"
#SId_to_Sname[129] = "RS209"
SId_to_Sname[130] = "RS210"
#SId_to_Sname[143] = "RS303"
#SId_to_Sname[144] = "RS304"
SId_to_Sname[145] = "RS305"
SId_to_Sname[146] = "RS306"
SId_to_Sname[147] = "RS307"
#SId_to_Sname[148] = "RS308"
#SId_to_Sname[149] = "RS309"
SId_to_Sname[150] = "RS310"
SId_to_Sname[166] = "RS406"
SId_to_Sname[167] = "RS407"
SId_to_Sname[169] = "RS409"
SId_to_Sname[183] = "RS503"
SId_to_Sname[188] = "RS508"
SId_to_Sname[189] = "RS509"
SId_to_Sname[201] = "DE601"
SId_to_Sname[202] = "DE602"
SId_to_Sname[203] = "DE603"
SId_to_Sname[204] = "DE604"
SId_to_Sname[205] = "DE605"
SId_to_Sname[206] = "FR606"
SId_to_Sname[207] = "SE607"
SId_to_Sname[208] = "UK608"
## this just "inverts" the previous list, discarding unused values
Sname_to_SId_dict = {name:ID for ID,name in enumerate(SId_to_Sname) if name is not None}
def even_antName_to_odd(even_ant_name):
even_num = int(even_ant_name)
odd_num = even_num + 1
return str( odd_num ).zfill( 9 )
def antName_is_even(ant_name):
return not int(ant_name)%2
def odd_antName_to_even(odd_ant_name):
odd_num = int(odd_ant_name)
    even_num = odd_num - 1  # the paired even dipole has the next-lower ID
return str( even_num ).zfill( 9 )
#### plotting utilities ####
def set_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
### some math functions? ###
def normalize_angle_radians( angle_radians ):
"""For an angle in radians, return the equivalent angle that is garunteed be between -pi and pi"""
while angle_radians > np.pi:
angle_radians -= 2.0*np.pi
while angle_radians < -np.pi:
angle_radians += 2.0*np.pi
return angle_radians
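# Editor's example: normalize_angle_radians(3*np.pi/2) returns -np.pi/2 and
# normalize_angle_radians(-3*np.pi/2) returns np.pi/2.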
def BoundingBox_collision(BB1, BB2):
""" return true if two N-D bounding boxes collide, False otherwise"""
for B1, B2 in zip(BB1,BB2):
if (B1[1] < B2[0]) or (B2[1] < B1[0]):
return False
return True
### some build tools ####
def GSL_include():
"""return directory for location of GSL headers, useful when combining GSL and cython"""
try:
gsl_include = subprocess.check_output('gsl-config --cflags', shell=True).decode('utf-8')[2:-1]
except subprocess.CalledProcessError:
gsl_include = os.getenv('LIB_GSL')
if gsl_include is None:
# Environmental variable LIB_GSL not set, use hardcoded path.
gsl_include = r"c:\Program Files\GnuWin32\include"
else:
gsl_include += "/include"
assert gsl_include != '', "Couldn't find gsl. Make sure it's installed and in the path."
return gsl_include
def GSL_library_dir():
"""return directory for location of GSL binaries, useful when combining GSL and cython"""
try:
lib_gsl_dir = subprocess.check_output('gsl-config --libs', shell=True).decode('utf-8').split()[0][2:]
except subprocess.CalledProcessError:
lib_gsl_dir = os.getenv('LIB_GSL')
if lib_gsl_dir is None:
# Environmental variable LIB_GSL not set, use hardcoded path.
lib_gsl_dir = r"c:\Program Files\GnuWin32\lib"
else:
lib_gsl_dir += "/lib"
return lib_gsl_dir
|
mit
| -6,915,917,853,648,077,000
| 29.349462
| 141
| 0.60776
| false
| 2.893873
| false
| false
| false
|
fintech-circle/edx-platform
|
openedx/core/djangoapps/auth_exchange/forms.py
|
1
|
4033
|
"""
Forms to support third-party to first-party OAuth 2.0 access token exchange
"""
import provider.constants
from django.contrib.auth.models import User
from django.forms import CharField
from edx_oauth2_provider.constants import SCOPE_NAMES
from oauth2_provider.models import Application
from provider.forms import OAuthForm, OAuthValidationError
from provider.oauth2.forms import ScopeChoiceField, ScopeMixin
from provider.oauth2.models import Client
from requests import HTTPError
from social.backends import oauth as social_oauth
from social.exceptions import AuthException
from third_party_auth import pipeline
class AccessTokenExchangeForm(ScopeMixin, OAuthForm):
"""Form for access token exchange endpoint"""
access_token = CharField(required=False)
scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
client_id = CharField(required=False)
def __init__(self, request, oauth2_adapter, *args, **kwargs):
super(AccessTokenExchangeForm, self).__init__(*args, **kwargs)
self.request = request
self.oauth2_adapter = oauth2_adapter
def _require_oauth_field(self, field_name):
"""
        Raise an appropriate OAuthValidationError if the field is missing
"""
field_val = self.cleaned_data.get(field_name)
if not field_val:
raise OAuthValidationError(
{
"error": "invalid_request",
"error_description": "{} is required".format(field_name),
}
)
return field_val
def clean_access_token(self):
"""
Validates and returns the "access_token" field.
"""
return self._require_oauth_field("access_token")
def clean_client_id(self):
"""
Validates and returns the "client_id" field.
"""
return self._require_oauth_field("client_id")
def clean(self):
if self._errors:
return {}
backend = self.request.backend
if not isinstance(backend, social_oauth.BaseOAuth2):
raise OAuthValidationError(
{
"error": "invalid_request",
"error_description": "{} is not a supported provider".format(backend.name),
}
)
self.request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
client_id = self.cleaned_data["client_id"]
try:
client = self.oauth2_adapter.get_client(client_id=client_id)
except (Client.DoesNotExist, Application.DoesNotExist):
raise OAuthValidationError(
{
"error": "invalid_client",
"error_description": "{} is not a valid client_id".format(client_id),
}
)
if client.client_type not in [provider.constants.PUBLIC, Application.CLIENT_PUBLIC]:
raise OAuthValidationError(
{
# invalid_client isn't really the right code, but this mirrors
# https://github.com/edx/django-oauth2-provider/blob/edx/provider/oauth2/forms.py#L331
"error": "invalid_client",
"error_description": "{} is not a public client".format(client_id),
}
)
self.cleaned_data["client"] = client
user = None
try:
user = backend.do_auth(self.cleaned_data.get("access_token"), allow_inactive_user=True)
except (HTTPError, AuthException):
pass
if user and isinstance(user, User):
self.cleaned_data["user"] = user
else:
# Ensure user does not re-enter the pipeline
self.request.social_strategy.clean_partial_pipeline()
raise OAuthValidationError(
{
"error": "invalid_grant",
"error_description": "access_token is not valid",
}
)
return self.cleaned_data
|
agpl-3.0
| -3,508,841,061,231,892,500
| 36
| 106
| 0.598562
| false
| 4.557062
| false
| false
| false
|
nicolaselie/pykuli
|
keyboard/mac.py
|
1
|
5816
|
#Copyright 2013 Paul Barton
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
from Quartz import *
from AppKit import NSEvent
from .base import KeyboardMeta, KeyboardEventMeta
# Taken from events.h
# /System/Library/Frameworks/Carbon.framework/Versions/A/Frameworks/HIToolbox.framework/Versions/A/Headers/Events.h
character_translate_table = {
'a': 0x00,
's': 0x01,
'd': 0x02,
'f': 0x03,
'h': 0x04,
'g': 0x05,
'z': 0x06,
'x': 0x07,
'c': 0x08,
'v': 0x09,
'b': 0x0b,
'q': 0x0c,
'w': 0x0d,
'e': 0x0e,
'r': 0x0f,
'y': 0x10,
't': 0x11,
'1': 0x12,
'2': 0x13,
'3': 0x14,
'4': 0x15,
'6': 0x16,
'5': 0x17,
'=': 0x18,
'9': 0x19,
'7': 0x1a,
'-': 0x1b,
'8': 0x1c,
'0': 0x1d,
']': 0x1e,
'o': 0x1f,
'u': 0x20,
'[': 0x21,
'i': 0x22,
'p': 0x23,
'l': 0x25,
'j': 0x26,
'\'': 0x27,
'k': 0x28,
';': 0x29,
'\\': 0x2a,
',': 0x2b,
'/': 0x2c,
'n': 0x2d,
'm': 0x2e,
'.': 0x2f,
'`': 0x32,
' ': 0x31,
'\r': 0x24,
'\t': 0x30,
'shift': 0x38
}
# Taken from ev_keymap.h
# http://www.opensource.apple.com/source/IOHIDFamily/IOHIDFamily-86.1/IOHIDSystem/IOKit/hidsystem/ev_keymap.h
special_key_translate_table = {
'KEYTYPE_SOUND_UP': 0,
'KEYTYPE_SOUND_DOWN': 1,
'KEYTYPE_BRIGHTNESS_UP': 2,
'KEYTYPE_BRIGHTNESS_DOWN': 3,
'KEYTYPE_CAPS_LOCK': 4,
'KEYTYPE_HELP': 5,
'POWER_KEY': 6,
'KEYTYPE_MUTE': 7,
'UP_ARROW_KEY': 8,
'DOWN_ARROW_KEY': 9,
'KEYTYPE_NUM_LOCK': 10,
'KEYTYPE_CONTRAST_UP': 11,
'KEYTYPE_CONTRAST_DOWN': 12,
'KEYTYPE_LAUNCH_PANEL': 13,
'KEYTYPE_EJECT': 14,
'KEYTYPE_VIDMIRROR': 15,
'KEYTYPE_PLAY': 16,
'KEYTYPE_NEXT': 17,
'KEYTYPE_PREVIOUS': 18,
'KEYTYPE_FAST': 19,
'KEYTYPE_REWIND': 20,
'KEYTYPE_ILLUMINATION_UP': 21,
'KEYTYPE_ILLUMINATION_DOWN': 22,
'KEYTYPE_ILLUMINATION_TOGGLE': 23
}
class Keyboard(KeyboardMeta):
def press_key(self, key):
if key in special_key_translate_table:
self._press_special_key(key, True)
else:
self._press_normal_key(key, True)
def release_key(self, key):
if key in special_key_translate_table:
self._press_special_key(key, False)
else:
self._press_normal_key(key, False)
def special_key_assignment(self):
self.volume_mute_key = 'KEYTYPE_MUTE'
self.volume_down_key = 'KEYTYPE_SOUND_DOWN'
self.volume_up_key = 'KEYTYPE_SOUND_UP'
self.media_play_pause_key = 'KEYTYPE_PLAY'
# Doesn't work :(
# self.media_next_track_key = 'KEYTYPE_NEXT'
# self.media_prev_track_key = 'KEYTYPE_PREVIOUS'
def _press_normal_key(self, key, down):
try:
if self.is_char_shifted(key):
key_code = character_translate_table[key.lower()]
event = CGEventCreateKeyboardEvent(None,
character_translate_table['shift'], down)
CGEventPost(kCGHIDEventTap, event)
# Tiny sleep to let OS X catch up on us pressing shift
time.sleep(.01)
else:
key_code = character_translate_table[key]
event = CGEventCreateKeyboardEvent(None, key_code, down)
CGEventPost(kCGHIDEventTap, event)
except KeyError:
raise RuntimeError("Key {} not implemented.".format(key))
def _press_special_key(self, key, down):
""" Helper method for special keys.
Source: http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac
"""
key_code = special_key_translate_table[key]
ev = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
NSSystemDefined, # type
(0,0), # location
0xa00 if down else 0xb00, # flags
0, # timestamp
0, # window
0, # ctx
8, # subtype
(key_code << 16) | ((0xa if down else 0xb) << 8), # data1
-1 # data2
)
CGEventPost(0, ev.CGEvent())
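# Editor's usage sketch (assumes the KeyboardMeta base class in keyboard/base.py
# needs no constructor arguments):
#
#     kb = Keyboard()
#     kb.press_key('a'); kb.release_key('a')    # types a lowercase 'a'
#     kb.press_key('KEYTYPE_SOUND_UP')          # media key: volume up
#     kb.release_key('KEYTYPE_SOUND_UP')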
class KeyboardEvent(KeyboardEventMeta):
def run(self):
tap = CGEventTapCreate(
kCGSessionEventTap,
kCGHeadInsertEventTap,
kCGEventTapOptionDefault,
CGEventMaskBit(kCGEventKeyDown) |
CGEventMaskBit(kCGEventKeyUp),
self.handler,
None)
loopsource = CFMachPortCreateRunLoopSource(None, tap, 0)
loop = CFRunLoopGetCurrent()
CFRunLoopAddSource(loop, loopsource, kCFRunLoopDefaultMode)
CGEventTapEnable(tap, True)
while self.state:
CFRunLoopRunInMode(kCFRunLoopDefaultMode, 5, False)
def handler(self, proxy, type, event, refcon):
key = CGEventGetIntegerValueField(event, kCGKeyboardEventKeycode)
if type == kCGEventKeyDown:
self.key_press(key)
elif type == kCGEventKeyUp:
self.key_release(key)
if self.capture:
CGEventSetType(event, kCGEventNull)
return event
|
gpl-3.0
| -165,510,983,904,530,620
| 27.935323
| 115
| 0.585798
| false
| 3.162588
| false
| false
| false
|
rgayon/plaso
|
plaso/cli/status_view.py
|
1
|
18822
|
# -*- coding: utf-8 -*-
"""The status view."""
from __future__ import unicode_literals
import ctypes
import sys
import time
try:
import win32api
import win32console
except ImportError:
win32console = None
from dfvfs.lib import definitions as dfvfs_definitions
import plaso
from plaso.cli import tools
from plaso.cli import views
class StatusView(object):
"""Processing status view."""
MODE_LINEAR = 'linear'
MODE_WINDOW = 'window'
_SOURCE_TYPES = {
dfvfs_definitions.SOURCE_TYPE_DIRECTORY: 'directory',
dfvfs_definitions.SOURCE_TYPE_FILE: 'single file',
dfvfs_definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE: (
'storage media device'),
dfvfs_definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE: (
'storage media image')}
_UNITS_1024 = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'EiB', 'ZiB', 'YiB']
_WINAPI_STD_OUTPUT_HANDLE = -11
_WINAPI_ENABLE_PROCESSED_INPUT = 1
_WINAPI_ENABLE_LINE_INPUT = 2
_WINAPI_ENABLE_ECHO_INPUT = 4
_WINAPI_ANSI_CONSOLE_MODE = (
_WINAPI_ENABLE_PROCESSED_INPUT | _WINAPI_ENABLE_LINE_INPUT |
_WINAPI_ENABLE_ECHO_INPUT)
def __init__(self, output_writer, tool_name):
"""Initializes a status view.
Args:
output_writer (OutputWriter): output writer.
      tool_name (str): name of the tool.
"""
super(StatusView, self).__init__()
self._artifact_filters = None
self._filter_file = None
self._have_ansi_support = not win32console
self._mode = self.MODE_WINDOW
self._output_writer = output_writer
self._source_path = None
self._source_type = None
self._stdout_output_writer = isinstance(
output_writer, tools.StdoutOutputWriter)
self._storage_file_path = None
self._tool_name = tool_name
if win32console:
kernel32 = ctypes.windll.kernel32
stdout_handle = kernel32.GetStdHandle(self._WINAPI_STD_OUTPUT_HANDLE)
result = kernel32.SetConsoleMode(
stdout_handle, self._WINAPI_ANSI_CONSOLE_MODE)
self._have_ansi_support = result != 0
def _AddsAnalysisProcessStatusTableRow(self, process_status, table_view):
"""Adds an analysis process status table row.
Args:
process_status (ProcessStatus): processing status.
table_view (CLITabularTableView): table view.
"""
used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)
events = ''
if (process_status.number_of_consumed_events is not None and
process_status.number_of_consumed_events_delta is not None):
events = '{0:d} ({1:d})'.format(
process_status.number_of_consumed_events,
process_status.number_of_consumed_events_delta)
event_tags = ''
if (process_status.number_of_produced_event_tags is not None and
process_status.number_of_produced_event_tags_delta is not None):
event_tags = '{0:d} ({1:d})'.format(
process_status.number_of_produced_event_tags,
process_status.number_of_produced_event_tags_delta)
reports = ''
if (process_status.number_of_produced_reports is not None and
process_status.number_of_produced_reports_delta is not None):
reports = '{0:d} ({1:d})'.format(
process_status.number_of_produced_reports,
process_status.number_of_produced_reports_delta)
table_view.AddRow([
process_status.identifier, process_status.pid, process_status.status,
used_memory, events, event_tags, reports])
def _AddExtractionProcessStatusTableRow(self, process_status, table_view):
"""Adds an extraction process status table row.
Args:
process_status (ProcessStatus): processing status.
table_view (CLITabularTableView): table view.
"""
used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)
sources = ''
if (process_status.number_of_produced_sources is not None and
process_status.number_of_produced_sources_delta is not None):
sources = '{0:d} ({1:d})'.format(
process_status.number_of_produced_sources,
process_status.number_of_produced_sources_delta)
events = ''
if (process_status.number_of_produced_events is not None and
process_status.number_of_produced_events_delta is not None):
events = '{0:d} ({1:d})'.format(
process_status.number_of_produced_events,
process_status.number_of_produced_events_delta)
# TODO: shorten display name to fit in 80 chars and show the filename.
table_view.AddRow([
process_status.identifier, process_status.pid, process_status.status,
used_memory, sources, events, process_status.display_name])
def _ClearScreen(self):
"""Clears the terminal/console screen."""
if self._have_ansi_support:
# ANSI escape sequence to clear screen.
self._output_writer.Write('\033[2J')
# ANSI escape sequence to move cursor to top left.
self._output_writer.Write('\033[H')
elif win32console:
# This version of Windows cmd.exe does not support ANSI escape codes, thus
# instead we fill the console screen buffer with spaces. The downside of
# this approach is an annoying flicker.
top_left_coordinate = win32console.PyCOORDType(0, 0)
screen_buffer = win32console.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
screen_buffer_information = screen_buffer.GetConsoleScreenBufferInfo()
screen_buffer_attributes = screen_buffer_information['Attributes']
screen_buffer_size = screen_buffer_information['Size']
console_size = screen_buffer_size.X * screen_buffer_size.Y
screen_buffer.FillConsoleOutputCharacter(
' ', console_size, top_left_coordinate)
screen_buffer.FillConsoleOutputAttribute(
screen_buffer_attributes, console_size, top_left_coordinate)
screen_buffer.SetConsoleCursorPosition(top_left_coordinate)
# TODO: remove update flicker. For win32console we could set the cursor
# top left, write the table, clean the remainder of the screen buffer
# and set the cursor at the end of the table.
def _FormatSizeInUnitsOf1024(self, size):
"""Represents a number of bytes in units of 1024.
Args:
size (int): size in bytes.
Returns:
str: human readable string of the size.
"""
magnitude_1024 = 0
used_memory_1024 = float(size)
while used_memory_1024 >= 1024:
used_memory_1024 /= 1024
magnitude_1024 += 1
if 0 < magnitude_1024 <= 7:
return '{0:.1f} {1:s}'.format(
used_memory_1024, self._UNITS_1024[magnitude_1024])
return '{0:d} B'.format(size)
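  # Editor's example: 2048 -> '2.0 KiB' and 3 * 1024 ** 3 -> '3.0 GiB'; sizes
  # below 1024 bytes (or beyond the unit table) fall back to the plain byte count.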
def _FormatProcessingTime(self, processing_status):
"""Formats the processing time.
Args:
processing_status (ProcessingStatus): processing status.
Returns:
str: processing time formatted as: "5 days, 12:34:56".
"""
processing_time = 0
if processing_status:
processing_time = time.time() - processing_status.start_time
processing_time, seconds = divmod(int(processing_time), 60)
processing_time, minutes = divmod(processing_time, 60)
days, hours = divmod(processing_time, 24)
if days == 0:
days_string = ''
elif days == 1:
days_string = '1 day, '
else:
days_string = '{0:d} days, '.format(days)
return '{0:s}{1:02d}:{2:02d}:{3:02d}'.format(
days_string, hours, minutes, seconds)
def _PrintAnalysisStatusHeader(self, processing_status):
"""Prints the analysis status header.
Args:
processing_status (ProcessingStatus): processing status.
"""
self._output_writer.Write(
'Storage file\t\t: {0:s}\n'.format(self._storage_file_path))
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time\t\t: {0:s}\n'.format(processing_time))
if processing_status and processing_status.events_status:
self._PrintEventsStatus(processing_status.events_status)
self._output_writer.Write('\n')
def _PrintAnalysisStatusUpdateLinear(self, processing_status):
"""Prints an analysis status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time: {0:s}\n'.format(processing_time))
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events consumed: {3:d}\n').format(
processing_status.foreman_status.identifier,
processing_status.foreman_status.pid,
processing_status.foreman_status.status,
processing_status.foreman_status.number_of_consumed_events)
self._output_writer.Write(status_line)
for worker_status in processing_status.workers_status:
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events consumed: {3:d}\n').format(
worker_status.identifier, worker_status.pid, worker_status.status,
worker_status.number_of_consumed_events)
self._output_writer.Write(status_line)
self._output_writer.Write('\n')
def _PrintAnalysisStatusUpdateWindow(self, processing_status):
"""Prints an analysis status update in window mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
if self._stdout_output_writer:
self._ClearScreen()
output_text = 'plaso - {0:s} version {1:s}\n\n'.format(
self._tool_name, plaso.__version__)
self._output_writer.Write(output_text)
self._PrintAnalysisStatusHeader(processing_status)
table_view = views.CLITabularTableView(column_names=[
'Identifier', 'PID', 'Status', 'Memory', 'Events', 'Tags',
'Reports'], column_sizes=[23, 7, 15, 15, 15, 15, 0])
self._AddsAnalysisProcessStatusTableRow(
processing_status.foreman_status, table_view)
for worker_status in processing_status.workers_status:
self._AddsAnalysisProcessStatusTableRow(worker_status, table_view)
table_view.Write(self._output_writer)
self._output_writer.Write('\n')
if processing_status.aborted:
self._output_writer.Write(
'Processing aborted - waiting for clean up.\n\n')
if self._stdout_output_writer:
# We need to explicitly flush stdout to prevent partial status updates.
sys.stdout.flush()
def _PrintExtractionStatusUpdateLinear(self, processing_status):
"""Prints an extraction status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time: {0:s}\n'.format(processing_time))
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events produced: {3:d}, file: '
'{4:s}\n').format(
processing_status.foreman_status.identifier,
processing_status.foreman_status.pid,
processing_status.foreman_status.status,
processing_status.foreman_status.number_of_produced_events,
processing_status.foreman_status.display_name)
self._output_writer.Write(status_line)
for worker_status in processing_status.workers_status:
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events produced: {3:d}, file: '
'{4:s}\n').format(
worker_status.identifier, worker_status.pid, worker_status.status,
worker_status.number_of_produced_events,
worker_status.display_name)
self._output_writer.Write(status_line)
self._output_writer.Write('\n')
def _PrintExtractionStatusUpdateWindow(self, processing_status):
"""Prints an extraction status update in window mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
if self._stdout_output_writer:
self._ClearScreen()
output_text = 'plaso - {0:s} version {1:s}\n\n'.format(
self._tool_name, plaso.__version__)
self._output_writer.Write(output_text)
self.PrintExtractionStatusHeader(processing_status)
table_view = views.CLITabularTableView(column_names=[
'Identifier', 'PID', 'Status', 'Memory', 'Sources', 'Events',
'File'], column_sizes=[15, 7, 15, 15, 15, 15, 0])
self._AddExtractionProcessStatusTableRow(
processing_status.foreman_status, table_view)
for worker_status in processing_status.workers_status:
self._AddExtractionProcessStatusTableRow(worker_status, table_view)
table_view.Write(self._output_writer)
self._output_writer.Write('\n')
if processing_status.aborted:
self._output_writer.Write(
'Processing aborted - waiting for clean up.\n\n')
# TODO: remove update flicker. For win32console we could set the cursor
# top left, write the table, clean the remainder of the screen buffer
# and set the cursor at the end of the table.
if self._stdout_output_writer:
# We need to explicitly flush stdout to prevent partial status updates.
sys.stdout.flush()
def _PrintEventsStatus(self, events_status):
"""Prints the status of the events.
Args:
events_status (EventsStatus): events status.
"""
if events_status:
table_view = views.CLITabularTableView(
column_names=['Events:', 'Filtered', 'In time slice', 'Duplicates',
'MACB grouped', 'Total'],
column_sizes=[15, 15, 15, 15, 15, 0])
table_view.AddRow([
'', events_status.number_of_filtered_events,
events_status.number_of_events_from_time_slice,
events_status.number_of_duplicate_events,
events_status.number_of_macb_grouped_events,
events_status.total_number_of_events])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
def _PrintTasksStatus(self, processing_status):
"""Prints the status of the tasks.
Args:
processing_status (ProcessingStatus): processing status.
"""
if processing_status and processing_status.tasks_status:
tasks_status = processing_status.tasks_status
table_view = views.CLITabularTableView(
column_names=['Tasks:', 'Queued', 'Processing', 'Merging',
'Abandoned', 'Total'],
column_sizes=[15, 7, 15, 15, 15, 0])
table_view.AddRow([
'', tasks_status.number_of_queued_tasks,
tasks_status.number_of_tasks_processing,
tasks_status.number_of_tasks_pending_merge,
tasks_status.number_of_abandoned_tasks,
tasks_status.total_number_of_tasks])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
def GetAnalysisStatusUpdateCallback(self):
"""Retrieves the analysis status update callback function.
Returns:
function: status update callback function or None if not available.
"""
if self._mode == self.MODE_LINEAR:
return self._PrintAnalysisStatusUpdateLinear
if self._mode == self.MODE_WINDOW:
return self._PrintAnalysisStatusUpdateWindow
return None
def GetExtractionStatusUpdateCallback(self):
"""Retrieves the extraction status update callback function.
Returns:
function: status update callback function or None if not available.
"""
if self._mode == self.MODE_LINEAR:
return self._PrintExtractionStatusUpdateLinear
if self._mode == self.MODE_WINDOW:
return self._PrintExtractionStatusUpdateWindow
return None
# TODO: refactor to protected method.
def PrintExtractionStatusHeader(self, processing_status):
"""Prints the extraction status header.
Args:
processing_status (ProcessingStatus): processing status.
"""
self._output_writer.Write(
'Source path\t\t: {0:s}\n'.format(self._source_path))
self._output_writer.Write(
'Source type\t\t: {0:s}\n'.format(self._source_type))
if self._artifact_filters:
artifacts_string = ', '.join(self._artifact_filters)
self._output_writer.Write('Artifact filters\t: {0:s}\n'.format(
artifacts_string))
if self._filter_file:
self._output_writer.Write('Filter file\t\t: {0:s}\n'.format(
self._filter_file))
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time\t\t: {0:s}\n'.format(processing_time))
self._PrintTasksStatus(processing_status)
self._output_writer.Write('\n')
def PrintExtractionSummary(self, processing_status):
"""Prints a summary of the extraction.
Args:
processing_status (ProcessingStatus): processing status.
"""
if not processing_status:
self._output_writer.Write(
'WARNING: missing processing status information.\n')
elif not processing_status.aborted:
if processing_status.error_path_specs:
self._output_writer.Write('Processing completed with errors.\n')
else:
self._output_writer.Write('Processing completed.\n')
number_of_warnings = (
processing_status.foreman_status.number_of_produced_warnings)
if number_of_warnings:
output_text = '\n'.join([
'',
('Number of warnings generated while extracting events: '
'{0:d}.').format(number_of_warnings),
'',
'Use pinfo to inspect warnings in more detail.',
''])
self._output_writer.Write(output_text)
if processing_status.error_path_specs:
output_text = '\n'.join([
'',
'Path specifications that could not be processed:',
''])
self._output_writer.Write(output_text)
for path_spec in processing_status.error_path_specs:
self._output_writer.Write(path_spec.comparable)
self._output_writer.Write('\n')
self._output_writer.Write('\n')
def SetMode(self, mode):
"""Sets the mode.
Args:
mode (str): status view mode.
"""
self._mode = mode
def SetSourceInformation(
self, source_path, source_type, artifact_filters=None, filter_file=None):
"""Sets the source information.
Args:
source_path (str): path of the source.
source_type (str): source type.
artifact_filters (Optional[list[str]]): names of artifact definitions to
use as filters.
filter_file (Optional[str]): filter file.
"""
self._artifact_filters = artifact_filters
self._filter_file = filter_file
self._source_path = source_path
self._source_type = self._SOURCE_TYPES.get(source_type, 'UNKNOWN')
def SetStorageFileInformation(self, storage_file_path):
"""Sets the storage file information.
Args:
storage_file_path (str): path to the storage file.
"""
self._storage_file_path = storage_file_path
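# Editor's usage sketch (output_writer is expected to be one of the
# plaso.cli.tools output writers, e.g. StdoutOutputWriter; the tool name and
# source path below are placeholders):
#
#     status_view = StatusView(output_writer, 'log2timeline')
#     status_view.SetMode(StatusView.MODE_LINEAR)
#     status_view.SetSourceInformation(
#         '/cases/image.dd', dfvfs_definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE)
#     callback = status_view.GetExtractionStatusUpdateCallback()
#     # hand `callback` to the extraction engine as its status update callback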
|
apache-2.0
| -2,176,070,529,547,022,800
| 33.855556
| 80
| 0.660663
| false
| 3.797821
| false
| false
| false
|
ktneely/ir-scripts
|
VulnMgmt/Vuln-tickets.py
|
1
|
9151
|
#!/usr/bin/python3
# This takes an XML report extracted from an OpenVAS VA scanner and
# creates issue tickets on ServiceNow and Redmine systems for tracking
# purposes.
#
# Most parameters are specified in the 'ov_prefs.txt' file, however,
# the XML report file may be specified on the command line. If
# specified this way, the script will ignore that line in the
# preferences file, however, the line must still exist!
# version 0.5
#modules
import os
import sys
import csv
import json
import socket
import requests
from redmine import Redmine
import xml.etree.ElementTree as ET
## Configure your environment through preferences file
# load prefs from ~/.incmgmt/prefs.txt
# The parameters should be in the following format
# DO NOT use comments or blank lines.
# Redmine Project
# Redmine URL
# Redmine API key
# ServiceNow URL
# ServiceNow username
# Servicenow password
# severity level
# OpenVAS XML report file
# Preamble: general info you want included in every ticket created
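# Editor's sketch of a matching ~/.incmgmt/ov_prefs.txt (one value per line, in
# the order listed above; every value below is a placeholder):
#   VulnMgmt
#   https://redmine.example.org
#   0123456789abcdef0123456789abcdef
#   https://snow.example.com/api/now/table/incident
#   svc-vulnbot
#   s3cr3t
#   4.0
#   /home/analyst/reports/openvas-latest.xml
#   This ticket was created automatically from an OpenVAS finding.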
os.chdir(os.path.expanduser("~") + "/.incmgmt/")
prefs = []
for line in open('ov_prefs.txt'):
prefs.append(line)
redmine_project = prefs[0].rstrip()
redmine_server = prefs[1].rstrip()
redmine_key = prefs[2].rstrip()
sn_server = prefs[3].rstrip()
user = prefs[4].rstrip()
pwd = prefs[5].rstrip()
severity_filter = prefs[6].rstrip()
if len(sys.argv) == 1: # test for command line arguments
ov_report = prefs[7].rstrip()
else:
ov_report = sys.argv[1]
preamble = prefs[8].rstrip()
# Define service now headers
headers = {"Content-Type":"application/json","Accept":"application/json"}
# Input the vulnerability report and parse the XML
root = ET.parse(ov_report)
## determine criticality factors
# impact and urgency are used for Service Now
# priority is used for Redmine
def criticality(cvss):
global impact
global urgency
global priority
if float(cvss) > 7:
impact = 2
urgency = 1
priority = 5
elif float(cvss) < 4:
impact = 3
urgency = 3
priority = 3
else:
impact = 2
urgency = 2
priority = 4
return impact, urgency, priority
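# Editor's worked example: criticality('9.8') -> (2, 1, 5),
# criticality('5.0') -> (2, 2, 4), criticality('3.1') -> (3, 3, 3),
# as (impact, urgency, priority).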
def reverse_lookup(ip):
try:
hostname = socket.gethostbyaddr(ip)[0]
except socket.herror:
hostname = " "
return hostname
## determine category
""" Redmine reference
0 nothing
53 Database
54 Networking
56 Server - Unix
55 Server - Windows
57 Web Application """
## Function to categorize the issue for all ticketing systems
# category is used for redmine, and subcategory is used for
# ServiceNow because it has a default high-level category for vulns
def categorize(family):
if family == "Web application abuses" or "Web Servers":
category = 57
subcategory = "Internal Application"
elif family == "Databases":
category = 53
subcategory = "Internal Application"
elif family == "General":
category = 56
subcategory = "UNIX"
elif "CentOS" in family:
category = 56
subcategory = "UNIX"
elif "Windows" in family:
category = 55
subcategory = "Windows"
else:
category = 0
subcategory = " "
return category, subcategory
#Specify Redmine server params
redmine = Redmine(redmine_server, requests={'verify': False}, key=redmine_key, version='2.5.1')
def redmine_issue(priority, subject, body, category):
## Create an issue in Redmine to track the vulnerability
# and return information regarding the created ticket
new_issue = redmine.issue.create(project_id = redmine_project, \
priority_id = priority, subject = subject, description = body,\
tracker_id=19, category_id = category)
redmine_issue_id = str(new_issue.id)
redmine_url = redmine_server + "/issues/" + redmine_issue_id
print("redmine ticket created")
return redmine_url, redmine_issue_id
def sn_issue(subject, redmine_url, subcategory, impact, urgency):
## Create the incident in ServiceNow
    # Construct the incident JSON object; json.dumps handles quoting and
    # escaping of the interpolated values safely.
    incident_data = json.dumps({
        "short_description": subject,
        "description": "For more information, see: " + redmine_url,
        "u_category": "Vulnerability Management",
        "u_subcategory": subcategory,
        "impact": str(impact),
        "urgency": str(urgency),
        "contact_type": "Alert",
    })
# Create the incident on the Service Now system
response = requests.post(sn_server, auth=(user, pwd), \
headers=headers, data=incident_data)
# Capture the ticket number and unique identifier
sn_ticket = response.json()['result']['number']
sys_id = response.json()['result']['sys_id']
print("service now ticket created")
return sn_ticket, sys_id
# Update the Service Now ticket with a comment
def sn_update(sys_id, comment):
sn_url = sn_server + '/' + sys_id # REST URL for the ticket
    update = requests.patch(sn_url, auth=(user, pwd), headers=headers,
                            data=json.dumps({"comments": comment}))
    if update.status_code != 200:
        print('Status:', update.status_code, 'Headers:',
              update.headers, 'Error Response:', update.json())
        sys.exit()
print("Updated Service Now ticket" + " " + sys_id) # user output
# checks for a ticket with the exact same "subject" or "short
# description" on the Redmine system.
def CheckTickets(subject):
i = 0
project = redmine.project.get(redmine_project)
while i < len(project.issues):
# print("Checking: " + str(project.issues[i]))
if str(project.issues[i]) == subject:
incident_id = project.issues[i].id
opentix_log = csv.reader(open('opentix.csv'))
# Generate a dictionary of the known open tickets. This
# should really be performed at the beginning so it
            # doesn't run every time, but meh!
tix_dict = {}
for row in opentix_log:
tix_dict[row[0]]=row[2]
sn_sysid = tix_dict[str(incident_id)]
print("Found match: " + tix_dict[str(incident_id)] + " " + str(project.issues[i])) # debug
return sn_sysid # return a value for test
i += 1
return None # if the test fails, return nothing
def log(redmine_issue_id, sn_ticket, sys_id, redmine_url):
# Write log file of tickets created
ticket_log = open('ticketlog.csv','a')
opentix_log = open('opentix.csv','a')
ticket_log.write(redmine_issue_id + ',' + sn_ticket + ',' + \
sys_id + ',' + redmine_url + ',' + '\n')
opentix_log.write(redmine_issue_id + ',' + sn_ticket + ',' + \
sys_id + '\n')
ticket_log.close()
opentix_log.close()
## Main program. Extract the data, then call functions
# Extract elements from the XML for use in creating the ticket
for result in root.findall("./report/results/result"):
# only process vulnerabilities of a certain severity or higher
if result.find('overrides/override/new_severity') is not None:
cvss = result.find('overrides/override/new_severity').text
else:
cvss = result.find('severity').text
if float(cvss) >= float(severity_filter):
# Extract the elements from the XML
host_ip = result.find('host').text
severity = result.find('severity').text
if result.find('description').text is not None:
description = result.find('description').text
else:
description = "no extended description available"
short_desc = result.find('nvt/name').text
cvss = result.find('nvt/cvss_base').text
cve = result.find('nvt/cve').text
        system_type = result.find('nvt/family').text
# get some additional info based on extracted values
hostname = reverse_lookup(host_ip) # perform name lookup
impact, urgency, priority = criticality(severity)
category, subcategory = categorize(system_type)
full_desc = result.find('nvt/tags').text
criticality(cvss) # calc criticality levels
subject = short_desc + " detected on " + hostname + " " + host_ip
# Create the body of the ticket by combining multiple elements from
# the report file.
body = preamble + "\n \n" + full_desc + "\n \n CVEs:" + cve +\
"\n \n Description: \n" + description
        # Check for a currently active ticket for the same issue.
previous = CheckTickets(subject)
# Create a new ticket if one does not exist.
if previous is not None:
sn_update(previous, "Please provide an update for this ticket")
else:
# create the issues in redmine and return info
redmine_url, redmine_issue_id = redmine_issue(priority, \
subject, body, category)
# create the issues in ServiceNow and return info
sn_ticket, sys_id = sn_issue(subject, redmine_url, \
subcategory, impact, urgency)
log (redmine_issue_id, sn_ticket, sys_id, redmine_url)
|
gpl-3.0
| 3,450,388,158,392,082,000
| 35.899194
| 103
| 0.635013
| false
| 3.758111
| false
| false
| false
|
easyw/kicad-3d-models-in-freecad
|
cadquery/FCAD_script_generator/4UCON_17809/cq_models/conn_4ucon_17809.py
|
1
|
10895
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CadQuery script to generate connector models
## requirements
## freecad (v1.5 and v1.6 have been tested)
## cadquery FreeCAD plugin (v0.3.0 and v0.2.0 have been tested)
## https://github.com/jmwright/cadquery-freecad-module
## This script can be run from within the cadquery module of freecad.
## To generate VRML/STEP files, use the export_conn_jst_xh
## script in the parent directory.
#* This is a cadquery script for the generation of MCAD Models. *
#* *
#* Copyright (c) 2016 *
#* Rene Poeschl https://github.com/poeschlr *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU General Public License (GPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#* The models generated with this script add the following exception: *
#* As a special exception, if you create a design which uses this symbol, *
#* and embed this symbol or unaltered portions of this symbol into the *
#* design, this symbol does not by itself cause the resulting design to *
#* be covered by the GNU General Public License. This exception does not *
#* however invalidate any other reasons why the design itself might be *
#* covered by the GNU General Public License. If you modify this symbol, *
#* you may extend this exception to your version of the symbol, but you *
#* are not obligated to do so. If you do not wish to do so, delete this *
#* exception statement from your version. *
#****************************************************************************
__title__ = "model description for 4UCON 17809 series connectors"
__author__ = "hackscribble"
__Comment__ = 'model description for 4UCON 17809 series connectors using cadquery'
___ver___ = "0.3 18/06/2020"
import cadquery as cq
from Helpers import show
from collections import namedtuple
import FreeCAD
from conn_4ucon_17809_params import *
from ribbon import Ribbon
def generate_straight_pin(params, pin_1_side):
foot_height = seriesParams.foot_height
pin_width=seriesParams.pin_width
pin_depth=seriesParams.pin_depth
pin_height=seriesParams.pin_height
pin_inside_distance=seriesParams.pin_inside_distance
pin_thickness = seriesParams.pin_thickness
chamfer_long = seriesParams.pin_chamfer_long
chamfer_short = seriesParams.pin_chamfer_short
sign = 1 if pin_1_side else -1
pin=cq.Workplane("YZ").workplane(offset=-pin_width/2.0)\
.moveTo(0, foot_height)\
.line(sign*pin_thickness/2,0)\
.line(sign*1.27,-foot_height)\
.line(0, -2.54)\
.line(sign*-pin_thickness,0)\
.line(0, 2.54)\
.line(sign*-1.27, foot_height)\
.line(0,1)\
.close()\
.extrude(pin_width).edges("|X").fillet(0.07)
return pin
def generate_2_pin_group(params, pin_1_side):
pin_pitch=params.pin_pitch
pin_y_pitch=params.pin_y_pitch
num_pins=params.num_pins
pin_a = generate_straight_pin(params, pin_1_side).translate((0, -pin_y_pitch/2, 0))
pin_b = pin_a.translate((0, -2 * pin_y_pitch, 0))
pin_group = pin_a.union(pin_b)
return pin_group
def generate_pins(params):
pin_pitch=params.pin_pitch
num_pins=params.num_pins
pins = generate_2_pin_group(params, pin_1_side=True)
for i in range(1, num_pins // 2):
pins = pins.union(generate_2_pin_group(params, i % 2 == 0).translate((i*pin_pitch,0,0)))
return pins
def generate_2_contact_group(params):
pin_y_pitch=params.pin_y_pitch
foot_height = seriesParams.foot_height
pin_thickness = seriesParams.pin_thickness
pin_width=seriesParams.pin_width
y_offset = -(2*pin_y_pitch)
c_list = [
('start', {'position': (pin_y_pitch, foot_height), 'direction': 90.0, 'width':pin_thickness}),
('line', {'length': 4.5}),
('arc', {'radius': 0.2, 'angle': 35.0}),
('line', {'length': 3}),
('arc', {'radius': 2.0, 'angle': -70.0}),
('line', {'length': 2}),
('arc', {'radius': 0.2, 'angle': 35.0}),
('line', {'length': 2.8}),
]
ribbon = Ribbon(cq.Workplane("YZ").workplane(offset=-pin_width/2.0), c_list)
contact1 = ribbon.drawRibbon().extrude(pin_width)
contact2 = contact1.mirror("XZ")
contact1 = contact1.union(contact2).translate((0,-3*pin_y_pitch/2.0,0))
return contact1
def generate_contacts(params):
num_pins=params.num_pins
pin_pitch=params.pin_pitch
pair = generate_2_contact_group(params)
contacts = pair
for i in range(0, num_pins // 2):
contacts = contacts.union(pair.translate((i*pin_pitch,0,0)))
return contacts
def generate_body(params, calc_dim):
pin_inside_distance = seriesParams.pin_inside_distance
pin_width = seriesParams.pin_width
num_pins = params.num_pins
pin_pitch = params.pin_pitch
pin_y_pitch=params.pin_y_pitch
body_length = calc_dim.length
body_width = seriesParams.body_width
body_height = seriesParams.body_height
body_fillet_radius = seriesParams.body_fillet_radius
marker_x_inside = seriesParams.marker_x_inside
marker_y_inside = seriesParams.marker_y_inside
marker_size = seriesParams.marker_size
marker_depth = seriesParams.marker_depth
foot_height = seriesParams.foot_height
foot_width = seriesParams.foot_width
foot_length = seriesParams.foot_length
foot_inside_distance = seriesParams.foot_inside_distance
slot_length = calc_dim.slot_length
slot_outside_pin = seriesParams.slot_outside_pin
slot_width = seriesParams.slot_width
slot_depth = seriesParams.slot_depth
slot_chamfer = seriesParams.slot_chamfer
hole_width = seriesParams.hole_width
hole_length = seriesParams.hole_length
hole_offset = seriesParams.hole_offset
hole_depth = seriesParams.hole_depth
top_void_depth = seriesParams.top_void_depth
top_void_width = seriesParams.top_void_width
bottom_void_width = calc_dim.bottom_void_width
recess_depth = seriesParams.recess_depth
recess_large_width = seriesParams.recess_large_width
recess_small_width = seriesParams.recess_small_width
recess_height = seriesParams.recess_height
x_offset = (((num_pins // 2) - 1)*pin_pitch)/2.0
y_offset = -(1.5*pin_y_pitch)
# body
body = cq.Workplane("XY").workplane(offset=foot_height).moveTo(x_offset, y_offset)\
.rect(body_length, body_width).extrude(body_height)\
.edges("|Z").fillet(body_fillet_radius).edges(">Z").fillet(body_fillet_radius)
# pin 1 marker
body = body.faces(">Z").workplane().moveTo(-(body_length/2)+marker_x_inside, (body_width/2)-marker_y_inside)\
.line(-marker_size,-marker_size/2).line(0, marker_size).close().cutBlind(-marker_depth)
# foot
foot = cq.Workplane("YZ").workplane(offset=(body_length/2)-foot_inside_distance)\
.moveTo(y_offset - foot_length/2, 0)\
.line(foot_length*0.2,0)\
.line(0,foot_height/2)\
.line(foot_length*0.6,0)\
.line(0,-foot_height/2)\
.line(foot_length*0.2,0)\
.line(0,foot_height)\
.line(-foot_length,0)\
.close()\
.extrude(-foot_width)
foot_mirror = foot.mirror("YZ")
foot = foot.union(foot_mirror).translate((x_offset, 0, 0))
body = body.union(foot)
# slot
body = body.faces(">Z").workplane().rect(slot_length, slot_width).cutBlind(-slot_depth)
chamfer = cq.Workplane("XY").workplane(offset=foot_height+body_height).moveTo(x_offset, y_offset) \
.rect(slot_length+2*slot_chamfer, slot_width+2*slot_chamfer) \
.workplane(offset=-slot_chamfer).rect(slot_length, slot_width) \
.loft(combine=True)
body = body.cut(chamfer)
# contact holes
body = body.faces(">Z").workplane().center(0, hole_offset)\
.rarray(pin_pitch, 1, (num_pins//2), 1).rect(hole_width, hole_length)\
.center(0, -2*hole_offset)\
.rarray(pin_pitch, 1, (num_pins//2), 1).rect(hole_width, hole_length)\
.cutBlind(-2)
# internal void
body = body.faces(">Z").workplane(offset=-hole_depth)\
.rarray(pin_pitch, 1, (num_pins//2), 1).rect(hole_width, top_void_width)\
.cutBlind(-(top_void_depth-hole_depth))
body = body.faces(">Z").workplane(offset=-top_void_depth)\
.rarray(pin_pitch, 1, (num_pins//2), 1).rect(hole_width, bottom_void_width)\
.cutBlind(-(body_height-top_void_depth))
# body end recesses
body = body.faces(">Z").workplane().center(body_length/2.0-recess_depth/2.0, 0)\
.rect(recess_depth, recess_small_width).cutBlind(-recess_height)
recess = cq.Workplane("XY").workplane(offset=foot_height+body_height).center(x_offset-body_length/2.0+recess_depth/2.0, y_offset)\
.rect(recess_depth, recess_large_width).extrude(-recess_height).edges(">X").edges("|Z").fillet(0.3)
body = body.cut(recess)
return body
def generate_part(part_key):
params = all_params[part_key]
calc_dim = dimensions(params)
pins = generate_pins(params)
body = generate_body(params, calc_dim)
contacts = generate_contacts(params)
return (pins, body, contacts)
# opened from within freecad
if "module" in __name__:
part_to_build = 'ucon_17809_02x10_1.27mm'
FreeCAD.Console.PrintMessage("Started from CadQuery: building " +
part_to_build + "\n")
(pins, body, contacts) = generate_part(part_to_build)
show(pins)
show(body)
show(contacts)
|
gpl-2.0
| 7,542,962,034,964,685,000
| 38.908425
| 134
| 0.616246
| false
| 3.378295
| false
| false
| false
|
lavizhao/keyword
|
data_analysis/ana_words.py
|
1
|
1087
|
#coding: utf-8
import sys
def morethan(keyword,n):
"""
Arguments:
- `keyword`:
- `n`:
"""
ans = 0
for line in keyword:
if len(line.split()) - 1 <= n :
ans += 1
print "少余%s的词占总的百分比为%s"%(n,1.0*ans/len(keyword))
def aw(kf,nc):
"""
"""
f = open(kf)
print kf
keyword = f.readlines()
print "总关键词长度:",len(keyword)
morethan(keyword,1000)
morethan(keyword,100)
morethan(keyword,10)
morethan(keyword,5)
morethan(keyword,2)
twf = open(nc,"w")
a = 0
for line in keyword:
if len(line.split()) - 1 <= 200 :
twf.write(line)
a += 1
print "处理后词表长度",a
twf.close()
def usage():
"""
"""
print '''
    Counting file:
python ana_words.py ../data/counting.txt ../data/new_counting.txt
'''
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv)!= 3:
usage()
kf = sys.argv[1]
nc = sys.argv[2]
    # analyze the words
aw(kf,nc)
|
apache-2.0
| -7,799,968,741,795,487,000
| 16.40678
| 71
| 0.484907
| false
| 2.798365
| false
| false
| false
|
a10networks/a10sdk-python
|
a10sdk/core/waf/waf_wsdl.py
|
2
|
1276
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Wsdl(A10BaseClass):
"""Class Description::
Manage Web Services Definition Language files.
Class wsdl supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param max_filesize: {"description": "Set maximum WSDL file size (Maximum file size in KBytes, default is 32K)", "partition-visibility": "shared", "default": 32, "optional": true, "format": "number", "maximum": 256, "minimum": 16, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/waf/wsdl`.
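    Example (a minimal instantiation sketch; performing CRUD operations
    additionally requires a configured DeviceProxy/session)::
        wsdl = Wsdl(max_filesize=64)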
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "wsdl"
self.a10_url="/axapi/v3/waf/wsdl"
self.DeviceProxy = ""
self.uuid = ""
self.max_filesize = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
|
apache-2.0
| 6,820,183,366,621,144,000
| 33.486486
| 252
| 0.631661
| false
| 3.687861
| false
| false
| false
|
jasonrbriggs/stomp.py
|
tests/test_basic.py
|
1
|
8361
|
import signal
from time import monotonic
import stomp
from stomp.listener import TestListener
from .testutils import *
@pytest.fixture()
def testlistener():
yield TestListener("123", print_to_log=True)
@pytest.fixture()
def conn(testlistener):
conn = stomp.Connection11(get_default_host())
conn.set_listener("testlistener", testlistener)
conn.connect(get_default_user(), get_default_password(), wait=True)
yield conn
conn.disconnect(receipt=None)
@pytest.fixture()
def invalidconn(testlistener):
conn = stomp.Connection([("192.0.2.0", 60000)], timeout=5, reconnect_attempts_max=1)
conn.set_listener("testlistener", testlistener)
yield conn
class TestBasic(object):
def test_subscribe_and_send(self, conn, testlistener):
queuename = "/queue/test1-%s" % testlistener.timestamp
conn.subscribe(destination=queuename, id=1, ack="auto")
conn.send(body='{"val": "this is a test"}', destination=queuename,
content_type="application/json", receipt="123")
validate_send(conn)
(headers, body) = testlistener.get_latest_message()
assert "content-type" in headers
assert headers["content-type"] == "application/json"
def test_default_to_localhost(self):
conn = stomp.Connection()
listener = TestListener("123", print_to_log=True)
queuename = "/queue/test1-%s" % listener.timestamp
conn.set_listener("testlistener", listener)
conn.connect(get_rabbitmq_user(), get_rabbitmq_password(), wait=True)
conn.send(body="this is a test", destination=queuename, receipt="123")
conn.disconnect(receipt=None)
def test_commit(self, conn):
timestamp = time.strftime("%Y%m%d%H%M%S")
queuename = "/queue/test2-%s" % timestamp
conn.subscribe(destination=queuename, id=1, ack="auto")
trans_id = conn.begin()
conn.send(body="this is a test1", destination=queuename, transaction=trans_id)
conn.send(body="this is a test2", destination=queuename, transaction=trans_id)
conn.send(body="this is a test3", destination=queuename, transaction=trans_id, receipt="123")
time.sleep(3)
listener = conn.get_listener("testlistener")
assert listener.connections == 1, "should have received 1 connection acknowledgement"
assert listener.messages == 0, "should not have received any messages"
conn.commit(transaction=trans_id)
listener.wait_for_message()
time.sleep(3)
assert listener.messages == 3, "should have received 3 messages"
assert listener.errors == 0, "should not have received any errors"
def test_abort(self, conn):
timestamp = time.strftime("%Y%m%d%H%M%S")
queuename = "/queue/test3-%s" % timestamp
conn.subscribe(destination=queuename, id=1, ack="auto")
trans_id = conn.begin()
conn.send(body="this is a test1", destination=queuename, transaction=trans_id)
conn.send(body="this is a test2", destination=queuename, transaction=trans_id)
conn.send(body="this is a test3", destination=queuename, transaction=trans_id)
time.sleep(3)
listener = conn.get_listener("testlistener")
assert listener.connections == 1, "should have received 1 connection acknowledgement"
assert listener.messages == 0, "should not have received any messages"
conn.abort(transaction=trans_id)
time.sleep(3)
assert listener.messages == 0, "should not have received any messages"
assert listener.errors == 0, "should not have received any errors"
def test_timeout(self, invalidconn):
ms = monotonic()
try:
invalidconn.connect("test", "test")
pytest.fail("shouldn't happen")
except stomp.exception.ConnectFailedException:
pass # success!
ms = monotonic() - ms
assert ms > 5.0, "connection timeout should have been at least 5 seconds"
def test_childinterrupt(self, conn):
def childhandler(signum, frame):
print("received child signal")
oldhandler = signal.signal(signal.SIGCHLD, childhandler)
timestamp = time.strftime("%Y%m%d%H%M%S")
queuename = "/queue/test5-%s" % timestamp
conn.subscribe(destination=queuename, id=1, ack="auto", receipt="123")
listener = conn.get_listener("testlistener")
listener.wait_on_receipt()
conn.send(body="this is an interrupt test 1", destination=queuename)
print("causing signal by starting child process")
os.system("sleep 1")
time.sleep(1)
signal.signal(signal.SIGCHLD, oldhandler)
print("completed signal section")
conn.send(body="this is an interrupt test 2", destination=queuename, receipt="123")
listener.wait_for_message()
assert listener.connections == 1, "should have received 1 connection acknowledgment"
assert listener.errors == 0, "should not have received any errors"
assert conn.is_connected(), "should still be connected to STOMP provider"
def test_clientack(self, conn):
timestamp = time.strftime("%Y%m%d%H%M%S")
queuename = "/queue/testclientack-%s" % timestamp
conn.subscribe(destination=queuename, id=1, ack="client")
conn.send(body="this is a test", destination=queuename, receipt="123")
listener = conn.get_listener("testlistener")
listener.wait_for_message()
(headers, _) = listener.get_latest_message()
message_id = headers["message-id"]
subscription = headers["subscription"]
conn.ack(message_id, subscription)
def test_clientnack(self, conn):
timestamp = time.strftime("%Y%m%d%H%M%S")
queuename = "/queue/testclientnack-%s" % timestamp
conn.subscribe(destination=queuename, id=1, ack="client")
conn.send(body="this is a test", destination=queuename, receipt="123")
listener = conn.get_listener("testlistener")
listener.wait_for_message()
(headers, _) = listener.get_latest_message()
message_id = headers["message-id"]
subscription = headers["subscription"]
conn.nack(message_id, subscription)
def test_specialchars(self, conn):
timestamp = time.strftime("%Y%m%d%H%M%S")
queuename = "/queue/testspecialchars-%s" % timestamp
conn.subscribe(destination=queuename, id=1, ack="client")
hdrs = {
"special-1": "test with colon : test",
"special-2": "test with backslash \\ test",
"special-3": "test with newline \n"
}
conn.send(body="this is a test", headers=hdrs, destination=queuename, receipt="123")
listener = conn.get_listener("testlistener")
listener.wait_for_message()
(headers, _) = listener.get_latest_message()
_ = headers["message-id"]
_ = headers["subscription"]
assert "special-1" in headers
assert "test with colon : test" == headers["special-1"]
assert "special-2" in headers
assert "test with backslash \\ test" == headers["special-2"]
assert "special-3" in headers
assert "test with newline \n" == headers["special-3"]
def test_host_bind_port(self):
conn = stomp.Connection(bind_host_port=("localhost", next_free_port()))
listener = TestListener("981", print_to_log=True)
queuename = "/queue/testbind-%s" % listener.timestamp
conn.set_listener("testlistener", listener)
conn.connect(get_rabbitmq_user(), get_rabbitmq_password(), wait=True)
conn.send(body="this is a test using local bind port", destination=queuename, receipt="981")
conn.disconnect(receipt=None)
class TestConnectionErrors(object):
def test_connect_wait_error(self):
conn = stomp.Connection(get_default_host())
try:
conn.connect("invalid", "user", True)
pytest.fail("Shouldn't happen")
except:
pass
def test_connect_nowait_error(self):
conn = stomp.Connection(get_default_host())
try:
conn.connect("invalid", "user", False)
assert not conn.is_connected(), "Should not be connected"
except:
pytest.fail("Shouldn't happen")
|
apache-2.0
| -6,242,501,988,846,949,000
| 35.995575
| 101
| 0.640115
| false
| 3.849448
| true
| false
| false
|
phiros/nepi
|
src/nepi/resources/linux/ns3/ccn/ns3ccnrdceapplication.py
|
1
|
9256
|
#
# NEPI, a framework to manage network experiments
# Copyright (C) 2014 INRIA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.ns3.ccn.ns3ccndceapplication import LinuxNS3CCNDceApplication
@clsinit_copy
class LinuxNS3DceCCNR(LinuxNS3CCNDceApplication):
_rtype = "linux::ns3::dce::CCNR"
@classmethod
def _register_attributes(cls):
max_fanout = Attribute("maxFanout",
"Sets the CCNR_BTREE_MAX_FANOUT environmental variable. ",
flags = Flags.Design)
max_leaf_entries = Attribute("maxLeafEntries",
"Sets the CCNR_BTREE_MAX_LEAF_ENTRIES environmental variable. ",
flags = Flags.Design)
max_node_bytes = Attribute("maxNodeBytes",
"Sets the CCNR_BTREE_MAX_NODE_BYTES environmental variable. ",
flags = Flags.Design)
max_node_pool = Attribute("maxNodePool",
"Sets the CCNR_BTREE_MAX_NODE_POOL environmental variable. ",
flags = Flags.Design)
content_cache = Attribute("contentCache",
"Sets the CCNR_CONTENT_CACHE environmental variable. ",
flags = Flags.Design)
debug = Attribute("debug",
"Sets the CCNR_DEBUG environmental variable. "
"Logging level for ccnr. Defaults to WARNING.",
type = Types.Enumerate,
allowed = [
"NONE",
"SEVERE",
"ERROR",
"WARNING",
"INFO",
"FINE, FINER, FINEST"],
flags = Flags.Design)
directory = Attribute("directory",
"Sets the CCNR_DIRECTORY environmental variable. ",
flags = Flags.Design)
global_prefix = Attribute("globalPrefix",
"Sets the CCNR_GLOBAL_PREFIX environmental variable. ",
flags = Flags.Design)
listen_on = Attribute("listenOn",
"Sets the CCNR_LISTEN_ON environmental variable. ",
flags = Flags.Design)
min_send_bufsize = Attribute("minSendBufsize",
"Sets the CCNR_MIN_SEND_BUFSIZE environmental variable. ",
flags = Flags.Design)
proto = Attribute("proto",
"Sets the CCNR_PROTO environmental variable. ",
flags = Flags.Design)
status_port = Attribute("statusPort",
"Sets the CCNR_STATUS_PORT environmental variable. ",
flags = Flags.Design)
start_write_scope_limit = Attribute("startWriteScopeLimit",
"Sets the CCNR_START_WRITE_SCOPE_LIMIT environmental variable. ",
flags = Flags.Design)
ccns_debug = Attribute("ccnsDebug",
"Sets the CCNS_DEBUG environmental variable. ",
flags = Flags.Design)
ccns_enable = Attribute("ccnsEnable",
"Sets the CCNS_ENABLE environmental variable. ",
flags = Flags.Design)
ccns_faux_error = Attribute("ccnsFauxError",
"Sets the CCNS_FAUX_ERROR environmental variable. ",
flags = Flags.Design)
ccns_heartbeat_micros = Attribute("ccnsHeartBeatMicros",
"Sets the CCNS_HEART_BEAT_MICROS environmental variable. ",
flags = Flags.Design)
ccns_max_compares_busy = Attribute("ccnsMaxComparesBusy",
"Sets the CCNS_MAX_COMPARES_BUSY environmental variable. ",
flags = Flags.Design)
ccns_max_fetch_busy = Attribute("ccnsMaxFetchBusy",
"Sets the CCNS_MAX_FETCH_BUSY environmental variable. ",
flags = Flags.Design)
ccns_node_fetch_lifetime = Attribute("ccnsNodeFetchLifetime",
"Sets the CCNS_NODE_FETCH_LIFETIME environmental variable. ",
flags = Flags.Design)
ccns_note_err = Attribute("ccnsNoteErr",
"Sets the CCNS_NOTE_ERR environmental variable. ",
flags = Flags.Design)
ccns_repo_store = Attribute("ccnsRepoStore",
"Sets the CCNS_REPO_STORE environmental variable. ",
flags = Flags.Design)
ccns_root_advise_fresh = Attribute("ccnsRootAdviseFresh",
"Sets the CCNS_ROOT_ADVISE_FRESH environmental variable. ",
flags = Flags.Design)
ccns_root_advise_lifetime = Attribute("ccnsRootAdviseLifetime",
"Sets the CCNS_ROOT_ADVISE_LIFETIME environmental variable. ",
flags = Flags.Design)
ccns_stable_enabled = Attribute("ccnsStableEnabled",
"Sets the CCNS_STABLE_ENABLED environmental variable. ",
flags = Flags.Design)
ccns_sync_scope = Attribute("ccnsSyncScope",
"Sets the CCNS_SYNC_SCOPE environmental variable. ",
flags = Flags.Design)
repo_file = Attribute("repoFile1",
"The Repository uses $CCNR_DIRECTORY/repoFile1 for "
"persistent storage of CCN Content Objects",
flags = Flags.Design)
cls._register_attribute(max_fanout)
cls._register_attribute(max_leaf_entries)
cls._register_attribute(max_node_bytes)
cls._register_attribute(max_node_pool)
cls._register_attribute(content_cache)
cls._register_attribute(debug)
cls._register_attribute(directory)
cls._register_attribute(global_prefix)
cls._register_attribute(listen_on)
cls._register_attribute(min_send_bufsize)
cls._register_attribute(proto)
cls._register_attribute(status_port)
cls._register_attribute(start_write_scope_limit)
cls._register_attribute(ccns_debug)
cls._register_attribute(ccns_enable)
cls._register_attribute(ccns_faux_error)
cls._register_attribute(ccns_heartbeat_micros)
cls._register_attribute(ccns_max_compares_busy)
cls._register_attribute(ccns_max_fetch_busy)
cls._register_attribute(ccns_node_fetch_lifetime)
cls._register_attribute(ccns_note_err)
cls._register_attribute(ccns_repo_store)
cls._register_attribute(ccns_root_advise_fresh)
cls._register_attribute(ccns_root_advise_lifetime)
cls._register_attribute(ccns_stable_enabled)
cls._register_attribute(ccns_sync_scope)
cls._register_attribute(repo_file)
def _instantiate_object(self):
if not self.get("binary"):
self.set("binary", "ccnr")
if not self.get("environment"):
self.set("environment", self._environment)
repoFile1 = self.get("repoFile1")
if repoFile1:
env = "CCNR_DIRECTORY=/REPO/"
environment = self.get("environment")
if environment:
env += ";" + environment
self.set("environment", env)
self.set("files", "%s=/REPO/repoFile1" % repoFile1)
super(LinuxNS3DceCCNR, self)._instantiate_object()
@property
def _environment(self):
envs = dict({
"maxFanout": "CCNR_BTREE_MAX_FANOUT",
"maxLeafEntries": "CCNR_BTREE_MAX_LEAF_ENTRIES",
"maxNodeBytes": "CCNR_BTREE_MAX_NODE_BYTES",
"maxNodePool": "CCNR_BTREE_MAX_NODE_POOL",
"contentCache": "CCNR_CONTENT_CACHE",
"debug": "CCNR_DEBUG",
"directory": "CCNR_DIRECTORY",
"globalPrefix": "CCNR_GLOBAL_PREFIX",
"listenOn": "CCNR_LISTEN_ON",
"minSendBufsize": "CCNR_MIN_SEND_BUFSIZE",
"proto": "CCNR_PROTO",
"statusPort": "CCNR_STATUS_PORT",
"startWriteScopeLimit": "CCNR_START_WRITE_SCOPE_LIMIT",
"ccnsDebug": "CCNS_DEBUG",
"ccnsEnable": "CCNS_ENABLE",
"ccnsFauxError": "CCNS_FAUX_ERROR",
"ccnsHeartBeatMicros": "CCNS_HEART_BEAT_MICROS",
"ccnsMaxComparesBusy": "CCNS_MAX_COMPARES_BUSY",
"ccnsMaxFetchBusy": "CCNS_MAX_FETCH_BUSY",
"ccnsNodeFetchLifetime": "CCNS_NODE_FETCH_LIFETIME",
"ccnsNoteErr": "CCNS_NOTE_ERR",
"ccnsRepoStore": "CCNS_REPO_STORE",
"ccnsRootAdviseFresh": "CCNS_ROOT_ADVISE_FRESH",
"ccnsRootAdviseLifetime": "CCNS_ROOT_ADVISE_LIFETIME",
"ccnsStableEnabled": "CCNS_STABLE_ENABLED",
"ccnsSyncScope": "CCNS_SYNC_SCOPE",
})
env = ";".join(map(lambda k: "%s=%s" % (envs.get(k), str(self.get(k))),
[k for k in envs.keys() if self.get(k)]))
return env
|
gpl-3.0
| 6,438,432,219,293,200,000
| 39.243478
| 87
| 0.611927
| false
| 3.782591
| false
| false
| false
|
cswaney/hfttools
|
prickle/core.py
|
1
|
65651
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import struct
import h5py
import time
import os
class Database():
"""Connection to an HDF5 database storing message and order book data.
Parameters
----------
path : string
Specifies location of the HDF5 file
names : list
Contains the stock tickers to include in the database
nlevels : int
        Specifies the number of levels to include in the order book data
    method : string
        Specifies the storage backend, either 'hdf5' or 'csv'
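    Examples
    --------
    Open (or create) a database, then close it when finished (a minimal
    sketch; the path and tickers are hypothetical)::
    >> db = pk.Database(path='itch.hdf5', names=['GOOG', 'AAPL'], nlevels=10, method='hdf5')
    >> db.close()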
"""
def __init__(self, path, names, nlevels, method):
self.method = method
if self.method == 'hdf5':
try:
self.file = h5py.File(path, 'r+') # read/write, file must exist
print('Appending existing HDF5 file.')
for name in names:
if name in self.file['messages'].keys():
print('Overwriting message data for {}'.format(name))
del self.file['messages'][name]
if name in self.file['orderbooks'].keys():
print('Overwriting orderbook data for {}'.format(name))
del self.file['orderbooks'][name]
if name in self.file['trades'].keys():
print('Overwriting trades data for {}'.format(name))
del self.file['trades'][name]
if name in self.file['noii'].keys():
print('Overwriting noii data for {}'.format(name))
del self.file['noii'][name]
except OSError as e:
print('HDF5 file does not exist. Creating a new one.')
self.file = h5py.File(path, 'x') # create file, fail if exists
self.messages = self.file.require_group('messages')
self.orderbooks = self.file.require_group('orderbooks')
self.trades = self.file.require_group('trades')
self.noii = self.file.require_group('noii')
for name in names:
self.messages.require_dataset(name,
shape=(0, 8),
maxshape=(None, None),
dtype='i')
self.orderbooks.require_dataset(name,
shape=(0, 4 * nlevels + 2),
maxshape=(None, None),
dtype='i')
self.trades.require_dataset(name,
shape=(0, 5),
maxshape=(None, None),
dtype='i')
self.noii.require_dataset(name,
shape=(0, 14),
maxshape=(None, None),
dtype='i')
elif self.method == 'csv':
if os.path.exists('{}'.format(path)):
response = input('A database with that path already exists. Are you sure you want to proceed? [Y/N] ')
if response == 'Y':
proceed = True
for item in os.listdir('{}/messages/'.format(path)):
os.remove('{}/messages/{}'.format(path, item))
for item in os.listdir('{}/books/'.format(path)):
os.remove('{}/books/{}'.format(path, item))
for item in os.listdir('{}/trades/'.format(path)):
os.remove('{}/trades/{}'.format(path, item))
for item in os.listdir('{}/noii/'.format(path)):
os.remove('{}/noii/{}'.format(path, item))
os.rmdir('{}/messages/'.format(path))
os.rmdir('{}/books/'.format(path))
os.rmdir('{}/trades/'.format(path))
os.rmdir('{}/noii/'.format(path))
for item in os.listdir('{}'.format(path)):
os.remove('{}/{}'.format(path, item))
os.rmdir('{}'.format(path))
else:
# TODO: Need to exit the program
proceed = False
print('Process cancelled.')
else:
proceed = True
if proceed:
print('Creating a new database in directory: {}/'.format(path))
self.messages_path = '{}/messages/'.format(path)
self.books_path = '{}/books/'.format(path)
self.trades_path = '{}/trades/'.format(path)
self.noii_path = '{}/noii/'.format(path)
os.makedirs(path)
os.makedirs(self.messages_path)
os.makedirs(self.books_path)
os.makedirs(self.trades_path)
os.makedirs(self.noii_path)
columns = ['sec', 'nano', 'name']
columns.extend(['bidprc{}'.format(i) for i in range(nlevels)])
columns.extend(['askprc{}'.format(i) for i in range(nlevels)])
columns.extend(['bidvol{}'.format(i) for i in range(nlevels)])
columns.extend(['askvol{}'.format(i) for i in range(nlevels)])
for name in names:
with open(self.messages_path + 'messages_{}.txt'.format(name), 'w') as messages_file:
messages_file.write('sec,nano,name,type,refno,side,shares,price,mpid\n')
with open(self.books_path + 'books_{}.txt'.format(name), 'w') as books_file:
books_file.write(','.join(columns) + '\n')
with open(self.trades_path + 'trades_{}.txt'.format(name), 'w') as trades_file:
trades_file.write('sec,nano,name,side,shares,price\n')
with open(self.noii_path + 'noii_{}.txt'.format(name), 'w') as noii_file:
noii_file.write('sec,nano,name,type,cross,shares,price,paired,imb,dir,far,near,curr\n')
def close(self):
if self.method == 'hdf5':
self.file.close()
else:
pass
class Message():
"""A class representing out-going messages from the NASDAQ system.
Parameters
----------
sec : int
Seconds
nano : int
Nanoseconds
type : string
Message type
event : string
System event
name : string
Stock ticker
buysell : string
Trade position
price : int
Trade price
shares : int
Shares
refno : int
Unique reference number of order
newrefno : int
Replacement reference number
mpid: string
MPID attribution
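    Examples
    --------
    Construct an add-order message by hand (illustrative values only; prices
    are stored as integers scaled by 10**4)::
    >> msg = pk.Message(sec=34200, nano=500, type='A', name='GOOG', buysell='B', price=5950000, shares=100, refno=1)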
"""
def __init__(self, date='.', sec=-1, nano=-1, type='.', event='.', name='.',
buysell='.', price=-1, shares=0, refno=-1, newrefno=-1, mpid='.'):
self.date = date
self.name = name
self.sec = sec
self.nano = nano
self.type = type
self.event = event
self.buysell = buysell
self.price = price
self.shares = shares
self.refno = refno
self.newrefno = newrefno
self.mpid = mpid
def __str__(self):
sep = ', '
line = ['sec=' + str(self.sec),
'nano=' + str(self.nano),
'type=' + str(self.type),
'event=' + str(self.event),
'name=' + str(self.name),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares),
'refno=' + str(self.refno),
'newrefno=' + str(self.newrefno),
'mpid= {}'.format(self.mpid)]
return sep.join(line)
def __repr__(self):
sep = ', '
line = ['sec: ' + str(self.sec),
'nano: ' + str(self.nano),
'type: ' + str(self.type),
'event: ' + str(self.event),
'name: ' + str(self.name),
'buysell: ' + str(self.buysell),
'price: ' + str(self.price),
'shares: ' + str(self.shares),
'refno: ' + str(self.refno),
'newrefno: ' + str(self.newrefno),
'mpid: {}'.format(self.mpid)]
return 'Message(' + sep.join(line) + ')'
def split(self):
"""Converts a replace message to an add and a delete."""
assert self.type == 'U', "ASSERT-ERROR: split method called on non-replacement message."
if self.type == 'U':
new_message = Message(date=self.date,
sec=self.sec,
nano=self.nano,
type='U',
price=self.price,
shares=self.shares,
refno=self.refno,
newrefno=self.newrefno)
del_message = Message(date=self.date,
sec=self.sec,
nano=self.nano,
type='D',
refno=self.refno,
newrefno=-1)
add_message = Message(date=self.date,
sec=self.sec,
nano=self.nano,
type='U+',
price=self.price,
shares=self.shares,
refno=self.refno,
newrefno=self.newrefno)
return (new_message, del_message, add_message)
def to_list(self):
"""Returns message as a list."""
values = []
values.append(str(self.date))
values.append(str(self.name))
values.append(int(self.sec))
values.append(int(self.nano))
values.append(str(self.type))
values.append(str(self.event))
values.append(str(self.buysell))
values.append(int(self.price))
values.append(int(self.shares))
values.append(int(self.refno))
values.append(int(self.newrefno))
        values.append(str(self.mpid))
return values
def to_array(self):
"""Returns message as an np.array of integers."""
if self.type == 'P':
if self.buysell == 'B':
side = -1
else:
side = 1
values = [self.sec, self.nano, side, self.price, self.shares]
return np.array(values)
else:
if self.type == 'A': # add
type = 0
elif self.type == 'F': # add w/mpid
type = 1
elif self.type == 'X': # cancel
type = 2
elif self.type == 'D': # delete
type = 3
elif self.type == 'E': # execute
type = 4
elif self.type == 'C': # execute w/price
type = 5
elif self.type == 'U': # replace
type = 6
else:
type = -1
if self.buysell == 'B': # bid
side = 1
elif self.buysell == 'S': # ask
side = -1
else:
side = 0
values = [self.sec,
self.nano,
type,
side,
self.price,
np.abs(self.shares),
self.refno,
self.newrefno]
return np.array(values)
def to_txt(self, path=None):
if self.type in ('S', 'H'):
sep = ','
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.event)]
elif self.type in ('A', 'F', 'E', 'C', 'X', 'D', 'U'):
sep = ','
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.type),
str(self.refno),
str(self.buysell),
str(self.shares),
str(self.price / 10 ** 4),
str(self.mpid)]
elif self.type == 'P':
sep = ','
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.buysell),
str(self.shares),
str(self.price / 10 ** 4)]
if path is None:
return sep.join(line) + '\n'
else:
with open(path, 'a') as fout:
fout.write(sep.join(line) + '\n')
class NOIIMessage():
"""A class representing out-going messages from the NASDAQ system.
This class is specific to net order imbalance indicator messages and
cross trade messages.
Parameters
----------
sec: int
Seconds
nano: int
Nanoseconds
name: string
Stock ticker
type: string
Message type
cross: string
Cross type
buysell: string
Trade position
price: int
Trade price
shares: int
Shares
matchno: int
Unique reference number of trade
paired: int
Shares paired
imbalance: int
Shares imbalance
direction: string
Imbalance direction
far: int
Far price
near: int
Near price
current: int
        Current reference price
"""
def __init__(self, date='.', sec=-1, nano=-1, name='.', type='.', cross='.',
buysell='.', price=-1, shares=0, matchno=-1, paired=-1,
imbalance=-1, direction='.', far=-1, near=-1, current=-1):
self.date = date
self.sec = sec
self.nano = nano
self.name = name
self.type = type
self.cross = cross
self.buysell = buysell
self.price = price
self.shares = shares
self.matchno = matchno
self.paired = paired
self.imbalance = imbalance
self.direction = direction
self.far = far
self.near = near
self.current = current
def __str__(self):
sep = ', '
line = ['date=' + str(self.date),
'sec=' + str(self.sec),
'nano=' + str(self.nano),
'name=' + str(self.name),
'type=' + str(self.type),
'cross=' + str(self.cross),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares),
'matchno=' + str(self.matchno),
'paired=' + str(self.paired),
'imbalance=' + str(self.imbalance),
'direction=' + str(self.direction),
'far=' + str(self.far),
'near=' + str(self.near),
'current=' + str(self.current)]
return sep.join(line)
def __repr__(self):
sep = ', '
line = ['date=' + str(self.date),
'sec=' + str(self.sec),
'nano=' + str(self.nano),
'name=' + str(self.name),
'type=' + str(self.type),
'cross=' + str(self.cross),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares),
'matchno=' + str(self.matchno),
'paired=' + str(self.paired),
'imbalance=' + str(self.imbalance),
'direction=' + str(self.direction),
'far=' + str(self.far),
'near=' + str(self.near),
'current=' + str(self.current)]
return 'Message(' + sep.join(line) + ')'
def to_list(self):
"""Returns message as a list."""
values = []
values.append(str(self.date))
values.append(int(self.sec))
values.append(int(self.nano))
values.append(str(self.name))
values.append(str(self.type))
values.append(str(self.cross))
values.append(str(self.buysell))
values.append(int(self.price))
values.append(int(self.shares))
values.append(int(self.matchno))
values.append(int(self.paired))
values.append(int(self.imbalance))
        values.append(str(self.direction))
values.append(int(self.far))
values.append(int(self.near))
values.append(int(self.current))
return values
def to_array(self):
"""Returns message as an np.array of integers."""
if self.type == 'Q': # cross trade
type = 0
elif self.type == 'I': # noii
type = 1
else:
type = -1
print('Unexpected NOII message type: {}'.format(self.type))
if self.cross == 'O': # opening cross
cross = 0
elif self.cross == 'C': # closing cross
cross = 1
elif self.cross == 'H': # halted cross
cross = 2
elif self.cross == 'I': # intraday cross
cross = 3
else:
cross = -1
print('Unexpected cross type: {}'.format(self.cross))
if self.buysell == 'B': # bid
side = 1
elif self.buysell == 'S': # ask
side = -1
else:
side = 0
if self.direction == 'B': # bid
dir = 1
elif self.direction == 'S': # ask
dir = -1
else:
dir = 0
values = [self.sec,
self.nano,
type,
cross,
side,
self.price,
self.shares,
self.matchno,
self.paired,
self.imbalance,
dir,
self.far,
self.near,
self.current]
return np.array(values)
def to_txt(self, path=None):
sep = ','
if self.type == 'Q':
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.type),
str(self.cross),
str(self.shares),
str(self.price / 10 ** 4),
str(self.paired),
str(self.imbalance),
str(self.direction),
str(self.far),
str(self.near),
str(self.current)]
elif self.type == 'I':
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.type),
str(self.cross),
str(self.shares),
str(self.price),
str(self.paired),
str(self.imbalance),
str(self.direction),
str(self.far / 10 ** 4),
str(self.near / 10 ** 4),
str(self.current / 10 ** 4)]
if path is None:
return sep.join(line) + '\n'
else:
with open(path, 'a') as fout:
fout.write(sep.join(line) + '\n')
class Trade():
"""A class representing trades on the NASDAQ system.
Parameters
----------
date: int
Date
sec : int
Seconds
nano : int
Nanoseconds
name : string
Stock ticker
side : string
Buy or sell
price : int
Trade price
shares : int
Shares
"""
def __init__(self, date='.', sec=-1, nano=-1, name='.', side='.', price=-1, shares=0):
self.date = date
self.name = name
self.sec = sec
self.nano = nano
self.side = side
self.price = price
self.shares = shares
def __str__(self):
sep = ', '
line = ['sec: ' + str(self.sec),
'nano: ' + str(self.nano),
'name: ' + str(self.name),
                'side: ' + str(self.side),
'price: ' + str(self.price),
'shares: ' + str(self.shares)]
return sep.join(line)
def __repr__(self):
sep = ', '
line = ['sec: ' + str(self.sec),
'nano: ' + str(self.nano),
'name: ' + str(self.name),
                'side: ' + str(self.side),
'price: ' + str(self.price),
'shares: ' + str(self.shares)]
return 'Trade(' + sep.join(line) + ')'
def to_list(self):
"""Returns message as a list."""
values = []
values.append(str(self.date))
values.append(str(self.name))
values.append(int(self.sec))
values.append(int(self.nano))
values.append(str(self.side))
values.append(int(self.price))
values.append(int(self.shares))
return values
def to_array(self):
"""Returns message as an np.array of integers."""
if self.side == 'B':
side = -1
else:
side = 1
return np.array([self.sec, self.nano, side, self.price, self.shares])
def to_txt(self, path=None):
sep = ','
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.side),
str(self.shares),
str(self.price / 10 ** 4)]
if path is None:
return sep.join(line) + '\n'
else:
with open(path, 'a') as fout:
fout.write(sep.join(line) + '\n')
class Messagelist():
"""A class to store messages.
    Provides methods for writing to HDF5 and CSV databases.
Parameters
----------
date : string
Date to be assigned to data
names : list
Contains the stock tickers to include in the database
Attributes
----------
messages : dict
        Contains a list of Message objects for each name in names
Examples
--------
Create a MessageList::
>> msglist = pk.Messagelist(date='112013', names=['GOOG', 'AAPL'])
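    Add a message and write it out (sketch; msg is a pk.Message and db is a
    pk.Database created elsewhere)::
    >> msglist.add(msg)
    >> msglist.to_hdf5(name='GOOG', db=db, grp='messages')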
"""
def __init__(self, date, names):
self.messages = {}
self.date = date
for name in names:
self.messages[name] = []
def add(self, message):
"""Add a message to the list."""
try:
self.messages[message.name].append(message)
except KeyError as e:
print("KeyError: Could not find {} in the message list".format(message.name))
def to_hdf5(self, name, db, grp):
"""Write messages to HDF5 file."""
assert db.method == 'hdf5', 'Attempted to write to non-HDF5 database'
m = self.messages[name]
if len(m) > 0:
listed = [message.to_array() for message in m]
array = np.array(listed)
if grp == 'messages':
db_size, db_cols = db.messages[name].shape # rows
array_size, array_cols = array.shape
db_resize = db_size + array_size
db.messages[name].resize((db_resize, db_cols))
db.messages[name][db_size:db_resize, :] = array
if grp == 'trades':
db_size, db_cols = db.trades[name].shape # rows
array_size, array_cols = array.shape
db_resize = db_size + array_size
db.trades[name].resize((db_resize, db_cols))
db.trades[name][db_size:db_resize, :] = array
if grp == 'noii':
db_size, db_cols = db.noii[name].shape # rows
array_size, array_cols = array.shape
db_resize = db_size + array_size
db.noii[name].resize((db_resize, db_cols))
db.noii[name][db_size:db_resize, :] = array
self.messages[name] = [] # reset
print('wrote {} messages to dataset (name={}, group={})'.format(len(m), name, grp))
def to_txt(self, name, db, grp):
assert db.method == 'csv', 'Attempted to write to non-CSV database'
message_list = self.messages[name]
if len(message_list) > 0:
texted = [message.to_txt() for message in message_list]
if grp == 'messages':
with open('{}/messages_{}.txt'.format(db.messages_path, name), 'a') as fout:
fout.writelines(texted)
if grp == 'trades':
with open('{}/trades_{}.txt'.format(db.trades_path, name), 'a') as fout:
fout.writelines(texted)
if grp == 'noii':
with open('{}/noii_{}.txt'.format(db.noii_path, name), 'a') as fout:
fout.writelines(texted)
self.messages[name] = []
print('wrote {} messages to dataset (name={}, group={})'.format(len(message_list), name, grp))
class Order():
"""A class to represent limit orders.
Stores essential message data for order book reconstruction.
Attributes
----------
name : string
Stock ticker
buysell : string
Trade position
price : int
Trade price
shares : int
Shares
"""
def __init__(self, name='.', buysell='.', price='.', shares='.'):
self.name = name
self.buysell = buysell
self.price = price
self.shares = shares
def __str__(self):
sep = ', '
line = ['name=' + str(self.name),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares)]
return sep.join(line)
def __repr__(self):
sep = ', '
line = ['name=' + str(self.name),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares)]
return 'Order(' + sep.join(line) + ')'
class Orderlist():
"""A class to store existing orders and process incoming messages.
This class handles the matching of messages to standing orders. Incoming messages are first matched to standing orders so that missing message data can be completed, and then the referenced order is updated based on the message.
Attributes
----------
orders : dict
Keys are reference numbers, values are Orders.
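    Examples
    --------
    Typical flow for incoming messages (sketch; add_msg and exe_msg are
    pk.Message objects sharing the same refno)::
    >> orderlist = pk.Orderlist()
    >> orderlist.add(add_msg)
    >> orderlist.complete_message(exe_msg)
    >> orderlist.update(exe_msg)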
"""
def __init__(self):
self.orders = {}
def __str__(self):
sep = '\n'
line = []
for key in self.orders.keys():
line.append(str(key) + ': ' + str(self.orders[key]))
return sep.join(line)
# updates message by reference.
def complete_message(self, message):
"""Look up Order for Message and fill in missing data."""
if message.refno in self.orders.keys():
# print('complete_message received message: {}'.format(message.type))
ref_order = self.orders[message.refno]
if message.type == 'U':
message.name = ref_order.name
message.buysell = ref_order.buysell
elif message.type == 'U+': # ADD from a split REPLACE order
message.type = 'A'
message.name = ref_order.name
message.buysell = ref_order.buysell
message.refno = message.newrefno
message.newrefno = -1
elif message.type in ('E', 'C', 'X'):
message.name = ref_order.name
message.buysell = ref_order.buysell
message.price = ref_order.price
message.shares = -message.shares
elif message.type == 'D':
message.name = ref_order.name
message.buysell = ref_order.buysell
message.price = ref_order.price
message.shares = -ref_order.shares
def add(self, message):
"""Add a new Order to the list."""
order = Order()
order.name = message.name
order.buysell = message.buysell
order.price = message.price
order.shares = message.shares
self.orders[message.refno] = order
def update(self, message):
"""Update an existing Order based on incoming Message."""
if message.refno in self.orders.keys():
if message.type == 'E': # execute
self.orders[message.refno].shares += message.shares
            elif message.type == 'X': # cancel
self.orders[message.refno].shares += message.shares
            elif message.type == 'C': # execute w/price
self.orders[message.refno].shares += message.shares
elif message.type == 'D': # delete
self.orders.pop(message.refno)
else:
pass
class Book():
"""A class to represent an order book.
This class provides a method for updating the state of an order book from an
incoming message.
Attributes
----------
bids : dict
Keys are prices, values are shares
asks : dict
Keys are prices, values are shares
levels : int
        Number of levels of the order book to track
sec : int
Seconds
nano : int
Nanoseconds
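    Examples
    --------
    Apply a completed message to a book and export the state (sketch; msg is
    a pk.Message for this ticker)::
    >> book = pk.Book(date='112013', name='GOOG', levels=10)
    >> book.update(msg)
    >> row = book.to_list()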
"""
def __init__(self, date, name, levels):
self.bids = {}
self.asks = {}
self.min_bid = -np.inf
self.max_ask = np.inf
self.levels = levels
self.sec = -1
self.nano = -1
self.date = date
self.name = name
def __str__(self):
sep = ', '
sorted_bids = sorted(self.bids.keys(), reverse=True) # high-to-low
sorted_asks = sorted(self.asks.keys()) # low-to-high
bid_list = []
ask_list = []
nbids = len(self.bids)
nasks = len(self.asks)
for i in range(0, self.levels):
if i < nbids:
bid_list.append(str(self.bids[sorted_bids[i]]) + '@' + str(sorted_bids[i]))
else:
pass
if i < nasks:
ask_list.append(str(self.asks[sorted_asks[i]]) + '@' + str(sorted_asks[i]))
else:
pass
return 'bids: ' + sep.join(bid_list) + '\n' + 'asks: ' + sep.join(ask_list)
def __repr__(self):
sep = ', '
sorted_bids = sorted(self.bids.keys(), reverse=True) # high-to-low
sorted_asks = sorted(self.asks.keys()) # low-to-high
bid_list = []
ask_list = []
nbids = len(self.bids)
nasks = len(self.asks)
for i in range(0, self.levels):
if i < nbids:
bid_list.append(str(self.bids[sorted_bids[i]]) + '@' + str(sorted_bids[i]))
else:
pass
if i < nasks:
ask_list.append(str(self.asks[sorted_asks[i]]) + '@' + str(sorted_asks[i]))
else:
pass
return 'Book( \n' + 'bids: ' + sep.join(bid_list) + '\n' + 'asks: ' + sep.join(ask_list) + ' )'
def update(self, message):
"""Update Book using incoming Message data."""
self.sec = message.sec
self.nano = message.nano
updated = False
if message.buysell == 'B':
if message.price in self.bids.keys():
self.bids[message.price] += message.shares
if self.bids[message.price] == 0:
self.bids.pop(message.price)
elif message.type in ('A', 'F'):
self.bids[message.price] = message.shares
elif message.buysell == 'S':
if message.price in self.asks.keys():
self.asks[message.price] += message.shares
if self.asks[message.price] == 0:
self.asks.pop(message.price)
elif message.type in ('A', 'F'):
self.asks[message.price] = message.shares
return self
def to_list(self):
"""Return Order as a list."""
values = []
values.append(self.date)
values.append(self.name)
values.append(int(self.sec))
values.append(int(self.nano))
sorted_bids = sorted(self.bids.keys(), reverse=True)
sorted_asks = sorted(self.asks.keys())
for i in range(0, self.levels): # bid price
if i < len(self.bids):
values.append(sorted_bids[i])
else:
values.append(-1)
for i in range(0, self.levels): # ask price
if i < len(self.asks):
values.append(sorted_asks[i])
else:
values.append(-1)
for i in range(0, self.levels): # bid depth
if i < len(self.bids):
values.append(self.bids[sorted_bids[i]])
else:
values.append(0)
for i in range(0, self.levels): # ask depth
if i < len(self.asks):
values.append(self.asks[sorted_asks[i]])
else:
values.append(0)
return values
def to_array(self):
        """Return the book state as a numpy array."""
values = []
values.append(int(self.sec))
values.append(int(self.nano))
sorted_bids = sorted(self.bids.keys(), reverse=True)
sorted_asks = sorted(self.asks.keys())
for i in range(0, self.levels): # bid price
if i < len(self.bids):
values.append(sorted_bids[i])
else:
values.append(-1)
for i in range(0, self.levels): # ask price
if i < len(self.asks):
values.append(sorted_asks[i])
else:
values.append(-1)
for i in range(0, self.levels): # bid depth
if i < len(self.bids):
values.append(self.bids[sorted_bids[i]])
else:
values.append(0)
for i in range(0, self.levels): # ask depth
if i < len(self.asks):
values.append(self.asks[sorted_asks[i]])
else:
values.append(0)
return np.array(values)
def to_txt(self):
values = []
values.append(int(self.sec))
values.append(int(self.nano))
values.append(self.name)
sorted_bids = sorted(self.bids.keys(), reverse=True)
sorted_asks = sorted(self.asks.keys())
for i in range(0, self.levels): # bid price
if i < len(self.bids):
values.append(sorted_bids[i] / 10 ** 4)
else:
values.append(-1)
for i in range(0, self.levels): # ask price
if i < len(self.asks):
values.append(sorted_asks[i] / 10 ** 4)
else:
values.append(-1)
for i in range(0, self.levels): # bid depth
if i < len(self.bids):
values.append(self.bids[sorted_bids[i]])
else:
values.append(0)
for i in range(0, self.levels): # ask depth
if i < len(self.asks):
values.append(self.asks[sorted_asks[i]])
else:
values.append(0)
return ','.join([str(v) for v in values]) + '\n'
class Booklist():
"""A class to store Books.
Provides methods for writing to external databases.
Examples
--------
Create a Booklist::
    >> booklist = pk.Booklist(date='112013', names=['GOOG', 'AAPL'], levels=10, method='hdf5')
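    Update it from a message and flush to a database (sketch; msg and db are
    a pk.Message and pk.Database created elsewhere)::
    >> booklist.update(msg)
    >> booklist.to_hdf5(name='GOOG', db=db)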
Attributes
----------
books : list
A list of Books
method : string
        Specifies the type of database to create ('hdf5' or 'csv')
"""
def __init__(self, date, names, levels, method):
self.books = {}
self.method = method
for name in names:
self.books[name] = {'hist': [], 'cur': Book(date, name, levels)}
def update(self, message):
"""Update Book data from message."""
b = self.books[message.name]['cur'].update(message)
if self.method == 'hdf5':
self.books[message.name]['hist'].append(b.to_array())
if self.method == 'csv':
self.books[message.name]['hist'].append(b.to_txt())
def to_hdf5(self, name, db):
"""Write Book data to HDF5 file."""
hist = self.books[name]['hist']
if len(hist) > 0:
array = np.array(hist)
db_size, db_cols = db.orderbooks[name].shape # rows
array_size, array_cols = array.shape
db_resize = db_size + array_size
db.orderbooks[name].resize((db_resize, db_cols))
db.orderbooks[name][db_size:db_resize, :] = array
self.books[name]['hist'] = [] # reset
print('wrote {} books to dataset (name={})'.format(len(hist), name))
def to_txt(self, name, db):
hist = self.books[name]['hist']
if len(hist) > 0:
with open('{}/books_{}.txt'.format(db.books_path, name), 'a') as fout:
fout.writelines(hist)
self.books[name]['hist'] = [] # reset
print('wrote {} books to dataset (name={})'.format(len(hist), name))
def get_message_size(size_in_bytes):
"""Return number of bytes in binary message as an integer."""
(message_size,) = struct.unpack('>H', size_in_bytes)
return message_size
def get_message_type(type_in_bytes):
"""Return the type of a binary message as a string."""
return type_in_bytes.decode('ascii')
def get_message(message_bytes, message_type, date, time, version):
"""Return binary message data as a Message."""
if message_type in ('T', 'S', 'H', 'A', 'F', 'E', 'C', 'X', 'D', 'U', 'P', 'Q', 'I'):
message = protocol(message_bytes, message_type, time, version)
if version == 5.0:
message.sec = int(message.nano / 10 ** 9)
message.nano = message.nano % 10 ** 9
message.date = date
return message
else:
return None
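# A minimal read loop for these helpers (kept as comments so the module stays
# import-safe; the file name, date and version values are hypothetical):
#   with open('S070113-v41.txt', 'rb') as data:
#       size = get_message_size(data.read(2))
#       mtype = get_message_type(data.read(1))
#       message = get_message(data.read(size - 1), mtype, '070113', time, 4.1)
#   where 'time' is the seconds value carried forward from the most recent
#   'T' (timestamp) message when the ITCH version is below 5.0.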
def protocol(message_bytes, message_type, time, version):
"""Decode binary message data and return as a Message."""
if message_type in ('T', 'S', 'H', 'A', 'F', 'E', 'C', 'X', 'D', 'U', 'P'):
message = Message()
elif message_type in ('Q', 'I'):
message = NOIIMessage()
# elif message_type in ('H'):
# message = TradingActionMessage()
message.type = message_type
if version == 4.0:
if message.type == 'T': # time
temp = struct.unpack('>I', message_bytes)
message.sec = temp[0]
message.nano = 0
elif message_type == 'S': # systems
temp = struct.unpack('>Is', message_bytes)
message.sec = time
message.nano = temp[0]
message.event = temp[1].decode('ascii')
elif message_type == 'H': # trade-action
temp = struct.unpack('>I6sss4s', message_bytes)
message.sec = time
message.nano = temp[0]
message.name = temp[1].decode('ascii').rstrip(' ')
message.event = temp[2].decode('ascii')
elif message.type == 'A': # add
temp = struct.unpack('>IQsI6sI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
elif message.type == 'F': # add w/mpid
temp = struct.unpack('>IQsI6sI4s', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
elif message.type == 'E': # execute
temp = struct.unpack('>IQIQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
elif message.type == 'C': # execute w/price (actually don't need price...)
temp = struct.unpack('>IQIQsI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
message.price = temp[5]
elif message.type == 'X': # cancel
temp = struct.unpack('>IQI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
elif message.type == 'D': # delete
temp = struct.unpack('>IQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
elif message.type == 'U': # replace
temp = struct.unpack('>IQQII', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.newrefno = temp[2]
message.shares = temp[3]
message.price = temp[4]
elif message.type == 'Q':
temp = struct.unpack('>IQ6sIQs', message_bytes)
message.sec = time
message.nano = temp[0]
message.shares = temp[1]
message.name = temp[2].decode('ascii').rstrip(' ')
message.price = temp[3]
message.event = temp[5].decode('ascii')
return message
elif version == 4.1:
if message.type == 'T': # time
temp = struct.unpack('>I', message_bytes)
message.sec = temp[0]
message.nano = 0
elif message.type == 'S': # systems
temp = struct.unpack('>Is', message_bytes)
message.sec = time
message.nano = temp[0]
message.name = '.'
message.event = temp[1].decode('ascii')
elif message.type == 'H': # trade-action
temp = struct.unpack('>I8sss4s', message_bytes)
message.sec = time
message.nano = temp[0]
message.name = temp[1].decode('ascii').rstrip(' ')
message.event = temp[2].decode('ascii')
elif message.type == 'A': # add
temp = struct.unpack('>IQsI8sI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
elif message.type == 'F': # add w/mpid
temp = struct.unpack('>IQsI8sI4s', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
message.mpid = temp[6].decode('ascii').rstrip(' ')
elif message.type == 'E': # execute
temp = struct.unpack('>IQIQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
elif message.type == 'C': # execute w/price
temp = struct.unpack('>IQIQsI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
message.price = temp[5]
elif message.type == 'X': # cancel
temp = struct.unpack('>IQI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
elif message.type == 'D': # delete
temp = struct.unpack('>IQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
elif message.type == 'U': # replace
temp = struct.unpack('>IQQII', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.newrefno = temp[2]
message.shares = temp[3]
message.price = temp[4]
elif message.type == 'Q': # cross-trade
temp = struct.unpack('>IQ8sIQs', message_bytes)
message.sec = time
message.nano = temp[0]
message.shares = temp[1]
message.name = temp[2].decode('ascii').rstrip(' ')
message.price = temp[3]
message.event = temp[5].decode('ascii')
elif message.type == 'P': # trade message
temp = struct.unpack('>IQsI8sIQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
# message.matchno = temp[6]
elif message.type == 'I':
temp = struct.unpack('>IQQs8sIIIss', message_bytes)
message.sec = time
message.nano = temp[0]
message.paired = temp[1]
message.imbalance = temp[2]
message.direction = temp[3].decode('ascii')
message.name = temp[4].decode('ascii').rstrip(' ')
message.far = temp[5]
message.near = temp[6]
message.current = temp[7]
message.cross = temp[8].decode('ascii')
# message.pvar = temp[9].decode('ascii'])
return message
elif version == 5.0:
if message.type == 'T': # time
raise ValueError('Time messages not supported in ITCHv5.0.')
elif message_type == 'S': # systems
temp = struct.unpack('>HHHIs', message_bytes)
message.sec = time
message.nano = temp[2] | (temp[3] << 16)
message.event = temp[4].decode('ascii')
elif message.type == 'H':
temp = struct.unpack('>HHHI8sss4s', message_bytes)
message.sec = time
message.nano = temp[2] | (temp[3] << 16)
message.name = temp[4].decode('ascii').rstrip(' ')
message.event = temp[5].decode('ascii')
elif message.type == 'A': # add
temp = struct.unpack('>HHHIQsI8sI', message_bytes)
message.sec = time
message.nano = temp[2] | (temp[3] << 16)
message.refno = temp[4]
message.buysell = temp[5].decode('ascii')
message.shares = temp[6]
message.name = temp[7].decode('ascii').rstrip(' ')
message.price = temp[8]
elif message.type == 'F': # add w/mpid
temp = struct.unpack('>HHHIQsI8sI4s', message_bytes)
message.sec = time
message.nano = temp[2] | (temp[3] << 16)
message.refno = temp[4]
message.buysell = temp[5].decode('ascii')
message.shares = temp[6]
message.name = temp[7].decode('ascii').rstrip(' ')
message.price = temp[8]
elif message.type == 'E': # execute
temp = struct.unpack('>HHHIQIQ', message_bytes)
message.sec = time
message.nano = temp[2] | (temp[3] << 16)
message.refno = temp[4]
message.shares = temp[5]
elif message.type == 'C': # execute w/price
temp = struct.unpack('>HHHIQIQsI', message_bytes)
message.sec = time
message.nano = temp[2] | (temp[3] << 16)
message.refno = temp[4]
message.shares = temp[5]
message.price = temp[8]
elif message.type == 'X': # cancel
temp = struct.unpack('>HHHIQI', message_bytes)
message.sec = time
message.nano = temp[2] | (temp[3] << 16)
message.refno = temp[4]
message.shares = temp[5]
elif message.type == 'D': # delete
temp = struct.unpack('>HHHIQ', message_bytes)
message.sec = time
message.nano = temp[2] | (temp[3] << 16)
message.refno = temp[4]
elif message.type == 'U': # replace
temp = struct.unpack('>HHHIQQII', message_bytes)
message.sec = time
message.nano = temp[2] | (temp[3] << 16)
message.refno = temp[4]
message.newrefno = temp[5]
message.shares = temp[6]
message.price = temp[7]
elif message.type == 'Q': # cross-trade
temp = struct.unpack('>HHHIQ8sIQ1s', message_bytes)
message.sec = time
message.nano = temp[2] | (temp[3] << 16)
message.shares = temp[4]
message.name = temp[5].decode('ascii').rstrip(' ')
message.price = temp[6]
message.event = temp[8].decode('ascii')
return message
else:
raise ValueError('ITCH version ' + str(version) + ' is not supported')
def unpack(fin, ver, date, nlevels, names, method='csv', fout=None, host=None, user=None):
"""Read ITCH data file, construct LOB, and write to database.
This method reads binary data from an ITCH data file, converts it into human-readable data, then saves time series of outgoing messages as well as reconstructed order book snapshots to a research database.
The version number of the ITCH data is specified as a float. Supported versions are: 4.0, 4.1, and 5.0.
"""
BUFFER_SIZE = 10 ** 4
orderlist = Orderlist()
booklist = Booklist(date, names, nlevels, method)
messagelist = Messagelist(date, names)
tradeslist = Messagelist(date, names)
noiilist = Messagelist(date, names)
if method == 'hdf5':
db = Database(path=fout, names=names, nlevels=nlevels, method='hdf5')
log_path = os.path.abspath('{}/../system.log'.format(fout))
with open(log_path, 'w') as system_file:
system_file.write('sec,nano,name,event\n')
elif method == 'csv':
db = Database(path=fout, names=names, nlevels=nlevels, method='csv')
log_path = '{}/system.log'.format(fout)
with open(log_path, 'w') as system_file:
system_file.write('sec,nano,name,event\n')
data = open(fin, 'rb')
message_reads = 0
message_writes = 0
trade_writes = 0
noii_writes = 0
reading = True
clock = 0
start = time.time()
while reading:
# read message
message_size = get_message_size(data.read(2))
message_type = get_message_type(data.read(1))
message_bytes = data.read(message_size - 1)
message = get_message(message_bytes, message_type, date, clock, ver)
message_reads += 1
# update clock
if message_type == 'T':
if message.sec % 1800 == 0:
print('TIME={}'.format(message.sec))
clock = message.sec
# update system
if message_type == 'S':
print('SYSTEM MESSAGE: {}'.format(message.event))
message.to_txt(log_path)
if message.event == 'C': # end messages
reading = False
if message_type == 'H':
if message.name in names:
print('TRADING MESSAGE ({}): {}'.format(message.name, message.event))
message.to_txt(log_path)
# TODO: What to do about halts?
if message.event == 'H': # halted (all US)
pass
elif message.event == 'P': # paused (all US)
pass
elif message.event == 'Q': # quotation only
pass
elif message.event == 'T': # trading on nasdaq
pass
# complete message
if message_type == 'U':
message, del_message, add_message = message.split()
orderlist.complete_message(message)
orderlist.complete_message(del_message)
orderlist.complete_message(add_message)
if message.name in names:
message_writes += 1
orderlist.update(del_message)
booklist.update(del_message)
orderlist.add(add_message)
booklist.update(add_message)
messagelist.add(message)
# print('ORDER MESSAGE <REPLACE>')
elif message_type in ('E', 'C', 'X', 'D'):
orderlist.complete_message(message)
if message.name in names:
message_writes += 1
orderlist.update(message)
booklist.update(message)
messagelist.add(message)
# print('ORDER MESSAGE')
elif message_type in ('A', 'F'):
if message.name in names:
message_writes += 1
orderlist.add(message)
booklist.update(message)
messagelist.add(message)
# print('ORDER MESSAGE')
elif message_type == 'P':
if message.name in names:
trade_writes += 1
tradeslist.add(message)
# print('TRADE MESSAGE')
elif message_type in ('Q', 'I'):
if message.name in names:
noii_writes += 1
noiilist.add(message)
# print('NOII MESSAGE')
# write message
if method == 'hdf5':
if message_type in ('U', 'A', 'F', 'E', 'C', 'X', 'D'):
if message.name in names:
if len(messagelist.messages[message.name]) == BUFFER_SIZE:
messagelist.to_hdf5(name=message.name, db=db, grp='messages')
if len(booklist.books[message.name]['hist']) == BUFFER_SIZE:
booklist.to_hdf5(name=message.name, db=db)
elif message_type == 'P':
if message.name in names:
if len(tradeslist.messages[message.name]) == BUFFER_SIZE:
tradeslist.to_hdf5(name=message.name, db=db, grp='trades')
elif message_type in ('Q', 'I'):
if message.name in names:
if len(noiilist.messages[message.name]) == BUFFER_SIZE:
noiilist.to_hdf5(name=message.name, db=db, grp='noii')
elif method == 'csv':
if message_type in ('U', 'A', 'F', 'E', 'C', 'X', 'D'):
if message.name in names:
if len(messagelist.messages[message.name]) == BUFFER_SIZE:
messagelist.to_txt(name=message.name, db=db, grp='messages')
if len(booklist.books[message.name]['hist']) == BUFFER_SIZE:
booklist.to_txt(name=message.name, db=db)
elif message_type == 'P':
if message.name in names:
if len(tradeslist.messages[message.name]) == BUFFER_SIZE:
tradeslist.to_txt(name=message.name, db=db, grp='trades')
elif message_type in ('Q', 'I'):
if message.name in names:
if len(noiilist.messages[message.name]) == BUFFER_SIZE:
noiilist.to_txt(name=message.name, db=db, grp='noii')
# clean up
print('Cleaning up...')
for name in names:
if method == 'hdf5':
messagelist.to_hdf5(name=name, db=db, grp='messages')
booklist.to_hdf5(name=name, db=db)
tradeslist.to_hdf5(name=name, db=db, grp='trades')
noiilist.to_hdf5(name=name, db=db, grp='noii')
elif method == 'csv':
messagelist.to_txt(name=name, db=db, grp='messages')
booklist.to_txt(name=name, db=db)
tradeslist.to_txt(name=name, db=db, grp='trades')
noiilist.to_txt(name=name, db=db, grp='noii')
stop = time.time()
data.close()
db.close()
print('Elapsed time: {} seconds'.format(stop - start))
print('Messages read: {}'.format(message_reads))
print('Messages written: {}'.format(message_writes))
print('Trades written: {}'.format(trade_writes))
print('NOII written: {}'.format(noii_writes))
def load_hdf5(db, name, grp):
"""Read data from database and return pd.DataFrames."""
if grp == 'messages':
try:
with h5py.File(db, 'r') as f:
try:
messages = f['/messages/' + name]
data = messages[:, :]
T, N = data.shape
columns = ['sec',
'nano',
'type',
'side',
'price',
'shares',
'refno',
'newrefno']
df = pd.DataFrame(data, index=np.arange(0, T), columns=columns)
return df
except KeyError as e:
print('Could not find name {} in messages'.format(name))
except OSError as e:
print('Could not find file {}'.format(db))
if grp == 'books':
try:
with h5py.File(db, 'r') as f:
try:
data = f['/orderbooks/' + name]
nlevels = int((data.shape[1] - 2) / 4)
pidx = list(range(2, 2 + nlevels))
pidx.extend(list(range(2 + nlevels, 2 + 2 * nlevels)))
vidx = list(range(2 + 2 * nlevels, 2 + 3 * nlevels))
vidx.extend(list(range(2 + 3 * nlevels, 2 + 4 * nlevels)))
timestamps = data[:, 0:2]
prices = data[:, pidx]
volume = data[:, vidx]
base_columns = [str(i) for i in list(range(1, nlevels + 1))]
price_columns = ['bidprc.' + i for i in base_columns]
volume_columns = ['bidvol.' + i for i in base_columns]
price_columns.extend(['askprc.' + i for i in base_columns])
volume_columns.extend(['askvol.' + i for i in base_columns])
df_time = pd.DataFrame(timestamps, columns=['sec', 'nano'])
df_price = pd.DataFrame(prices, columns=price_columns)
df_volume = pd.DataFrame(volume, columns=volume_columns)
df_price = pd.concat([df_time, df_price], axis=1)
df_volume = pd.concat([df_time, df_volume], axis=1)
return df_price, df_volume
except KeyError as e:
print('Could not find name {} in orderbooks'.format(name))
except OSError as e:
print('Could not find file {}'.format(db))
if grp == 'trades':
try:
with h5py.File(db, 'r') as f:
try:
messages = f['/trades/' + name]
data = messages[:, :]
T, N = data.shape
columns = ['sec',
'nano',
'side',
'price',
'shares']
df = pd.DataFrame(data, index=np.arange(0, T), columns=columns)
return df
except KeyError as e:
print('Could not find name {} in messages'.format(name))
except OSError as e:
print('Could not find file {}'.format(db))
if grp == 'noii':
try:
with h5py.File(db, 'r') as f:
try:
messages = f['/noii/' + name]
data = messages[:, :]
T, N = data.shape
columns = ['sec',
'nano',
'type',
'cross',
'side',
'price',
'shares',
'matchno',
'paired',
'imb',
'dir',
'far',
'near',
'current']
df = pd.DataFrame(data, index=np.arange(0, T), columns=columns)
return df
except KeyError as e:
print('Could not find name {} in messages'.format(name))
except OSError as e:
print('Could not find file {}'.format(db))
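# Hypothetical usage sketch (file and ticker names are illustrative):
#
#   msgs = load_hdf5('itch_20130101.hdf5', 'AAPL', grp='messages')
#   prices, volumes = load_hdf5('itch_20130101.hdf5', 'AAPL', grp='books')
#
# The 'messages', 'trades' and 'noii' groups each return a single DataFrame;
# 'books' returns the (price, volume) DataFrame pair used by the plotting and
# resampling helpers below.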
def interpolate(data, tstep):
"""Interpolate limit order data.
Uses left-hand interpolation, and assumes that the data is indexed by timestamp.
"""
T, N = data.shape
timestamps = data.index
t0 = timestamps[0] - (timestamps[0] % tstep) # 34200
tN = timestamps[-1] - (timestamps[-1] % tstep) + tstep # 57600
timestamps_new = np.arange(t0 + tstep, tN + tstep, tstep) # [34200, ..., 57600]
X = np.zeros((len(timestamps_new), N)) # np.array
X[-1, :] = data.values[-1, :]
t = timestamps_new[0] # keeps track of time in NEW sampling frequency
for i in np.arange(0, T): # observations in data...
if timestamps[i] > t:
s = timestamps[i] - (timestamps[i] % tstep)
tidx = int((t - t0) / tstep - 1)
sidx = int((s - t0) / tstep) # plus one for python indexing (below)
X[tidx:sidx, :] = data.values[i - 1, :]
t = s + tstep
else:
pass
return pd.DataFrame(X,
index=timestamps_new,
columns=data.columns)
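# Sketch of the resampling step (the one-second tstep and the index
# construction are illustrative assumptions):
#
#   indexed = volumes.set_index(volumes['sec'] + volumes['nano'] / 10 ** 9)
#   volumes_1s = interpolate(indexed.drop(['sec', 'nano'], axis=1), tstep=1)
#
# Each point on the new regular grid carries forward the last observed row
# before it (left-hand interpolation), giving an evenly spaced series.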
def imshow(data, which, levels):
"""
Display order book data as an image, where order book data is either of
`df_price` or `df_volume` returned by `load_hdf5` or `load_postgres`.
"""
if which == 'prices':
idx = ['askprc.' + str(i) for i in range(levels, 0, -1)]
idx.extend(['bidprc.' + str(i) for i in range(1, levels + 1, 1)])
elif which == 'volumes':
idx = ['askvol.' + str(i) for i in range(levels, 0, -1)]
idx.extend(['bidvol.' + str(i) for i in range(1, levels + 1, 1)])
plt.imshow(data.loc[:, idx].T, interpolation='nearest', aspect='auto')
plt.yticks(range(0, levels * 2, 1), idx)
plt.colorbar()
plt.tight_layout()
plt.show()
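# Hypothetical usage sketch, assuming `volumes` was loaded as above:
#
#   imshow(volumes, which='volumes', levels=10)
#
# draws a heatmap with the top 10 ask levels above the top 10 bid levels and
# one column per book snapshot.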
def reorder(data, columns):
"""Reorder the columns of order data.
The resulting columns will be asks (high-to-low) followed by bids (low-to-high).
"""
levels = int((data.shape[1] - 2) / 2)
if columns == 'volume' or columns == 'v':
idx = ['askvol.' + str(i) for i in range(levels, 0, -1)]
idx.extend(['bidvol.' + str(i) for i in range(1, levels + 1, 1)])
elif columns == 'price' or columns == 'p':
idx = ['askprc.' + str(i) for i in range(levels, 0, -1)]
idx.extend(['bidprc.' + str(i) for i in range(1, levels + 1, 1)])
return data.loc[:, idx]
def find_trades(messages, eps=10 ** -6):
if 'time' not in messages.columns:
messages['time'] = messages['sec'] + messages['nano'] / 10 ** 9
if 'type' in messages.columns:
messages = messages[messages.type == 'E']
trades = []
i = 0
while i < len(messages):
time = messages.iloc[i].time
side = messages.iloc[i].side
shares = messages.iloc[i].shares
vwap = messages.iloc[i].price
hit = 0
i += 1
if i == len(messages):
break
while messages.iloc[i].time <= time + eps and messages.iloc[i].side == side:
shares += messages.iloc[i].shares
if messages.iloc[i].price != vwap:
hit = 1
vwap = messages.iloc[i].price * messages.iloc[i].shares / shares + vwap * (
shares - messages.iloc[i].shares) / shares
i += 1
if i == len(messages):
break
# print('TRADE (time={}, side={}, shares={}, vwap={}, hit={})'.format(time, side, shares, vwap, hit))
trades.append([time, side, shares, vwap, hit])
return pd.DataFrame(trades, columns=['time', 'side', 'shares', 'vwap', 'hit'])
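# Hypothetical usage sketch: collapse executions that share a timestamp
# (within eps) and side into single trades with total size and a
# volume-weighted average price. Column availability depends on how the
# messages were stored.
#
#   msgs = load_hdf5('itch_20130101.hdf5', 'AAPL', grp='messages')
#   trades = find_trades(msgs, eps=10 ** -6)
#   plot_trades(trades)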
def plot_trades(trades):
sells = trades[trades.side == 'B']
buys = trades[trades.side == 'S']
plt.hist(-sells.shares, bins=np.arange(-1000, 100, 100), edgecolor='white', color='C0', alpha=0.5)  # sell volume on the negative axis
plt.hist(buys.shares, bins=np.arange(1, 1100, 100), edgecolor='white', color='C1', alpha=0.5)  # buy volume on the positive axis
plt.show()
plt.clf()
def nodups(books, messages):
"""Return messages and books with rows remove for orders that didn't change book."""
assert books.shape[0] == messages.shape[0], "books and messages do not have the same number of rows"
subset = books.columns.drop(['sec', 'nano', 'name'])
dups = books.duplicated(subset=subset)
return books[~dups].reset_index(), messages[~dups].reset_index()
def combine(messages, hidden):
"""Combine hidden executions with message data."""
messages = messages.drop(['index', 'sec', 'nano', 'name', 'refno', 'mpid'], axis=1)
hidden['type'] = 'H'
hidden = hidden.drop(['hit'], axis=1)
hidden = hidden.rename(columns={'vwap': 'price'})
combined = pd.concat([messages, hidden])
return combined.sort_values(by='time', axis=0)
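# Hypothetical end-to-end sketch using the helpers above (variable names are
# illustrative):
#
#   books, msgs = nodups(books, msgs)   # drop snapshots the order left unchanged
#   hidden = find_trades(trade_msgs)    # aggregate hidden executions into trades
#   events = combine(msgs, hidden)      # one time-ordered stream, hidden rows typed 'H'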
|
mit
| 8,576,444,294,337,752,000
| 36.861015
| 232
| 0.489086
| false
| 3.925556
| false
| false
| false
|
conejoninja/pelisalacarta
|
python/main-classic/channels/vepelis.py
|
1
|
18213
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para VePelis
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "vepelis"
__category__ = "F"
__type__ = "generic"
__title__ = "VePelis"
__language__ = "ES"
__creationdate__ = "20130528"
DEBUG = config.get_setting("debug")
def isGeneric():
return True
def mainlist(item):
logger.info("[vepelis.py] mainlist")
itemlist = []
itemlist.append( Item(channel=__channel__, title="Ultimas Agregadas", action="listado2" , url="http://www.vepelis.com/pelicula/ultimas-peliculas" , extra="http://www.vepelis.com/pelicula/ultimas-peliculas"))
itemlist.append( Item(channel=__channel__, title="Estrenos en DVD" , action="listado2" , url="http://www.vepelis.com/pelicula/ultimas-peliculas/estrenos-dvd" , extra="http://www.vepelis.com/pelicula/ultimas-peliculas/estrenos-dvd"))
itemlist.append( Item(channel=__channel__, title="Peliculas en Cartelera", action="listado2" , url="http://www.vepelis.com/pelicula/ultimas-peliculas/cartelera" , extra="http://www.vepelis.com/pelicula/ultimas-peliculas/cartelera"))
itemlist.append( Item(channel=__channel__, title="Ultimas Actualizadas" , action="listado2" , url="http://www.vepelis.com/pelicula/ultimas-peliculas/ultimas/actualizadas" , extra="http://www.vepelis.com/pelicula/ultimas-peliculas/ultimas/actualizadas"))
itemlist.append( Item(channel=__channel__, title="Por Genero" , action="generos" , url="http://www.vepelis.com/"))
itemlist.append( Item(channel=__channel__, title="Por Orden Alfabetico" , action="alfabetico" , url="http://www.vepelis.com/"))
itemlist.append( Item(channel=__channel__, title="Buscar" , action="search" , url="http://www.vepelis.com/"))
return itemlist
def listarpeliculas(item):
logger.info("[vepelis.py] listarpeliculas")
# Download the page
data = scrapertools.cachePage(item.url)
extra = item.extra
# Extract the entries from the selected page
'''<td class="DarkText" align="center" valign="top" width="100px" height="160px" style="background-color:#1e1e1e;" onmouseover="this.style.backgroundColor='#000000'" onmouseout="this.style.backgroundColor='#1e1e1e'"><p style="margin-bottom: 3px;border-bottom:#ABABAB 1px solid">
<a href="http://www.peliculasaudiolatino.com/movies/Larry_Crowne.html"><img src="http://www.peliculasaudiolatino.com/poster/85x115/peliculas/movieimg/movie1317696842.jpg" alt="Larry Crowne" border="0" height="115" width="85"></a>'''
patron = '<td class=.*?<a '
patron += 'href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
itemlist = []
for match in matches:
scrapedurl = match[0]
scrapedtitle = match[2]
scrapedtitle = unicode( scrapedtitle, "iso-8859-1" , errors="replace" ).encode("utf-8")
scrapedthumbnail = match[1]
scrapedplot = ""
logger.info(scrapedtitle)
# Add to the listing
itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle , fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , extra=extra , folder=True) )
# Extract the next-page marker
patron = 'Anterior.*? :: <a href="/../../.*?/page/([^"]+)">Siguiente '
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
for match in matches:
if len(matches)>0:
scrapedurl = extra+match
scrapedtitle = "!Pagina Siguiente"
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append( Item(channel=__channel__, action="listarpeliculas", title=scrapedtitle , fulltitle=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , extra=extra , folder=True) )
return itemlist
def findvideos(item):
logger.info("[vepelis.py] videos")
# Download the page
data = scrapertools.cachePage(item.url)
title = item.title
scrapedthumbnail = item.thumbnail
itemlist = []
patron = '<li><a href="#ms.*?">([^"]+)</a></li>.*?<iframe src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
#itemlist.append( Item(channel=__channel__, action="play", title=title , fulltitle=item.fulltitle, url=item.url , thumbnail=scrapedthumbnail , folder=False) )
if (DEBUG): scrapertools.printMatches(matches)
for match in matches:
url = match[1]
title = "SERVIDOR: " + match[0]
title = unicode( title, "iso-8859-1" , errors="replace" ).encode("utf-8")
itemlist.append( Item(channel=__channel__, action="play", title=title , fulltitle=item.fulltitle, url=url , thumbnail=scrapedthumbnail , folder=False) )
return itemlist
def play(item):
logger.info("[vepelis.py] play")
itemlist=[]
from servers import servertools
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.channel=__channel__
videoitem.action="play"
videoitem.folder=False
return itemlist
#data2 = scrapertools.cache_page(item.url)
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/mv.php?url=","http://www.megavideo.com/?v=")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/videobb.php?url=","http://www.videobb.com/watch_video.php?v=")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/vidbux.php?url=","http://www.vidbux.com/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/vidxden.php?url=","http://www.vidxden.com/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/videozer.php?url=","http://www.videozer.com/video/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/v/pl/play.php?url=","http://www.putlocker.com/embed/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/v/mv/play.php?url=","http://www.modovideo.com/frame.php?v=")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/v/ss/play.php?url=","http://www.sockshare.com/embed/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/v/vb/play.php?url=","http://vidbull.com/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/sockshare.php?url=","http://www.sockshare.com/embed/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/moevide.php?url=","http://moevideo.net/?page=video&uid=")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/novamov.php?url=","http://www.novamov.com/video/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/movshare.php?url=","http://www.movshare.net/video/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/divxstage.php?url=","http://www.divxstage.net/video/")
#listavideos = servertools.findvideos(data2)
#for video in listavideos:
# invalid = video[1]
# invalid = invalid[0:8]
# if invalid!= "FN3WE43K" and invalid!="9CC3F8&e":
# scrapedtitle = item.title+video[0]
# videourl = item.url
# server = video[2]
# if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"]")
#logger.info("url=" + item.url)
# Add to the XBMC listing
#itemlist.append( Item(channel=__channel__, action="play", title=scrapedtitle , fulltitle=item.fulltitle, url=videourl , server=server , folder=False) )
# itemlist.append( Item(channel=__channel__, action="play" , title=item.title , url=item.url, thumbnail="", plot="", server=item.url))
# return itemlist
def generos(item):
logger.info("[vepelis.py] generos")
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url)
patron = '>.*?<li><a title="(.*?)" href="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if (DEBUG): scrapertools.printMatches(matches)
for match in matches:
scrapedurl = urlparse.urljoin("",match[1])
scrapedurl = scrapedurl.replace(".html","/page/0.html")
extra = scrapedurl.replace ("/page/0.html","/page/")
scrapedtitle = match[0]
#scrapedtitle = scrapedtitle.replace("","")
scrapedthumbnail = ""
scrapedplot = ""
logger.info(scrapedtitle)
if scrapedtitle=="Eroticas +18":
if config.get_setting("adult_mode") == "true":
itemlist.append( Item(channel=__channel__, action="listado2", title="Eroticas +18" , url="http://www.myhotamateurvideos.com" , thumbnail=scrapedthumbnail , plot=scrapedplot , extra="" , folder=True) )
else:
if scrapedtitle <> "" and len(scrapedtitle) < 20 and scrapedtitle <> "Iniciar Sesion":
itemlist.append( Item(channel=__channel__, action="listado2", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , extra=extra, folder=True) )
itemlist = sorted(itemlist, key=lambda Item: Item.title)
return itemlist
def alfabetico(item):
logger.info("[cinewow.py] listalfabetico")
extra = item.url
itemlist = []
itemlist.append( Item(channel=__channel__, action="listado2" , title="0-9", url="http://www.vepelis.com/letra/09.html", extra="http://www.vepelis.com/letra/09.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="A" , url="http://www.vepelis.com/letra/a.html", extra="http://www.vepelis.com/letra/a.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="B" , url="http://www.vepelis.com/letra/b.html", extra="http://www.vepelis.com/letra/b.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="C" , url="http://www.vepelis.com/letra/c.html", extra="http://www.vepelis.com/letra/c.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="E" , url="http://www.vepelis.com/letra/d.html", extra="http://www.vepelis.com/letra/d.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="D" , url="http://www.vepelis.com/letra/e.html", extra="http://www.vepelis.com/letra/e.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="F" , url="http://www.vepelis.com/letra/f.html", extra="http://www.vepelis.com/letra/f.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="G" , url="http://www.vepelis.com/letra/g.html", extra="http://www.vepelis.com/letra/g.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="H" , url="http://www.vepelis.com/letra/h.html", extra="http://www.vepelis.com/letra/h.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="I" , url="http://www.vepelis.com/letra/i.html", extra="http://www.vepelis.com/letra/i.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="J" , url="http://www.vepelis.com/letra/j.html", extra="http://www.vepelis.com/letra/j.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="K" , url="http://www.vepelis.com/letra/k.html", extra="http://www.vepelis.com/letra/k.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="L" , url="http://www.vepelis.com/letra/l.html", extra="http://www.vepelis.com/letra/l.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="M" , url="http://www.vepelis.com/letra/m.html", extra="http://www.vepelis.com/letra/m.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="N" , url="http://www.vepelis.com/letra/n.html", extra="http://www.vepelis.com/letra/n.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="O" , url="http://www.vepelis.com/letra/o.html", extra="http://www.vepelis.com/letra/o.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="P" , url="http://www.vepelis.com/letra/p.html", extra="http://www.vepelis.com/letra/p.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="Q" , url="http://www.vepelis.com/letra/q.html", extra="http://www.vepelis.com/letra/q.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="R" , url="http://www.vepelis.com/letra/r.html", extra="http://www.vepelis.com/letra/r.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="S" , url="http://www.vepelis.com/letra/s.html", extra="http://www.vepelis.com/letra/s.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="T" , url="http://www.vepelis.com/letra/t.html", extra="http://www.vepelis.com/letra/t.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="U" , url="http://www.vepelis.com/letra/u.html", extra="http://www.vepelis.com/letra/u.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="V" , url="http://www.vepelis.com/letra/v.html", extra="http://www.vepelis.com/letra/v.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="W" , url="http://www.vepelis.com/letra/w.html", extra="http://www.vepelis.com/letra/w.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="X" , url="http://www.vepelis.com/letra/x.html", extra="http://www.vepelis.com/letra/x.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="Y" , url="http://www.vepelis.com/letra/y.html", extra="http://www.vepelis.com/letra/y.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="Z" , url="http://www.vepelis.com/letra/z.html", extra="http://www.vepelis.com/letra/z.html"))
return itemlist
def listado2(item):
logger.info("[vepelis.py] listado2")
extra = item.extra
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url)
patron = '<h2 class="titpeli.*?<a href="([^"]+)" title="([^"]+)".*?peli_img_img">.*?<img src="([^"]+)".*?<strong>Idioma</strong>:.*?/>([^"]+)</div>.*?<strong>Calidad</strong>: ([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
if (DEBUG): scrapertools.printMatches(matches)
for match in matches:
scrapedurl = match[0] #urlparse.urljoin("",match[0])
scrapedtitle = match[1] + ' - ' + match[4]
scrapedtitle = unicode( scrapedtitle, "iso-8859-1" , errors="replace" ).encode("utf-8")
scrapedthumbnail = match[2]
#scrapedplot = match[0]
#itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle , fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , folder=True) )
#if extra<>"":
# Extract the next-page marker
#patron = 'page=(.*?)"><span><b>'
patron = '<span><b>(.*?)</b></span>'
matches = re.compile(patron,re.DOTALL).findall(data)
#if DEBUG: scrapertools.printMatches(matches)
for match in matches:
#if len(matches)>0:
nu = int(match[0]) + 1
scrapedurl = extra + "?page=" + str(nu)
scrapedtitle = "!Pagina Siguiente ->"
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append( Item(channel=__channel__, action="listado2", title=scrapedtitle , fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , extra=extra , folder=True) )
return itemlist
def search(item,texto):
logger.info("[vepelis.py] search")
itemlist = []
texto = texto.replace(" ","+")
try:
# Series
item.url="http://www.vepelis.com/buscar/?q=%s"
item.url = item.url % texto
item.extra = ""
itemlist.extend(listado2(item))
itemlist = sorted(itemlist, key=lambda Item: Item.title)
return itemlist
# Catch the exception so that a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
'''url = "http://www.peliculasaudiolatino.com/series-anime"
data = scrapertools.cachePage(url)
# Extrae las entradas de todas series
patronvideos = '<li>[^<]+'
patronvideos += '<a.+?href="([\D]+)([\d]+)">[^<]+'
patronvideos += '.*?/>(.*?)</a>'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
scrapedtitle = match[2].strip()
# Realiza la busqueda
if scrapedtitle.lower()==texto.lower() or texto.lower() in scrapedtitle.lower():
logger.info(scrapedtitle)
scrapedurl = urlparse.urljoin(url,(match[0]+match[1]))
scrapedthumbnail = urlparse.urljoin("http://www.peliculasaudiolatino.com/images/series/",(match[1]+".png"))
scrapedplot = ""
# Añade al listado
itemlist.append( Item(channel=__channel__, action="listacapitulos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist'''
# Automatic channel verification: this function must return "True" if the channel is OK.
def test():
from servers import servertools
# mainlist
mainlist_items = mainlist(Item())
# Accept the channel if any of the "Novedades" videos returns mirrors
novedades_items = listado2(mainlist_items[0])
bien = False
for novedades_item in novedades_items:
mirrors = servertools.find_video_items( item=novedades_item )
if len(mirrors)>0:
bien = True
break
return bien
|
gpl-3.0
| 6,332,740,761,873,854,000
| 55.691589
| 283
| 0.652599
| false
| 3.148988
| false
| false
| false
|
smn/malva
|
malva/utils.py
|
1
|
1658
|
# -*- test-case-name: malva.tests.test_utils -*-
from twisted.internet.serialport import SerialPort
from twisted.internet.defer import DeferredList, Deferred
from twisted.internet import reactor
from txgsm import txgsm
from serial.tools import list_ports
class ModemProbe(object):
protocol = txgsm.TxGSMProtocol
serial_port_class = SerialPort
def __init__(self, verbose):
self.verbose = verbose
def available_ports(self):
return list_ports.comports()
def probe_ports(self, timeout=2):
dl = [self.probe_port(port, timeout)
for port, _, _ in self.available_ports()]
return DeferredList(dl, consumeErrors=True)
def setup_protocol(self, port):
# separate function for easier stubbing in a test
proto = self.protocol()
proto.verbose = self.verbose
self.serial_port_class(proto, port, reactor)
return proto
def probe_port(self, port, timeout):
def get_results(probe_result):
(_, imsi, _, manufacturer, _) = probe_result
return (port, imsi, manufacturer)
d = self.get_modem(port, timeout)
d.addCallback(lambda modem: modem.probe())
d.addCallback(get_results)
return d
def get_modems(self, timeout=2):
dl = [self.get_modem(port, timeout)
for port, _, _ in self.available_ports()]
return DeferredList(dl, consumeErrors=True)
def get_modem(self, port, timeout):
d = Deferred()
d.addCallback(self.setup_protocol)
reactor.callLater(timeout, d.cancel)
reactor.callLater(0, d.callback, port)
return d
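# Minimal usage sketch (assumes a GSM modem is attached; the timeout is an
# illustrative choice). probe_ports() returns a DeferredList whose results are
# (port, IMSI, manufacturer) tuples for ports that answered.
#
#   def report(results):
#       for ok, value in results:
#           if ok:
#               print(value)
#       reactor.stop()
#
#   probe = ModemProbe(verbose=False)
#   probe.probe_ports(timeout=2).addCallback(report)
#   reactor.run()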
|
bsd-3-clause
| 5,435,535,710,121,342,000
| 28.607143
| 57
| 0.638721
| false
| 3.829099
| false
| false
| false
|
codycollier/netropy
|
netropy/record.py
|
1
|
2678
|
"""record
The record data structure and helper functions.
"""
import collections
import hashlib
# -----------------------------------------------------------------------------
# The main data structure
# -----------------------------------------------------------------------------
record_fields = ['version', 'frequency', 'timeStamp',
'seedValue', 'previousOutputValue',
'signatureValue', 'outputValue', 'statusCode']
record_field_ints = ('frequency', 'timeStamp')
Record = collections.namedtuple('Record', record_fields)
# -----------------------------------------------------------------------------
# Parsing helpers
# -----------------------------------------------------------------------------
def _extract_value(field_name, raw_xml):
"""Extract a value from raw xml
Simplistic string parsing version...
"""
val = raw_xml.split("%s>" % field_name)[1].rstrip('</')
return val
def parse_record_xml(record_xml):
"""Parse record xml and return a dictionary
Simplistic string parsing version...
"""
rec = {}
for field_name in record_fields:
val = _extract_value(field_name, record_xml)
if field_name in record_field_ints:
val = int(val)
rec[field_name] = val
return rec
# -----------------------------------------------------------------------------
# Record validation
# -----------------------------------------------------------------------------
def verify_record(record):
"""Verify a record is internally consistent
signatureValue - This can't be verified as there is no public key
outputValue - This should be a hash of the signatureValue
From the schema file info for outputValue:
The SHA-512 hash of the signatureValue as a 64 byte hex string
reminder:
The outputValue hash is a hash of the signatureValue byte string, not
the signatureValue hex string. See decode('hex').
"""
signature_value = record['signatureValue']
output_value = record['outputValue']
sv_hash = hashlib.sha512(signature_value.decode('hex')).hexdigest().upper()
return sv_hash == output_value
def verify_pair(record1, record2):
"""Verify two records which are chained together
Any given record (except the first) should be chained to the previous
by a matching hash in previousOutputValue.
From the schema file info for outputValue:
The SHA-512 hash value for the previous record - 64 byte hex string
"""
rec1_output_value = record1['outputValue']
rec2_previous_output_value = record2['previousOutputValue']
return rec1_output_value == rec2_previous_output_value
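# Illustrative sketch of the intended flow (record_xml and next_record_xml are
# placeholders for two consecutive beacon records fetched elsewhere):
#
#   rec1 = parse_record_xml(record_xml)
#   rec2 = parse_record_xml(next_record_xml)
#   assert verify_record(rec1)       # outputValue is SHA-512 of the signatureValue bytes
#   assert verify_pair(rec1, rec2)   # rec2.previousOutputValue chains to rec1.outputValue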
|
mit
| -1,230,551,760,263,086,600
| 28.428571
| 79
| 0.558626
| false
| 4.799283
| false
| false
| false
|
jerpat/csmake
|
csmake-manifest/CsmakeModules/CsversionHLinuxConfigApt.py
|
1
|
3468
|
# <copyright>
# (c) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# </copyright>
from Csmake.CsmakeAspect import CsmakeAspect
import urlparse
from datetime import datetime
import sys
class CsversionHLinuxConfigApt(CsmakeAspect):
"""Purpose: To capture the information provided to a HLinuxConfigApt
section.
Options: tag - Provides a context name for the information
for example - cs-mgmt-base-image or cs-mgmt-sources
Joinpoints: end__build - captures the metadata from the section
Creates Environment:
__Csversion__ - A dictionary where product metadata is stored under
'product'. 'product' is product info keyed off of
the type of data stored, in this case 'apt'.
The same metadata/tag combination will be overwritten
if pulled twice.
The structure of the product dictionary is
a dictionary of tags from builds with apt
{ 'product' : { <tag> : { 'apt' : { <apt options>} } } }
"""
REQUIRED_OPTIONS = ['tag']
def end__build(self, phase, options, hlinuxsection, hlinuxoptions):
if '__Csversion__' not in self.env.env:
self.env.env['__Csversion__'] = {}
self.log.debug("__Csversion__ not found creating new")
if 'product' not in self.env.env['__Csversion__']:
self.env.env['__Csversion__']['product'] = {}
self.log.debug("product not found, creating new")
versdict = self.env.env['__Csversion__']['product']
if 'apt' not in versdict:
versdict['apt'] = {}
self.log.debug("build data not found, creating new")
else:
if options['tag'] in versdict['apt']:
self.log.warning("apt, Tag: %s :: Overwriting %s",
options['tag'],
str(versdict['apt'][options['tag']]) )
versdict['apt'][options['tag']] = dict(hlinuxoptions)
self.log.passed()
return True
|
gpl-3.0
| 2,895,465,660,196,402,700
| 45.24
| 80
| 0.647636
| false
| 4.308075
| false
| false
| false
|
cemoody/lda2vec
|
lda2vec/negative_sampling.py
|
1
|
7611
|
import numpy
import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class NegativeSamplingFunction(function.Function):
ignore_label = -1
def __init__(self, sampler, sample_size):
self.sampler = sampler
self.sample_size = sample_size
def _make_samples(self, t):
if hasattr(self, 'samples'):
return self.samples # for testing
size = int(t.shape[0])
# first one is the positive, and others are sampled negatives
samples = self.sampler((size, self.sample_size + 1))
samples[:, 0] = t
self.samples = samples
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, t_type, w_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 2,
t_type.dtype == numpy.int32,
t_type.ndim == 1,
x_type.shape[0] == t_type.shape[0],
w_type.dtype == numpy.float32,
w_type.ndim == 2,
)
def forward_cpu(self, inputs):
x, t, W = inputs
self.ignore_mask = (t != self.ignore_label)
self._make_samples(t)
loss = numpy.float32(0.0)
for i, (ix, k) in enumerate(six.moves.zip(x[self.ignore_mask],
self.samples[self.ignore_mask])):
w = W[k]
f = w.dot(ix)
f[0] *= -1 # positive sample
loss += numpy.sum(numpy.logaddexp(f, 0))
return numpy.array(loss, numpy.float32),
def forward_gpu(self, inputs):
x, t, W = inputs
self.ignore_mask = (t != self.ignore_label)
n_in = x.shape[1]
self._make_samples(t)
self.wx = cuda.elementwise(
'raw T W, raw T x, bool mask, S k, int32 c, int32 m', 'T wx',
'''
T f = 0;
if (mask == 1){
for (int j = 0; j < c; ++j) {
int x_ind[] = {(i / m), j};
int w_ind[] = {k, j};
f += x[x_ind] * W[w_ind];
}
}
wx = f;
''',
'negative_sampling_wx'
)(W, x, self.ignore_mask[:, None], self.samples, n_in,
self.sample_size + 1)
y = cuda.elementwise(
'T wx, int32 c, int32 m', 'T y',
'''
T f = wx;
if (i % m == 0) {
f = -f;
}
T loss;
if (f < 0) {
loss = __logf(1 + __expf(f));
} else {
loss = f + __logf(1 + __expf(-f));
}
y = loss;
''',
'negative_sampling_forward'
)(self.wx, n_in, self.sample_size + 1)
# TODO(okuta): merge elementwise
loss = cuda.cupy.sum(y * self.ignore_mask[:, None].astype('float32'))
return loss,
def backward_cpu(self, inputs, grads):
x, t, W = inputs
gloss, = grads
gx = numpy.zeros_like(x)
gW = numpy.zeros_like(W)
for i, (ix, k) in enumerate(six.moves.zip(x[self.ignore_mask],
self.samples[self.ignore_mask])):
w = W[k]
f = w.dot(ix)
# g == -y * gloss / (1 + exp(yf))
f[0] *= -1
g = gloss / (1 + numpy.exp(-f))
g[0] *= -1
gx[i] = g.dot(w)
for ik, ig in six.moves.zip(k, g):
gW[ik] += ig * ix
return gx, None, gW
def backward_gpu(self, inputs, grads):
cupy = cuda.cupy
x, t, W = inputs
gloss, = grads
n_in = x.shape[1]
g = cuda.elementwise(
'T wx, raw T gloss, int32 m', 'T g',
'''
T y;
if (i % m == 0) {
y = 1;
} else {
y = -1;
}
g = -y * gloss[0] / (1.0f + __expf(wx * y));
''',
'negative_sampling_calculate_g'
)(self.wx, gloss, self.sample_size + 1)
gx = cupy.zeros_like(x)
cuda.elementwise(
'raw T g, raw T W, bool mask, raw S k, int32 c, int32 m', 'T gx',
'''
int d = i / c;
T w = 0;
if (mask == 1){
for (int j = 0; j < m; ++j) {
w += g[d * m + j] * W[k[d * m + j] * c + i % c];
}
}
gx = w;
''',
'negative_sampling_calculate_gx'
)(g, W, self.ignore_mask[:, None], self.samples, n_in,
self.sample_size + 1, gx)
gW = cupy.zeros_like(W)
cuda.elementwise(
'T g, raw T x, S k, bool mask, int32 c, int32 m',
'raw T gW',
'''
T gi = g;
if (mask == 1) {
for (int j = 0; j < c; ++j) {
atomicAdd(&gW[k * c + j], gi * x[(i / m) * c + j]);
}
}
''',
'negative_sampling_calculate_gw'
)(g, x, self.samples, self.ignore_mask[:, None], n_in,
self.sample_size + 1, gW)
return gx, None, gW
def negative_sampling(x, t, W, sampler, sample_size):
"""Negative sampling loss function.
In natural language processing, especially language modeling, the number of
words in a vocabulary can be very large.
Therefore, you need to spend a lot of time calculating the gradient of the
embedding matrix.
By using the negative sampling trick you only need to calculate the
gradient for a few sampled negative examples.
The objective function is below:
.. math::
f(x, p) = \\log \\sigma(x^\\top w_p) + \\
k E_{i \\sim P(i)}[\\log \\sigma(- x^\\top w_i)],
where :math:`\sigma(\cdot)` is a sigmoid function, :math:`w_i` is the
weight vector for the word :math:`i`, and :math:`p` is a positive example.
It is approximated with :math:`k` examples :math:`N` sampled from
probability :math:`P(i)`, like this:
.. math::
f(x, p) \\approx \\log \\sigma(x^\\top w_p) + \\
\\sum_{n \\in N} \\log \\sigma(-x^\\top w_n).
Each sample of :math:`N` is drawn from the word distribution :math:`P(w)`.
This is calculated as :math:`P(w) = \\frac{1}{Z} c(w)^\\alpha`, where
:math:`c(w)` is the unigram count of the word :math:`w`, :math:`\\alpha` is
a hyper-parameter, and :math:`Z` is the normalization constant.
Args:
x (~chainer.Variable): Batch of input vectors.
t (~chainer.Variable): Vector of groundtruth labels.
W (~chainer.Variable): Weight matrix.
sampler (function): Sampling function. It takes a shape and returns an
integer array of the shape. Each element of this array is a sample
from the word distribution. A :class:`~chainer.utils.WalkerAlias`
object built with the power distribution of word frequency is
recommended.
sample_size (int): Number of samples.
See: `Distributed Representations of Words and Phrases and their\
Compositionality <http://arxiv.org/abs/1310.4546>`_
.. seealso:: :class:`~chainer.links.NegativeSampling`.
"""
return NegativeSamplingFunction(sampler, sample_size)(x, t, W)
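# Minimal usage sketch (shapes, counts and the 0.75 exponent are illustrative).
# The sampler is a chainer WalkerAlias built from unigram counts, as suggested
# in the docstring above.
#
#   import numpy as np
#   from chainer import Variable, utils
#   counts = np.array([5, 4, 3, 2, 1], dtype='float32')
#   sampler = utils.WalkerAlias(counts ** 0.75).sample
#   x = Variable(np.random.randn(8, 16).astype('float32'))    # context vectors
#   t = Variable(np.random.randint(0, 5, 8).astype('int32'))  # target word ids
#   W = Variable(np.random.randn(5, 16).astype('float32'))    # output embeddings
#   loss = negative_sampling(x, t, W, sampler, sample_size=2)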
# Monkey-patch the chainer code to replace the negative sampling
# with the one used here
import chainer.links as L
import chainer.functions as F
negative_sampling.patched = True
L.NegativeSampling.negative_sampling = negative_sampling
F.negative_sampling = negative_sampling
|
mit
| -4,506,923,810,108,634,600
| 31.665236
| 79
| 0.496518
| false
| 3.51385
| false
| false
| false
|
seraphln/onedrop
|
crawler/api_proxy.py
|
1
|
5434
|
# coding=utf8
#
"""
Common logic for interacting with the remote API server.
"""
import os
import json
import socket
import urllib
import datetime
import requests
import traceback
import config
API_HOST = "http://180.76.149.212:8083"
GRAPHQL_HOST = "%s/graphql?query=%%s" % API_HOST
def to_url_params(params):
"""
URL-encode the given GET parameters.
@param params: GET parameters
@type params: Dict
:return: urllib.urlencode(params)
"""
if not params:
return ""
new_params = {}
for k,v in params.items():
if isinstance(v, unicode):
new_params[k] = v.encode('utf-8')
elif isinstance(v, str):
new_params[k] = v
else:
raise
return urllib.urlencode(new_params)
def request(method, url, params=None):
"""
Wrap a request to the remote server.
@param method: HTTP method of the request
@type method: String
@param url: request path
@type url: String
@param params: parameters sent with the request
@type params: Dict
@param ak: access key sent with the request
@type ak: String
:return: api_response
"""
start_time = str(datetime.datetime.now())
headers = {}
headers['X-Auth-Access-Key'] = config.API_ACCESS_KEY
query_dict = {"headers": headers,
"verify": False,
"timeout": 60}
data = None if not params else params
if method == 'GET' and params:
url += '?' + to_url_params(params)
data = None
else:
if data:
query_dict["data"] = data
status_code = 0
try:
resp = requests.request(method, url, **query_dict)
status_code = resp.status_code
resp = resp.json()
except:
resp = {'success':False, 'result':traceback.format_exc()}
resp['status_code'] = status_code
resp['time'] = resp.get('time', {})
resp['time']['api_start'] = start_time
resp['time']['api_end'] = str(datetime.datetime.now())
return resp
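# Illustrative sketch (the endpoint and parameters are hypothetical): issue an
# authenticated GET through the wrapper above; the access-key header and the
# timing fields are filled in automatically.
#
#   resp = request("GET", "%s/some_endpoint/" % API_HOST, params={"page": "1"})
#   if resp.get("status_code") == 200:
#       print resp["time"]["api_start"], resp["time"]["api_end"]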
def get_crawler_seed():
"""
Fetch the crawl seeds.
:return: {"data": {"tasks": cseeds[0].get("node")}}
"""
query_str = ''' query fetchCrawlerSeeds{allCrawlerSeeds(source: "pcbaby") {
edges {
node {
id,
name,
source,
url,
status
}
}
}}
'''
data = request("GET", GRAPHQL_HOST % query_str)
cseeds = data.get("data", {}).get("allCrawlerSeeds", {}).get("edges", [{}])
print cseeds
if not cseeds or cseeds[0].get("status") == "finished":
return {}
else:
return {"data": {"seeds": cseeds[0].get("node")}}
def get_crawler_task(source):
"""
Request a monitoring task from the remote anduin service.
@param source: source that the crawler task corresponds to
@type source: String
:return: {"data": {"tasks": ctasks[0].get("node")}}
"""
query_str = ''' query fetchCrawlerTasks{allCrawlerTasks(source: "%s") {
edges {
node {
id,
name,
url,
status,
category,
ttype
}
}
}}
'''
query_str = query_str % source
data = request("GET", GRAPHQL_HOST % query_str)
ctasks = data.get("data", {}).get("allCrawlerTasks", {}).get("edges", [{}])
if not ctasks or ctasks[0].get("status") == "finished":
return {}
else:
return {"data": {"tasks": ctasks[0].get("node")}}
def update_crawler_task_by_rest_api(task_result):
"""
Update the remote server with collected data via a POST request.
@param task_result: results collected by the crawler task
@type task_result: Dict
:return: {}
"""
url = "%s/update_crawler_task/" % API_HOST
data = {"task_result": task_result}
return request("POST", url, params=data)
def register_crawler_node(task_result):
""" 将当前爬虫节点注册到服务器上 """
query_str = '''
mutation MTMutation {
cnodes(input: {nodeInfo: "%s"}) {
cnode {
id,
name,
remoteAddr,
status
}
}
}
'''
query_str = query_str % str(task_result)
url = GRAPHQL_HOST % query_str
return request("POST", url)
def update_crawler_task(task_result):
""" 将数据更新到远程服务器上 """
query_str = '''
mutation MTMutation {
ctasks(input: {taskResult: "%s"}) {
ctask {
id,
status
}
}
}
'''
query_str = query_str % str(task_result)
url = GRAPHQL_HOST % query_str
return request("POST", url)
if __name__ == "__main__":
#import json
#import base64
#task_result = {"name": "%s-%s" % (socket.gethostname(), os.getpid())}
#print register_crawler_node(base64.urlsafe_b64encode(json.dumps(task_result)))
print get_crawler_seed()
|
gpl-3.0
| 1,172,086,396,405,874,000
| 23.528571
| 83
| 0.481359
| false
| 3.47738
| false
| false
| false
|
pytn/pytn
|
pytn/proposals/models.py
|
1
|
1067
|
from django.db import models
from symposion.proposals.models import ProposalBase
class Proposal(ProposalBase):
AUDIENCE_LEVEL_NOVICE = 1
AUDIENCE_LEVEL_EXPERIENCED = 2
AUDIENCE_LEVEL_INTERMEDIATE = 3
AUDIENCE_LEVELS = [
(AUDIENCE_LEVEL_NOVICE, "Novice"),
(AUDIENCE_LEVEL_INTERMEDIATE, "Intermediate"),
(AUDIENCE_LEVEL_EXPERIENCED, "Experienced"),
]
audience_level = models.IntegerField(choices=AUDIENCE_LEVELS)
recording_release = models.BooleanField(
default=True,
help_text="By submitting your proposal, you agree to give permission to the conference organizers to record, edit, and release audio and/or video of your presentation. If you do not agree to this, please uncheck this box."
)
def __unicode__(self):
return self.title
class Meta:
abstract = True
class TalkProposal(Proposal):
class Meta:
verbose_name = "talk proposal"
class TutorialProposal(Proposal):
class Meta:
verbose_name = "tutorial proposal"
|
mit
| 4,846,354,523,018,538,000
| 26.358974
| 230
| 0.677601
| false
| 3.67931
| false
| false
| false
|
PhilHarnish/forge
|
src/puzzle/problems/crossword/cryptic_problem.py
|
1
|
10923
|
import collections
from data import chain, crossword, warehouse
from data.alphabets import cryptic_keywords
from puzzle.problems.crossword import _base_crossword_problem
class CrypticProblem(_base_crossword_problem._BaseCrosswordProblem):
def __init__(self, name, lines, **kwargs):
super(CrypticProblem, self).__init__(name, lines, **kwargs)
self._plan = None
self._tokens = None
def _init(self):
if self._plan is None and self._tokens is None:
parsed, plan = _compile(self.lines[0])
self._tokens = chain.Chain(parsed)
self._plan = plan
@staticmethod
def score(lines):
if len(lines) > 1:
return 0
line = lines[0]
parts = line.split()
if any(part in cryptic_keywords.ALL_INDICATORS for part in parts):
return 1
return _base_crossword_problem.score(lines) * .9 # Lower than normal.
def _solve(self):
self._init()
solutions = _Solutions(self._notes, self._min_length, self._max_length)
_visit(self._tokens, self._plan, solutions)
return solutions
def _compile(clue):
words_api = warehouse.get('/api/words')
result = []
indicators_seen = collections.defaultdict(list)
for i, token in enumerate(crossword.tokenize_clue(clue)):
indicator_token = token
base_form = words_api.base_form(token)
if base_form in cryptic_keywords.ALL_INDICATORS:
indicator_token = base_form
if indicator_token in cryptic_keywords.ALL_INDICATORS:
for indicator in cryptic_keywords.ALL_INDICATORS[indicator_token]:
indicators_seen[indicator].append(i)
result.append([token])
plan = sorted(indicators_seen.items(), key=lambda i: _VISIT_ORDER[i[0]])
return result, plan
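# Shape sketch of _compile's output (the clue text is hypothetical; the real
# indicator vocabulary lives in cryptic_keywords):
#
#   parsed, plan = _compile('some cryptic clue')
#   # parsed -> [['some'], ['cryptic'], ['clue']]  one single-token list per word
#   # plan   -> [(indicator, [token positions]), ...] sorted by _VISIT_ORDER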
def _visit(tokens, plan, solutions):
words_api = warehouse.get('/api/words')
# First pass: perform any necessary expansions.
for _, words in tokens.items():
source = words[0]
if source in cryptic_keywords.SHORTHAND_CONVERSIONS:
words.extend(cryptic_keywords.SHORTHAND_CONVERSIONS[source])
words.extend(words_api.expand(source).keys())
for indicator, positions in plan:
try:
_VISIT_MAP[indicator](tokens, positions, solutions)
except NotImplementedError:
print('Indicator for "%s" not implemented' % indicator)
raise NotImplementedError('Indicator for "%s" not implemented' % indicator)
except Exception:
print('Error visiting %s for %s' % (
indicator, ' '.join(words[0] for words in tokens)
))
raise
if not solutions:
# Attempt to find the solution from pieces of the expanded words.
_visit_concatenate(tokens, [], solutions)
if not solutions:
# Finally, attempt to find the solution from just 1 expanded word.
_visit_edge_words(tokens, [], solutions)
def _visit_initial(tokens, positions, solutions):
del solutions # Initial indicator produces more tokens.
for position in positions:
tokens.pop(position)
for _, words in tokens.items():
source = words[0]
words.append(source[0])
for position in reversed(positions):
tokens.restore(position)
def _visit_edge_words(tokens, positions, solutions):
del positions
top_words = warehouse.get('/words/unigram')
for edge in (tokens[0], tokens[-1]):
for token in edge[1:]: # Skip first word.
if token in top_words:
solutions.add(token, .33, 'synonym for edge word "%s"', [[edge[0]]])
def _visit_word_edges(tokens, positions, solutions):
del solutions # Edge indicator produces more tokens.
for position in positions:
tokens.pop(position)
for _, words in tokens.items():
source = words[0]
words.append(source[0] + source[-1])
for position in reversed(positions):
tokens.restore(position)
def _visit_reversal(tokens, positions, solutions):
del solutions # Initial indicator produces more tokens.
for position in positions:
tokens.pop(position)
for _, words in tokens.items():
source = words[0]
words.append(''.join(reversed(source)))
for position in reversed(positions):
tokens.restore(position)
def _visit_embedded(tokens, positions, solutions):
min_length = solutions.min_length
max_length = solutions.max_length
acc = []
pos_map = []
start_map = []
for pos, expanded in tokens.items():
source = expanded[0]
acc.append(source)
for i in range(len(source)):
pos_map.append(pos)
start_map.append(i == 0)
search_text = ''.join(acc)
trie = warehouse.get('/words/unigram/trie')
interesting_threshold = trie.interesting_threshold()
end = len(search_text) - min_length
ignored = set(acc) # Ignore words from clue itself.
for offset in range(end + 1): # End should be inclusive.
for result, weight in trie.walk(search_text[offset:]):
if result in ignored:
continue
result_length = len(result)
if min_length <= result_length <= max_length:
base_weight = min(1, weight / interesting_threshold)
# Demote scores for start-of-word.
if start_map[offset]:
base_weight *= .9
# Score = % of word not banned by `positions`.
score = base_weight * (
sum(pos_map[i] not in positions for i in
range(offset, offset + result_length))
) / result_length
start_pos = pos_map[offset]
end_pos = pos_map[offset + result_length - 1] + 1
embedded_slice = tokens[start_pos:end_pos]
solutions.add(result, score, 'embedded in %s', embedded_slice)
def _visit_anagram(tokens, positions, solutions):
end = len(tokens)
min_length = solutions.min_length
max_length = solutions.max_length
anagram_positions = set(positions)
anagram_index = warehouse.get('/words/unigram/anagram_index')
trie = warehouse.get('/words/unigram/trie')
interesting_threshold = trie.interesting_threshold()
banned_max = len(anagram_positions)
def _add(acc, banned_max):
parts = []
banned_matches = 0
for word, pos in acc:
parts.append(word)
if pos in anagram_positions:
banned_matches += 1
elif word in cryptic_keywords.CONCATENATE_INDICATORS:
# Special case for concatenate keywords which frequently join two
# chunks of an anagram.
banned_matches += 1
banned_max += 1
solution = ''.join(parts)
if solution not in anagram_index:
return
anagrams = anagram_index[solution]
# Score is 0 if all acc are from positions; .5 if 1/2 are, etc.
if not anagram_positions:
score = 1
else:
score = 1 - (banned_matches / banned_max)
for anagram in anagrams:
if anagram != solution:
base_weight = min(1, trie[anagram] / interesting_threshold)
solutions.add(anagram, base_weight * score, 'anagram of %s', acc)
def _crawl(pos, acc, acc_length):
# Try to form total word from all remaining words.
for i in range(pos, end):
words = tokens[i]
for word in words:
word_length = len(word)
new_length = acc_length + word_length
if new_length > max_length:
continue
acc_length = new_length
acc.append((word, i))
if min_length <= new_length <= max_length:
_add(acc, banned_max)
elif new_length < max_length:
_crawl(i + 1, acc, acc_length)
acc_length -= word_length
acc.pop()
_crawl(0, [], 0)
def _visit_concatenate(tokens, positions, solutions):
end = len(tokens)
min_length = solutions.min_length
max_length = solutions.max_length
concatenate_positions = set(positions)
trie = warehouse.get('/words/unigram/trie')
interesting_threshold = trie.interesting_threshold()
def _add(acc):
if len(acc) == 1:
return # Ignore complete words in input.
parts = []
banned_matches = 0
for word, pos in acc:
parts.append(word)
if pos in concatenate_positions:
banned_matches += 1
solution = ''.join(parts)
if solution not in trie:
return
# Score is 0 if all acc are from positions; .5 if 1/2 are, etc.
if not concatenate_positions:
score = 1
else:
score = 1 - (banned_matches / len(concatenate_positions))
base_weight = min(1, trie[solution] / interesting_threshold)
solutions.add(solution, base_weight * score, 'concatenation of %s', acc)
def _crawl(pos, acc, acc_length):
if pos in concatenate_positions and pos + 1 < end:
# Optionally, skip ahead to next position using current acc.
_crawl(pos + 1, acc, acc_length)
# Try to form total word from all remaining starting points.
for i in range(pos, end):
words = tokens[i]
for word in words:
word_length = len(word)
new_length = acc_length + word_length
if new_length > max_length:
continue
acc_length = new_length
acc.append((word, i))
if min_length <= new_length <= max_length:
_add(acc)
elif new_length < max_length and trie.has_keys_with_prefix(
''.join(a[0] for a in acc)):
_crawl(i + 1, acc, acc_length)
acc_length -= word_length
acc.pop()
_crawl(0, [], 0)
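# Worked example of the scoring in _add above (illustrative only, with
# hypothetical numbers): with two indicator positions and an accumulator in
# which exactly one piece comes from an indicator position, banned_matches == 1
# and score == 1 - 1/2 == 0.5. Pieces drawn from indicator positions therefore
# lower the score; _visit_anagram applies the same idea, using banned_max as
# the denominator.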
def _visit_homophone(tokens, positions, solutions):
del tokens, positions
if not solutions:
raise NotImplementedError('Homophones not implemented')
def _visit_insert(tokens, positions, solutions):
if not solutions:
# "INSERT" indicator is usually a subset of functionality provided by
# "ANAGRAM".
_visit_anagram(tokens, positions, solutions)
if not solutions:
raise NotImplementedError()
class _Solutions(dict):
def __init__(self, notes, min_length, max_length):
super(_Solutions, self).__init__()
self._notes = notes
self.min_length = min_length
self.max_length = max_length
def add(self, solution, weight, note, ingredients):
if solution not in self or weight > self[solution]:
self[solution] = weight
self._notes[solution].clear()
if note:
self._notes[solution].append(
note % ', '.join(words[0] for words in ingredients))
_VISIT_MAP = collections.OrderedDict([
# Embedded clues only use original words.
(cryptic_keywords.EMBEDDED_INDICATORS, _visit_embedded),
# Producers.
(cryptic_keywords.INITIAL_INDICATORS, _visit_initial),
(cryptic_keywords.EDGES_INDICATORS, _visit_word_edges),
(cryptic_keywords.REVERSAL_INDICATORS, _visit_reversal),
# Reducers.
(cryptic_keywords.ANAGRAM_INDICATORS, _visit_anagram),
(cryptic_keywords.CONCATENATE_INDICATORS, _visit_concatenate),
# TODO: Incomplete implementation. Redundant with anagram indicator.
(cryptic_keywords.INSERT_INDICATORS, _visit_insert),
# TODO: Incomplete implementation. This should be up with "producers".
(cryptic_keywords.HOMOPHONE_INDICATORS, _visit_homophone),
])
_VISIT_ORDER = dict([(indicator, i) for i, indicator in enumerate(_VISIT_MAP)])
|
mit
| -3,694,395,314,970,705,000
| 33.457413
| 81
| 0.657786
| false
| 3.596641
| false
| false
| false
|
mpetyx/energagement
|
energagement/myapp/migrations/0014_auto_20150823_1721.py
|
1
|
4241
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('myapp', '0013_auto_20150731_0044'),
]
operations = [
migrations.RemoveField(
model_name='building',
name='ape_kwh',
),
migrations.RemoveField(
model_name='building',
name='co2_lt_m2',
),
migrations.RemoveField(
model_name='building',
name='co2_tn_m2',
),
migrations.RemoveField(
model_name='building',
name='cosf',
),
migrations.RemoveField(
model_name='building',
name='euro_forecast',
),
migrations.RemoveField(
model_name='building',
name='euro_m2_electricity',
),
migrations.RemoveField(
model_name='building',
name='euro_m2_liquidfuel',
),
migrations.RemoveField(
model_name='building',
name='euro_m2_monthly',
),
migrations.RemoveField(
model_name='building',
name='kwh_m2',
),
migrations.RemoveField(
model_name='building',
name='kwh_m2_cooling',
),
migrations.RemoveField(
model_name='building',
name='kwh_m2_heating',
),
migrations.RemoveField(
model_name='building',
name='kwh_m2_lighting',
),
migrations.RemoveField(
model_name='building',
name='kwh_m2_usagehours',
),
migrations.RemoveField(
model_name='building',
name='kwh_m2_user',
),
migrations.RemoveField(
model_name='building',
name='lt_m2',
),
migrations.RemoveField(
model_name='electricvehicle',
name='co2_tn_user',
),
migrations.RemoveField(
model_name='electricvehicle',
name='euro_forecast',
),
migrations.RemoveField(
model_name='electricvehicle',
name='euro_m2_monthly',
),
migrations.RemoveField(
model_name='electricvehicle',
name='euro_user',
),
migrations.RemoveField(
model_name='electricvehicle',
name='kwh_user',
),
migrations.RemoveField(
model_name='streetlighting',
name='ape_kwh',
),
migrations.RemoveField(
model_name='streetlighting',
name='co2_lt_m2',
),
migrations.RemoveField(
model_name='streetlighting',
name='co2_tn_km',
),
migrations.RemoveField(
model_name='streetlighting',
name='cosf',
),
migrations.RemoveField(
model_name='streetlighting',
name='euro_forecast',
),
migrations.RemoveField(
model_name='streetlighting',
name='euro_line',
),
migrations.RemoveField(
model_name='streetlighting',
name='euro_monthly',
),
migrations.RemoveField(
model_name='streetlighting',
name='kwh_km',
),
migrations.RemoveField(
model_name='streetlighting',
name='kwh_light',
),
migrations.RemoveField(
model_name='streetlighting',
name='kwh_line',
),
migrations.RemoveField(
model_name='streetlighting',
name='operatinglights_percentage',
),
migrations.AddField(
model_name='building',
name='co2_lt',
field=models.ManyToManyField(related_name='co2_lt_b', blank=True, null=True, to='myapp.Value'),
preserve_default=True,
),
migrations.AddField(
model_name='building',
name='lt',
field=models.ManyToManyField(related_name='lt', blank=True, null=True, to='myapp.Value'),
preserve_default=True,
),
]
|
mit
| -554,861,424,052,546,050
| 27.273333
| 107
| 0.500354
| false
| 4.403946
| false
| false
| false
|
davidraleigh/cxxtest
|
python/cxxtest/cxxtest_misc.py
|
1
|
2691
|
#!/usr/bin/python
#-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v2.1
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
import sys
import os
def abort( problem ):
'''Print error message and exit'''
sys.stderr.write( '\n' )
sys.stderr.write( problem )
sys.stderr.write( '\n\n' )
sys.exit(2)
def resolve_symlinks(orig_path):
drive,tmp = os.path.splitdrive(os.path.normpath(orig_path))
if not drive:
drive = os.path.sep
parts = tmp.split(os.path.sep)
actual_path = [drive]
while parts:
actual_path.append(parts.pop(0))
if not os.path.islink(os.path.join(*actual_path)):
continue
actual_path[-1] = os.readlink(os.path.join(*actual_path))
tmp_drive, tmp_path = os.path.splitdrive(
dereference_path(os.path.join(*actual_path)) )
if tmp_drive:
drive = tmp_drive
actual_path = [drive] + tmp_path.split(os.path.sep)
return os.path.join(*actual_path)
def relpath(path, start=None):
"""Return a relative version of a path.
(provides compatibility with Python < 2.6)"""
# Some notes on implementation:
# - We rely on resolve_symlinks to correctly resolve any symbolic
# links that may be present in the paths
# - The explicit handling of the drive name is critical for proper
# function on Windows (because os.path.join('c:','foo') yields
# "c:foo"!).
if not start:
start = os.getcwd()
ref_drive, ref_path = os.path.splitdrive(
resolve_symlinks(os.path.abspath(start)) )
if not ref_drive:
ref_drive = os.path.sep
start = [ref_drive] + ref_path.split(os.path.sep)
while '' in start:
start.remove('')
pth_drive, pth_path = os.path.splitdrive(
resolve_symlinks(os.path.abspath(path)) )
if not pth_drive:
pth_drive = os.path.sep
path = [pth_drive] + pth_path.split(os.path.sep)
while '' in path:
path.remove('')
i = 0
max = min(len(path), len(start))
while i < max and path[i] == start[i]:
i += 1
if i < 2:
return os.path.join(*path)
else:
rel = ['..']*(len(start)-i) + path[i:]
if rel:
return os.path.join(*rel)
else:
return '.'
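# Illustrative behaviour of relpath (a sketch, not part of the original module;
# assumes a POSIX system with no symlinks in either argument):
#
#   relpath('/home/user/project/tests/foo.h', '/home/user/project') -> 'tests/foo.h'
#   relpath('/home/user/other', '/home/user/project') -> '../other'
#   relpath('/tmp/x', '/home/user') -> '/tmp/x'
#   # in the last case only the root is shared, so the absolute path is returned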
|
lgpl-2.1
| 1,456,833,161,039,569,400
| 33.5
| 74
| 0.578595
| false
| 3.597594
| false
| false
| false
|
liviu-/ding
|
ding/ding.py
|
1
|
4914
|
#!/usr/bin/env python
"""Simple CLI beep tool"""
from __future__ import unicode_literals
from __future__ import print_function
import re
import os
import sys
import time
import datetime
import argparse
VERSION = '2.1.0'
N_BEEPS = 4
WAIT_BEEPS = 0.15
def relative_time(arg):
"""Validate user provided relative time"""
if not re.match('\d+[smh]( +\d+[smh])*', arg):
raise argparse.ArgumentTypeError("Invalid time format: {}".format(arg))
return arg
def absolute_time(arg):
"""Validate user provided absolute time"""
if not all([t.isdigit() for t in arg.split(':')]):
raise argparse.ArgumentTypeError("Invalid time format: {}".format(arg))
# Valid time (e.g. hour must be between 0..23)
try:
datetime.time(*map(int, arg.split(':')))
except ValueError as e:
raise argparse.ArgumentTypeError("Invalid time format: {}".format(e))
return arg
def get_args(args):
"""Parse commandline arguments"""
parent_parser = argparse.ArgumentParser(
add_help=False, description='Lightweight time management CLI tool')
parent_parser.add_argument(
'-n', '--no-timer', action='store_true', help='Hide the countdown timer')
parent_parser.add_argument(
'-c', '--command', type=str, help='Use a custom command instead of the default beep')
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--version', action='version', version=VERSION)
subparsers = parser.add_subparsers(dest='mode')
subparsers.required = True
parser_in = subparsers.add_parser('in', parents=[parent_parser])
parser_in.add_argument('time', nargs='+', type=relative_time,
help='relative time \d+[smh]( +\d+[smh])* (e.g. 1h 30m)')
parser_every = subparsers.add_parser('every', parents=[parent_parser])
parser_every.add_argument('time', nargs='+', type=relative_time,
help='relative time \d+[smh]( +\d+[smh])* (e.g. 2m 15s)')
parser_at = subparsers.add_parser('at', parents=[parent_parser])
parser_at.add_argument('time', type=absolute_time, help='absolute time [hh:[mm[:ss]]]')
return parser.parse_args(args)
class TimeParser():
"""Class helping with parsing user provided time into seconds"""
time_map = {
's': 1,
'm': 60,
'h': 60 * 60,
}
def __init__(self, time, relative):
self.time = time
self.relative = relative
def get_seconds(self):
return self._get_seconds_relative() if self.relative else self._get_seconds_absolute()
def _get_seconds_relative(self):
return sum([self.time_map[t[-1]] * int(t[:-1]) for t in self.time])
def _get_seconds_absolute(self):
now = datetime.datetime.now()
user_time = (datetime.datetime.combine(datetime.date.today(),
datetime.time(*map(int, self.time.split(':')))))
return ((user_time - now).seconds if user_time > now
else (user_time + datetime.timedelta(days=1) - now).seconds)
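# Illustrative usage of TimeParser (a sketch, not part of the original module):
#
#   >>> TimeParser(['1h', '30m'], relative=True).get_seconds()
#   5400
#   >>> # absolute mode: seconds from now until the next 07:30 (today or tomorrow)
#   >>> TimeParser('07:30', relative=False).get_seconds()  # doctest: +SKIP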
def countdown(seconds, notimer=False):
"""Countdown for `seconds`, printing values unless `notimer`"""
if not notimer:
os.system('cls' if os.name == 'nt' else 'clear') # initial clear
while seconds > 0:
start = time.time()
# print the time without a newline or carriage return
# this leaves the cursor at the end of the time while visible
if not notimer:
print(datetime.timedelta(seconds=seconds), end='')
sys.stdout.flush()
seconds -= 1
time.sleep(1 - time.time() + start)
# emit a carriage return
# this moves the cursor back to the beginning of the line
# so the next time overwrites the current time
if not notimer:
print(end='\r')
def beep(seconds, command):
"""Make the beep noise"""
for _ in range(N_BEEPS):
if command:
os.system(command)
else:
sys.stdout.write('\a')
sys.stdout.flush()
time.sleep(WAIT_BEEPS)
def parse_time(args):
"""Figure out the number of seconds to wait"""
relative = args.mode == 'in' or args.mode == "every"
parser = TimeParser(args.time, relative)
return parser.get_seconds()
def main(args=sys.argv[1:]):
args = get_args(args)
while True:
try:
seconds = parse_time(args)
countdown(seconds, args.no_timer)
beep(seconds, args.command)
# using an `if` here (instead of a loop condition) so an interrupt can't print a stack trace
if args.mode != "every":
break
except KeyboardInterrupt:
print() # ending current line
break # without printing useless stack...
if __name__ == '__main__':
main()
|
mit
| -4,648,106,790,088,834,000
| 32.202703
| 98
| 0.59361
| false
| 3.903098
| false
| false
| false
|
keithadavidson/ansible-mezzanine
|
deploy/scripts/set_mezzanine_settings.py
|
1
|
1811
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Keith Davidson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# A script to set the site domain
# Assumes two environment variables
#
# PROJECT_DIR: the project directory (e.g., ~/projname)
import os
import sys
# Add the project directory to system path
project_dir = os.path.expanduser(os.environ['PROJECT_DIR'])
sys.path.append(project_dir)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from mezzanine.conf import settings
settings.TWITTER_ACCESS_TOKEN_KEY = os.environ['TWITTER_ACCESS_TOKEN_KEY']
settings.TWITTER_ACCESS_TOKEN_SECRET = os.environ['TWITTER_ACCESS_TOKEN_SECRET']
settings.TWITTER_CONSUMER_KEY = os.environ['TWITTER_CONSUMER_KEY']
settings.TWITTER_CONSUMER_SECRET = os.environ['TWITTER_CONSUMER_SECRET']
|
mit
| -3,355,165,800,266,858,500
| 39.244444
| 80
| 0.773606
| false
| 3.788703
| false
| false
| false
|
Crompulence/cpl-library
|
utils/design_topology/CFD.py
|
1
|
4796
|
import numpy as np
import matplotlib.pyplot as plt
from mpi4py import MPI
from cplpy import CPL
from draw_grid import draw_grid
class CFD():
def __init__(self, npxyz, xyzL, xyz_orig, ncxyz):
#initialise MPI and CPL
self.comm = MPI.COMM_WORLD
self.CPL = CPL()
self.CFD_COMM = self.CPL.init(CPL.CFD_REALM)
self.nprocs_realm = self.CFD_COMM.Get_size()
# Parameters of the cpu topology (cartesian grid)
self.npxyz = np.array(npxyz, order='F', dtype=np.int32)
self.NProcs = np.product(npxyz)
self.xyzL = np.array(xyzL, order='F', dtype=np.float64)
self.xyz_orig = np.array(xyz_orig, order='F', dtype=np.float64)
self.ncxyz = np.array(ncxyz, order='F', dtype=np.int32)
if (self.nprocs_realm != self.NProcs):
print("Non-coherent number of processes in CFD ", self.nprocs_realm,
" no equal to ", self.npxyz[0], " X ", self.npxyz[1], " X ", self.npxyz[2])
MPI.Abort(errorcode=1)
#Setup coupled simulation
self.cart_comm = self.CFD_COMM.Create_cart([self.npxyz[0], self.npxyz[1], self.npxyz[2]])
self.CPL.setup_cfd(self.cart_comm, self.xyzL, self.xyz_orig, self.ncxyz)
#Get limits of overlap region
self.olap_limits = self.CPL.get_olap_limits()
self.portion = self.CPL.my_proc_portion(self.olap_limits)
[self.ncxl, self.ncyl, self.nczl] = self.CPL.get_no_cells(self.portion)
self.dx = self.CPL.get("xl_cfd")/float(self.CPL.get("ncx"))
self.dy = self.CPL.get("yl_cfd")/float(self.CPL.get("ncy"))
self.dz = self.CPL.get("zl_cfd")/float(self.CPL.get("ncz"))
self.ioverlap = (self.CPL.get("icmax_olap")-self.CPL.get("icmin_olap")+1)
self.joverlap = (self.CPL.get("jcmax_olap")-self.CPL.get("jcmin_olap")+1)
self.koverlap = (self.CPL.get("kcmax_olap")-self.CPL.get("kcmin_olap")+1)
self.xoverlap = self.ioverlap*self.dx
self.yoverlap = self.joverlap*self.dy
self.zoverlap = self.koverlap*self.dz
def recv_CPL_data(self):
# recv data to plot
self.recv_array = np.zeros((1, self.ncxl, self.ncyl, self.nczl), order='F', dtype=np.float64)
self.recv_array, ierr = self.CPL.recv(self.recv_array, self.olap_limits)
def plot_grid(self, ax):
#Plot CFD and coupler Grid
draw_grid(ax,
nx=self.CPL.get("ncx"),
ny=self.CPL.get("ncy"),
nz=self.CPL.get("ncz"),
px=self.CPL.get("npx_cfd"),
py=self.CPL.get("npy_cfd"),
pz=self.CPL.get("npz_cfd"),
xmin=self.CPL.get("x_orig_cfd"),
ymin=self.CPL.get("y_orig_cfd"),
zmin=self.CPL.get("z_orig_cfd"),
xmax=(self.CPL.get("icmax_olap")+1)*self.dx,
ymax=self.CPL.get("yl_cfd"),
zmax=(self.CPL.get("kcmax_olap")+1)*self.dz,
lc = 'r',
label='CFD')
#Plot MD domain
draw_grid(ax, nx=1, ny=1, nz=1,
px=self.CPL.get("npx_md"),
py=self.CPL.get("npy_md"),
pz=self.CPL.get("npz_md"),
xmin=self.CPL.get("x_orig_md"),
ymin=-self.CPL.get("yl_md")+self.yoverlap,
zmin=self.CPL.get("z_orig_md"),
xmax=(self.CPL.get("icmax_olap")+1)*self.dx,
ymax=self.yoverlap,
zmax=(self.CPL.get("kcmax_olap")+1)*self.dz,
label='MD')
def plot_data(self, ax):
# === Plot both grids ===
#Plot x component on grid
x = np.linspace(self.CPL.get("x_orig_cfd")+.5*self.dx,
self.xoverlap-.5*self.dx,self.ioverlap)
z = np.linspace(self.CPL.get("z_orig_cfd")+.5*self.dz,
self.zoverlap-.5*self.dz,self.koverlap)
try:
for j in range(self.joverlap):
ax.plot(x, 0.5*self.dy*(self.recv_array[0,:,j,0]+1.+2*j), 's-')
except ValueError:
print("Arrays not equal:", x.shape, z.shape, self.recv_array.shape)
def finalise(self):
self.CPL.finalize()
MPI.Finalize()
if __name__ == '__main__':
#Get input file
import inpututils
ip = inpututils.InputMod("./CFD.in")
npxyz = ip.read_input("npxyz")
xyzL = ip.read_input("xyzL")
xyz_orig = ip.read_input("xyz_orig")
ncxyz = ip.read_input("ncxyz")
cfd = CFD(npxyz=npxyz,
xyzL = xyzL,
xyz_orig = xyz_orig,
ncxyz = ncxyz)
cfd.recv_CPL_data()
fig, ax = plt.subplots(1,1)
cfd.plot_grid(ax)
cfd.plot_data(ax)
plt.show()
cfd.finalise()
|
gpl-3.0
| -1,381,466,288,594,465,300
| 34.791045
| 101
| 0.541076
| false
| 2.910194
| false
| false
| false
|
caterinaurban/Typpete
|
typpete/unittests/inference/generic_test.py
|
1
|
1346
|
# type_params {'generic_tolist': ['GTL'], 'flatten': ['FL'], 'flatten_dict': ['DK','DV']}
def generic_tolist(a):
return [a]
u = generic_tolist(1.2)
u[0] = 2.4
v = generic_tolist(True)
v2 = v[v[0]]
def flatten(lists):
"""
Flattens a list of lists into a flat list
"""
return [item for sublist in lists for item in sublist]
def flatten_dict(dicts,
defaults):
"""
Flattens a dict of lists, i.e., concatenates all lists for the same keys.
"""
result = {}
for key in defaults:
result[key] = []
for d in dicts:
for key, value in d.items():
if key in result:
result[key].extend(value)
else:
result[key] = value
return result
a = flatten([[1,2], [1,2], [True, False]])
a2 = flatten([["hi"], ['yo', 'sup']])
a4 = a[a[0]]
b = [{1:[2]}, {True: [True]}, {5: [1.2, 2]}]
c = b[0][1]
d = flatten_dict(b, [True, 1])
e = flatten_dict([{1.2: ['hi']}], [3, 5])
class A:
def bar(self):
return 1
class B(A):
pass
ff = flatten_dict([{'hi': [A()]}, {'sup': [A()], 'hey': [B(), A()]}], ['asd', 'erer'])
ff['hi'][0].bar()
# flatten := Callable[[List[List[FL]]], List[FL]]
# flatten_dict := Callable[[List[Dict[DV, List[DK]]], List[DV]], Dict[DV, List[DK]]]
# generic_tolist := Callable[[GTL], List[GTL]]
|
mpl-2.0
| 5,171,939,318,511,768,000
| 21.830508
| 89
| 0.520802
| false
| 2.888412
| false
| false
| false
|
hedvig/project-config
|
zuul/openstack_functions.py
|
1
|
2255
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
def set_log_url(item, job, params):
if hasattr(item.change, 'refspec'):
path = "%s/%s/%s/%s/" % (
params['ZUUL_CHANGE'][-2:], params['ZUUL_CHANGE'],
params['ZUUL_PATCHSET'], params['ZUUL_PIPELINE'])
elif hasattr(item.change, 'ref'):
path = "%s/%s/%s/" % (
params['ZUUL_NEWREV'][:2], params['ZUUL_NEWREV'],
params['ZUUL_PIPELINE'])
else:
path = params['ZUUL_PIPELINE'] + '/'
params['BASE_LOG_PATH'] = path
params['LOG_PATH'] = path + '%s/%s/' % (job.name,
params['ZUUL_UUID'][:7])
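# Illustrative result (a sketch, with made-up parameter values): for a change
# item with ZUUL_CHANGE=123456, ZUUL_PATCHSET=3 and ZUUL_PIPELINE=check,
# BASE_LOG_PATH becomes '56/123456/3/check/' and LOG_PATH becomes
# '56/123456/3/check/<job-name>/<first-7-chars-of-ZUUL_UUID>/'.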
def reusable_node(item, job, params):
if 'OFFLINE_NODE_WHEN_COMPLETE' in params:
del params['OFFLINE_NODE_WHEN_COMPLETE']
def set_node_options(item, job, params):
# Force tox to pass through ZUUL_ variables
zuul_params = [x for x in params.keys() if x.startswith('ZUUL_')]
params['TOX_TESTENV_PASSENV'] = ' '.join(zuul_params)
# Set up log url parameter for all jobs
set_log_url(item, job, params)
# Default to single use node. Potentially overridden below.
# Select node to run job on.
params['OFFLINE_NODE_WHEN_COMPLETE'] = '1'
proposal_re = r'^.*(merge-release-tags|(propose|upstream)-(.*?)-(constraints-.*|updates?|update-liberty))$' # noqa
release_re = r'^.*-(forge|jenkinsci|mavencentral|pypi-(both|wheel)|npm)-upload$'
hook_re = r'^hook-(.*?)-(rtfd)$'
# jobs run on the persistent proposal and release workers
if (re.match(proposal_re, job.name) or re.match(release_re, job.name) or
re.match(hook_re, job.name)):
reusable_node(item, job, params)
|
apache-2.0
| -4,446,275,520,323,971,000
| 40.759259
| 119
| 0.636807
| false
| 3.350669
| false
| false
| false
|
linearregression/socorro
|
socorro/external/es/supersearch.py
|
1
|
19711
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import re
from elasticsearch_dsl import Search, A, F, Q
from elasticsearch.exceptions import NotFoundError
from socorro.external import (
BadArgumentError,
)
from socorro.external.es.super_search_fields import SuperSearchFields
from socorro.lib import datetimeutil
from socorro.lib.search_common import SearchBase
BAD_INDEX_REGEX = re.compile('\[\[(.*)\] missing\]')
class SuperSearch(SearchBase):
def __init__(self, *args, **kwargs):
self.config = kwargs.get('config')
self.es_context = self.config.elasticsearch.elasticsearch_class(
self.config.elasticsearch
)
self.all_fields = SuperSearchFields(config=self.config).get_fields()
# Create a map to associate a field's name in the database to its
# exposed name (in the results and facets).
self.database_name_to_field_name_map = dict(
(x['in_database_name'], x['name'])
for x in self.all_fields.values()
)
kwargs.update(fields=self.all_fields)
super(SuperSearch, self).__init__(
*args, **kwargs
)
def get_connection(self):
with self.es_context() as conn:
return conn
def generate_list_of_indices(self, from_date, to_date, es_index=None):
"""Return the list of indices to query to access all the crash reports
that were processed between from_date and to_date.
The naming pattern for indices in elasticsearch is configurable; it is
possible to have an index per day, per week, per month...
Parameters:
* from_date datetime object
* to_date datetime object
"""
if es_index is None:
es_index = self.config.elasticsearch_index
indices = []
current_date = from_date
while current_date <= to_date:
index = current_date.strftime(es_index)
# Make sure no index is twice in the list
# (for weekly or monthly indices for example)
if index not in indices:
indices.append(index)
current_date += datetime.timedelta(days=1)
return indices
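# Illustrative example (a sketch; the index pattern is configurable and the
# weekly pattern below is only assumed): with es_index='socorro%Y%W', three
# consecutive days that fall in the same calendar week all format to the same
# string, so the loop above yields a single index such as ['socorro201530'];
# a date range spanning two weeks yields two entries.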
def get_indices(self, dates):
"""Return the list of indices to use for given dates. """
start_date = None
end_date = None
for date in dates:
if '>' in date.operator:
start_date = date.value
if '<' in date.operator:
end_date = date.value
return self.generate_list_of_indices(start_date, end_date)
def format_field_names(self, hit):
"""Return a hit with each field's database name replaced by its
exposed name. """
new_hit = {}
for field in hit:
new_field = field
if '.' in new_field:
# Remove the prefix ("processed_crash." or "raw_crash.").
new_field = new_field.split('.')[-1]
new_field = self.database_name_to_field_name_map.get(
new_field, new_field
)
new_hit[new_field] = hit[field]
return new_hit
def format_fields(self, hit):
"""Return a well formatted document.
Elasticsearch returns values as lists when using the `fields` option.
This function removes the list when it contains zero or one element.
It also calls `format_field_names` to correct all the field names.
"""
hit = self.format_field_names(hit)
for field in hit:
if isinstance(hit[field], (list, tuple)):
if len(hit[field]) == 0:
hit[field] = None
elif len(hit[field]) == 1:
hit[field] = hit[field][0]
return hit
def get_field_name(self, value, full=True):
try:
field_ = self.all_fields[value]
except KeyError:
raise BadArgumentError(
value,
msg='Unknown field "%s"' % value
)
if not field_['is_returned']:
# Returning this field is not allowed.
raise BadArgumentError(
value,
msg='Field "%s" is not allowed to be returned' % value
)
field_name = '%s.%s' % (
field_['namespace'],
field_['in_database_name']
)
if full and field_['has_full_version']:
# If the param has a full version, that means what matters
# is the full string, and not its individual terms.
field_name += '.full'
return field_name
def format_aggregations(self, aggregations):
"""Return aggregations in a form that looks like facets.
We used to expose the Elasticsearch facets directly. This is thus
needed for backwards compatibility.
"""
aggs = aggregations.to_dict()
for agg in aggs:
for i, bucket in enumerate(aggs[agg]['buckets']):
sub_aggs = {}
for key in bucket:
# Go through all sub aggregations. Those are contained in
# all the keys that are not 'key' or 'count'.
if key in ('key', 'key_as_string', 'doc_count'):
continue
sub_aggs[key] = [
{
# For date data, Elasticsearch exposes a timestamp
# in 'key' and a human-friendly string in
# 'key_as_string'. We thus check if the latter
# exists to expose it, and return the normal
# 'key' if not.
'term': x.get('key_as_string', x['key']),
'count': x['doc_count'],
}
for x in bucket[key]['buckets']
]
aggs[agg]['buckets'][i] = {
'term': bucket.get('key_as_string', bucket['key']),
'count': bucket['doc_count'],
}
if sub_aggs:
aggs[agg]['buckets'][i]['facets'] = sub_aggs
aggs[agg] = aggs[agg]['buckets']
return aggs
def get(self, **kwargs):
"""Return a list of results and aggregations based on parameters.
The list of accepted parameters (with types and default values) is in
the database and can be accessed with the super_search_fields service.
"""
# Filter parameters and raise potential errors.
params = self.get_parameters(**kwargs)
# Find the indices to use to optimize the elasticsearch query.
indices = self.get_indices(params['date'])
# Create and configure the search object.
search = Search(
using=self.get_connection(),
index=indices,
doc_type=self.config.elasticsearch.elasticsearch_doctype,
)
# Create filters.
filters = None
for field, sub_params in params.items():
sub_filters = None
for param in sub_params:
if param.name.startswith('_'):
# By default, all param values are turned into lists,
# even when they have and can have only one value.
# For those we know there can only be one value,
# so we just extract it from the made-up list.
if param.name == '_results_offset':
results_from = param.value[0]
elif param.name == '_results_number':
results_number = param.value[0]
elif param.name == '_facets_size':
facets_size = param.value[0]
elif param.name == '_histogram_interval.date':
histogram_interval_date = param.value[0]
# Don't use meta parameters in the query.
continue
field_data = self.all_fields[param.name]
name = '%s.%s' % (
field_data['namespace'],
field_data['in_database_name']
)
if param.data_type in ('date', 'datetime'):
param.value = datetimeutil.date_to_string(param.value)
elif param.data_type == 'enum':
param.value = [x.lower() for x in param.value]
elif param.data_type == 'str' and not param.operator:
param.value = [x.lower() for x in param.value]
args = {}
filter_type = 'term'
filter_value = None
if not param.operator:
# contains one of the terms
if len(param.value) == 1:
val = param.value[0]
if not isinstance(val, basestring) or (
isinstance(val, basestring) and ' ' not in val
):
filter_value = val
# If the term contains white spaces, we want to perform
# a phrase query. Thus we do nothing here and let this
# value be handled later.
else:
filter_type = 'terms'
filter_value = param.value
elif param.operator == '=':
# is exactly
if field_data['has_full_version']:
name = '%s.full' % name
filter_value = param.value
elif param.operator == '>':
# greater than
filter_type = 'range'
filter_value = {
'gt': param.value
}
elif param.operator == '<':
# lower than
filter_type = 'range'
filter_value = {
'lt': param.value
}
elif param.operator == '>=':
# greater than or equal to
filter_type = 'range'
filter_value = {
'gte': param.value
}
elif param.operator == '<=':
# lower than or equal to
filter_type = 'range'
filter_value = {
'lte': param.value
}
elif param.operator == '__null__':
# is null
filter_type = 'missing'
args['field'] = name
if filter_value is not None:
args[name] = filter_value
if args:
if param.operator_not:
new_filter = ~F(filter_type, **args)
else:
new_filter = F(filter_type, **args)
if sub_filters is None:
sub_filters = new_filter
elif param.data_type == 'enum':
sub_filters |= new_filter
else:
sub_filters &= new_filter
continue
# These use a wildcard and thus need to be in a query
# instead of a filter.
operator_wildcards = {
'~': '*%s*', # contains
'$': '%s*', # starts with
'^': '*%s' # ends with
}
if param.operator in operator_wildcards:
if field_data['has_full_version']:
name = '%s.full' % name
query_type = 'wildcard'
args[name] = (
operator_wildcards[param.operator] % param.value
)
elif not param.operator:
# This is a phrase that was passed down.
query_type = 'simple_query_string'
args['query'] = param.value[0]
args['fields'] = [name]
args['default_operator'] = 'and'
if args:
query = Q(query_type, **args)
if param.operator_not:
query = ~query
search = search.query(query)
else:
# If we reach this point, that means the operator is
# not supported, and we should raise an error about that.
raise NotImplementedError(
'Operator %s is not supported' % param.operator
)
if filters is None:
filters = sub_filters
elif sub_filters is not None:
filters &= sub_filters
search = search.filter(filters)
# Restricting returned fields.
fields = []
for param in params['_columns']:
for value in param.value:
if not value:
continue
field_name = self.get_field_name(value, full=False)
fields.append(field_name)
search = search.fields(fields)
# Sorting.
sort_fields = []
for param in params['_sort']:
for value in param.value:
if not value:
continue
# Values starting with a '-' are sorted in descending order.
# In order to retrieve the database name of the field, we
# must first remove the '-' part and add it back later.
# Example: given ['product', '-version'], the results will be
# sorted by ascending product and descending version.
desc = False
if value.startswith('-'):
desc = True
value = value[1:]
field_name = self.get_field_name(value, full=False)
if desc:
# The underlying library understands that '-' means
# sorting in descending order.
field_name = '-' + field_name
sort_fields.append(field_name)
search = search.sort(*sort_fields)
# Pagination.
results_to = results_from + results_number
search = search[results_from:results_to]
# Create facets.
for param in params['_facets']:
for value in param.value:
if not value:
continue
field_name = self.get_field_name(value)
search.aggs.bucket(
value,
'terms',
field=field_name,
size=facets_size,
)
# Create signature aggregations.
if params.get('_aggs.signature'):
sig_bucket = A(
'terms',
field=self.get_field_name('signature'),
size=facets_size,
)
for param in params['_aggs.signature']:
for value in param.value:
if not value:
continue
field_name = self.get_field_name(value)
sig_bucket.bucket(
value,
'terms',
field=field_name,
size=facets_size,
)
search.aggs.bucket('signature', sig_bucket)
# Create date histograms.
if params.get('_histogram.date'):
date_bucket = A(
'date_histogram',
field=self.get_field_name('date'),
interval=histogram_interval_date,
)
for param in params['_histogram.date']:
for value in param.value:
if not value:
continue
field_name = self.get_field_name(value)
val_bucket = A(
'terms',
field=field_name,
size=facets_size,
)
date_bucket.bucket(value, val_bucket)
search.aggs.bucket('histogram_date', date_bucket)
# Query and compute results.
hits = []
if params['_return_query'][0].value[0]:
# Return only the JSON query that would be sent to elasticsearch.
return {
'query': search.to_dict(),
'indices': indices,
}
# We call elasticsearch with a computed list of indices, based on
# the date range. However, if that list contains indices that do not
# exist in elasticsearch, an error will be raised. We thus want to
# remove all failing indices until we either have a valid list, or
# an empty list in which case we return no result.
while True:
try:
results = search.execute()
for hit in results:
hits.append(self.format_fields(hit.to_dict()))
total = search.count()
aggregations = self.format_aggregations(results.aggregations)
break # Yay! Results!
except NotFoundError, e:
missing_index = re.findall(BAD_INDEX_REGEX, e.error)[0]
if missing_index in indices:
del indices[indices.index(missing_index)]
else:
# Wait what? An error caused by an index that was not
# in the request? That should never happen, but in case
# it does, better know it.
raise
if indices:
# Update the list of indices and try again.
# Note: we need to first empty the list of indices before
# updating it, otherwise the removed indices never get
# actually removed.
search = search.index().index(*indices)
else:
# There is no index left in the list, return an empty
# result.
hits = []
total = 0
aggregations = {}
break
return {
'hits': hits,
'total': total,
'facets': aggregations,
}
# For backwards compatibility with the previous elasticsearch module.
# All those methods used to live in this file, but have been moved to
# the super_search_fields.py file now. Since the configuration of the
# middleware expect those to still be here, we bind them for now.
def get_fields(self, **kwargs):
return SuperSearchFields(config=self.config).get_fields(**kwargs)
def create_field(self, **kwargs):
return SuperSearchFields(config=self.config).create_field(**kwargs)
def update_field(self, **kwargs):
return SuperSearchFields(config=self.config).update_field(**kwargs)
def delete_field(self, **kwargs):
return SuperSearchFields(config=self.config).delete_field(**kwargs)
def get_missing_fields(self):
return SuperSearchFields(config=self.config).get_missing_fields()
|
mpl-2.0
| 1,042,928,458,989,342,600
| 35.981238
| 79
| 0.488205
| false
| 4.89836
| true
| false
| false
|
fabian0010/Blaze
|
pyasn1/type/namedval.py
|
1
|
2701
|
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
# ASN.1 named integers
#
from pyasn1 import error
__all__ = ['NamedValues']
class NamedValues(object):
def __init__(self, *namedValues):
self.nameToValIdx = {}
self.valToNameIdx = {}
self.namedValues = ()
automaticVal = 1
for namedValue in namedValues:
if isinstance(namedValue, tuple):
name, val = namedValue
else:
name = namedValue
val = automaticVal
if name in self.nameToValIdx:
raise error.PyAsn1Error('Duplicate name %s' % (name,))
self.nameToValIdx[name] = val
if val in self.valToNameIdx:
raise error.PyAsn1Error('Duplicate value %s=%s' % (name, val))
self.valToNameIdx[val] = name
self.namedValues = self.namedValues + ((name, val),)
automaticVal += 1
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, ', '.join([repr(x) for x in self.namedValues]))
def __str__(self):
return str(self.namedValues)
def __eq__(self, other):
return tuple(self) == tuple(other)
def __ne__(self, other):
return tuple(self) != tuple(other)
def __lt__(self, other):
return tuple(self) < tuple(other)
def __le__(self, other):
return tuple(self) <= tuple(other)
def __gt__(self, other):
return tuple(self) > tuple(other)
def __ge__(self, other):
return tuple(self) >= tuple(other)
def __hash__(self):
return hash(tuple(self))
def getName(self, value):
if value in self.valToNameIdx:
return self.valToNameIdx[value]
def getValue(self, name):
if name in self.nameToValIdx:
return self.nameToValIdx[name]
def getValues(self, *names):
try:
return [self.nameToValIdx[name] for name in names]
except KeyError:
raise error.PyAsn1Error(
'Unknown bit identifier(s): %s' % (set(names).difference(self.nameToValIdx),)
)
# TODO support by-name subscription
def __getitem__(self, i):
return self.namedValues[i]
def __len__(self):
return len(self.namedValues)
def __add__(self, namedValues):
return self.__class__(*self.namedValues + namedValues)
def __radd__(self, namedValues):
return self.__class__(*namedValues + tuple(self))
def clone(self, *namedValues):
return self.__class__(*tuple(self) + namedValues)
# XXX clone/subtype?
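# Illustrative usage of NamedValues (a sketch, not part of the original module):
#
#   >>> nv = NamedValues('apple', 'banana', ('cherry', 10))
#   >>> nv.getValue('banana')
#   2
#   >>> nv.getName(10)
#   'cherry'
#   >>> nv[0]
#   ('apple', 1)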
|
mit
| 7,432,628,266,542,734,000
| 27.431579
| 99
| 0.569789
| false
| 3.730663
| false
| false
| false
|
jmeyers314/jtrace
|
batoid/coordTransform.py
|
1
|
3059
|
from . import _batoid
from .coordSys import CoordSys
import numpy as np
class CoordTransform:
"""Transformation between two coordinate systems.
Parameters
----------
fromSys : CoordSys
Origin coordinate system.
toSys : CoordSys
Destination coordinate system.
"""
def __init__(self, fromSys, toSys):
self.fromSys = fromSys
self.toSys = toSys
self.dr = fromSys.rot.T@(toSys.origin - fromSys.origin)
self.drot = fromSys.rot.T@toSys.rot
def __getstate__(self):
return self.fromSys, self.toSys
def __setstate__(self, d):
self.__init__(*d)
def __eq__(self, rhs):
if not isinstance(rhs, CoordTransform): return False
return (
self.fromSys == rhs.fromSys and
self.toSys == rhs.toSys
)
def __ne__(self, rhs):
return not (self == rhs)
def applyForward(self, rv):
"""Apply forward-direction transformation to RayVector.
Parameters
----------
rv : RayVector
Rays to transform.
Returns
-------
transformed : RayVector
Reference to input RayVector transformed in place.
"""
from .trace import applyForwardTransform
return applyForwardTransform(self, rv)
def applyReverse(self, rv):
"""Apply reverse-direction transformation to RayVector.
Parameters
----------
rv : RayVector
Rays to transform.
Returns
-------
transformed : RayVector
Reference to input RayVector transformed in place.
"""
from .trace import applyReverseTransform
return applyReverseTransform(self, rv)
def applyForwardArray(self, x, y, z):
"""Apply forward-direction transformation to ndarrays.
Parameters
----------
x, y, z : ndarray
Coordinates to transform.
Returns
-------
xyz : ndarray
Transformed coordinates.
Notes
-----
Unlike applyForward, this method does not transform in-place, but
returns a newly created ndarray.
"""
r = np.array([x, y, z], dtype=float).T
r -= self.dr
return self.drot.T@r.T
def applyReverseArray(self, x, y, z):
"""Apply reverse-direction transformation to ndarrays.
Parameters
----------
x, y, z : ndarray
Coordinates to transform.
Returns
-------
xyz : ndarray
Transformed coordinates.
Notes
-----
Unlike applyReverse, this method does not transform in-place, but
returns a newly created ndarray.
"""
r = np.array([x, y, z], dtype=float)
r = (self.drot@r).T
r += self.dr
return r.T
def __repr__(self):
return f"CoordTransform({self.fromSys!r}, {self.toSys!r})"
def __hash__(self):
return hash(("CoordTransform", self.fromSys, self.toSys))
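# Illustrative round-trip (a sketch; constructing the CoordSys objects follows
# the batoid API and is assumed here, not shown in this file):
#
#   transform = CoordTransform(fromSys, toSys)
#   xp, yp, zp = transform.applyForwardArray(x, y, z)
#   # applyReverseArray undoes applyForwardArray, recovering x, y, z
#   x2, y2, z2 = transform.applyReverseArray(xp, yp, zp)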
|
bsd-2-clause
| -2,283,730,100,674,697,700
| 24.705882
| 73
| 0.552141
| false
| 4.485337
| false
| false
| false
|
tparks5/tor-stem
|
test/integ/installation.py
|
1
|
5352
|
"""
Tests installation of our library.
"""
import glob
import os
import shutil
import sys
import tarfile
import threading
import unittest
import stem
import stem.util.system
import test.util
from test.util import only_run_once
INSTALL_MISMATCH_MSG = "Running 'python setup.py sdist' doesn't match our git contents in the following way. The manifest in our setup.py may need to be updated...\n\n"
BASE_INSTALL_PATH = '/tmp/stem_test'
DIST_PATH = os.path.join(test.util.STEM_BASE, 'dist')
SETUP_THREAD, INSTALL_FAILURE, INSTALL_PATH, SDIST_FAILURE = None, None, None, None
def setup():
"""
Performs setup our tests will need. This mostly just needs disk iops so it
can happen asynchronously with other tests.
"""
global SETUP_THREAD
def _setup():
global INSTALL_FAILURE, INSTALL_PATH, SDIST_FAILURE
original_cwd = os.getcwd()
try:
os.chdir(test.util.STEM_BASE)
try:
os.chdir(test.util.STEM_BASE)
stem.util.system.call('%s setup.py install --prefix %s' % (sys.executable, BASE_INSTALL_PATH), timeout = 60)
stem.util.system.call('%s setup.py clean --all' % sys.executable, timeout = 60) # tidy up the build directory
site_packages_paths = glob.glob('%s/lib*/*/site-packages' % BASE_INSTALL_PATH)
if len(site_packages_paths) != 1:
raise AssertionError('We should only have a single site-packages directory, but instead had: %s' % site_packages_paths)
INSTALL_PATH = site_packages_paths[0]
except Exception as exc:
INSTALL_FAILURE = AssertionError("Unable to install with 'python setup.py install': %s" % exc)
if not os.path.exists(DIST_PATH):
try:
stem.util.system.call('%s setup.py sdist' % sys.executable, timeout = 60)
except Exception as exc:
SDIST_FAILURE = exc
else:
SDIST_FAILURE = AssertionError("%s already exists, maybe you manually ran 'python setup.py sdist'?" % DIST_PATH)
finally:
os.chdir(original_cwd)
if SETUP_THREAD is None:
SETUP_THREAD = threading.Thread(target = _setup)
SETUP_THREAD.start()
return SETUP_THREAD
def clean():
if os.path.exists(BASE_INSTALL_PATH):
shutil.rmtree(BASE_INSTALL_PATH)
if os.path.exists(DIST_PATH):
shutil.rmtree(DIST_PATH)
def _assert_has_all_files(path):
"""
Check that all the files in the stem directory are present in the
installation. This is a very common gotcha since our setup.py
requires us to remember to add new modules and non-source files.
:raises: **AssertionError** files don't match our content
"""
expected, installed = set(), set()
for root, dirnames, filenames in os.walk(os.path.join(test.util.STEM_BASE, 'stem')):
for filename in filenames:
file_format = filename.split('.')[-1]
if file_format not in test.util.IGNORED_FILE_TYPES:
expected.add(os.path.join(root, filename)[len(test.util.STEM_BASE) + 1:])
for root, dirnames, filenames in os.walk(path):
for filename in filenames:
if not filename.endswith('.pyc') and not filename.endswith('egg-info'):
installed.add(os.path.join(root, filename)[len(path) + 1:])
missing = expected.difference(installed)
extra = installed.difference(expected)
if missing:
raise AssertionError("The following files were expected to be in our installation but weren't. Maybe our setup.py needs to be updated?\n\n%s" % '\n'.join(missing))
elif extra:
raise AssertionError("The following files weren't expected to be in our installation.\n\n%s" % '\n'.join(extra))
class TestInstallation(unittest.TestCase):
@only_run_once
def test_install(self):
"""
Installs with 'python setup.py install' and checks we can use what we
install.
"""
if not INSTALL_PATH:
setup().join()
if INSTALL_FAILURE:
raise INSTALL_FAILURE
self.assertEqual(stem.__version__, stem.util.system.call([sys.executable, '-c', "import sys;sys.path.insert(0, '%s');import stem;print(stem.__version__)" % INSTALL_PATH])[0])
_assert_has_all_files(INSTALL_PATH)
@only_run_once
def test_sdist(self):
"""
Creates a source distribution tarball with 'python setup.py sdist' and
checks that it matches the content of our git repository. This primarily is
meant to test that our MANIFEST.in is up to date.
"""
if not stem.util.system.is_available('git'):
self.skipTest('(git unavailable)')
return
setup().join()
if SDIST_FAILURE:
raise SDIST_FAILURE
git_contents = [line.split()[-1] for line in stem.util.system.call('git ls-tree --full-tree -r HEAD')]
# the tarball has a 'stem-[version]' directory prefix, so strip that out
dist_tar = tarfile.open(os.path.join(DIST_PATH, 'stem-dry-run-%s.tar.gz' % stem.__version__))
tar_contents = ['/'.join(info.name.split('/')[1:]) for info in dist_tar.getmembers() if info.isfile()]
issues = []
for path in git_contents:
if path not in tar_contents and path not in ['.gitignore']:
issues.append(' * %s is missing from our release tarball' % path)
for path in tar_contents:
if path not in git_contents and path not in ['MANIFEST.in', 'PKG-INFO']:
issues.append(" * %s isn't expected in our release tarball" % path)
if issues:
self.fail(INSTALL_MISMATCH_MSG + '\n'.join(issues))
|
lgpl-3.0
| 474,349,562,360,036,700
| 31.634146
| 178
| 0.674327
| false
| 3.616216
| true
| false
| false
|
alq666/sre-kpi
|
monitors.py
|
1
|
1349
|
"""Summarizes the monitor reports from Datadog into key metrics
"""
import csv
import os
import sqlite3
import sys
# Prepare the sqlite file for queries
# A denormalized version of the csv
try:
os.remove('monitors.sqlite')
except OSError:
pass
conn = sqlite3.connect('monitors.sqlite')
c = conn.cursor()
c.execute("""
create table monitors
(
day date,
hour integer,
source_type text,
alert_type text,
priority integer,
hostname text,
device text,
alert_name text,
user text,
cnt integer
)
""")
# Consume the csv
reader = csv.reader(sys.stdin)
headers = reader.next()
for l in reader:
# yyyy-mm-dd hh24
day, hour = l[headers.index('hour')].split()
src = l[headers.index('source_type_name')]
alty = l[headers.index('alert_type')]
prio = int(l[headers.index('priority')])
host = l[headers.index('host_name')]
dev = l[headers.index('device_name')]
alnm = l[headers.index('alert_name')]
usrs = l[headers.index('user')].split()
cnt = int(l[headers.index('cnt')])
# In the case of multiple users, denormalize
for usr in usrs:
stmt = """insert into monitors
(day, hour, source_type, alert_type, priority, hostname, device, alert_name, user, cnt) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"""
c.execute(stmt, [day, hour, src, alty, prio, host, dev, alnm, usr, cnt])
conn.commit()
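# Illustrative follow-up query (a sketch; uses only the table and columns
# created above):
#
#   c.execute("select day, alert_type, sum(cnt) from monitors "
#             "group by day, alert_type order by day")
#   for day, alert_type, total in c.fetchall():
#       print(day, alert_type, total)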
|
mit
| 1,948,408,615,343,619,800
| 23.527273
| 136
| 0.650852
| false
| 3.211905
| false
| false
| false
|
OzFlux/PyFluxPro
|
scripts/pfp_gfALT.py
|
1
|
64336
|
# standard modules
import logging
import os
import traceback
# 3rd party modules
import dateutil
import numpy
import matplotlib.pyplot as plt
import pylab
import scipy
import statsmodels.api as sm
# PFP modules
from scripts import constants as c
from scripts import pfp_io
from scripts import pfp_ts
from scripts import pfp_utils
logger = logging.getLogger("pfp_log")
# functions for GapFillFromAlternate
def GapFillFromAlternate(main_gui, ds4, ds_alt, l4_info, called_by):
'''
This is the gap fill from alternate data GUI.
The alternate data gap fill GUI is displayed separately from the main OzFluxQC GUI.
It consists of text to display the start and end datetime of the file,
two entry boxes for the start and end datetimes of the alternate data gap fill,
a button to insert the gap fill data ("Run") and a button ("Done") to exit
the GUI when we are done. On exit, the OzFluxQC main GUI continues
and eventually writes the gap filled data to file.
'''
# set the default return code
ds4.returncodes["message"] = "normal"
# get the alternate data information
if l4_info[called_by]["info"]["call_mode"] == "interactive":
# put up a plot of the data coverage at L3
gfalternate_plotcoveragelines(ds4, l4_info, called_by)
# call the GapFillFromAlternate GUI
gfalternate_gui(main_gui, ds4, ds_alt, l4_info, called_by)
else:
# ["gui"] settings dictionary done in pfp_gf.ParseL4ControlFile()
gfalternate_run(ds4, ds_alt, l4_info, called_by)
def gfalternate_gui(main_gui, ds4, ds_alt, l4_info, called_by):
# put up the start and end dates
main_gui.l4_ui.ds4 = ds4
main_gui.l4_ui.ds_alt = ds_alt
main_gui.l4_ui.l4_info = l4_info
main_gui.l4_ui.called_by = called_by
main_gui.l4_ui.edit_cfg = main_gui.tabs.tab_dict[main_gui.tabs.tab_index_running]
start_date = ds4.series["DateTime"]["Data"][0].strftime("%Y-%m-%d %H:%M")
end_date = ds4.series["DateTime"]["Data"][-1].strftime("%Y-%m-%d %H:%M")
main_gui.l4_ui.label_DataStartDate_value.setText(start_date)
main_gui.l4_ui.label_DataEndDate_value.setText(end_date)
main_gui.l4_ui.show()
main_gui.l4_ui.exec_()
def gfalternate_autocomplete(ds_tower, ds_alt, l4_info, called_by, mode="verbose"):
"""
Purpose:
Gap fill using alternate data with gaps identified automatically.
Usage:
This routine is usually called after an initial gap filling process, either manual
or automatic monthly or number of days, has been done. It is intended to detect
remaining gaps, figure out the period either side of the gaps needed to get the
minimum number of good points and run the gap filling using alternate data on that
period.
Side effects:
Author: PRI
Date: April 2015
"""
# needs a re-write to improve the logic and simplify the code
# - alt_series_list needs to be ordered by decreasing correlation,
# as currently written the first alternate variable with the numbers
# is chosen
# - gfalternate_main is called from here AFTER we have figured out
# the "best" alternate variable to use but without passing the
# alternate variable name, gfalternate_main then figures out the
# "best" alternate variable by a different method
# - there is duplication of functionality between this routine and
# gfalternate_main
# - there are logical inconsistencies between this routine and
# gfalternate_main
l4a = l4_info[called_by]
mode = "quiet" #"verbose" #"quiet"
if not l4a["gui"]["auto_complete"]:
return
dt_tower = ds_tower.series["DateTime"]["Data"]
nRecs = len(dt_tower)
ts = int(float(ds_tower.globalattributes["time_step"]))
si_tower = pfp_utils.GetDateIndex(dt_tower, l4a["gui"]["startdate"], ts=ts, default=0)
ei_tower = pfp_utils.GetDateIndex(dt_tower, l4a["gui"]["enddate"], ts=ts, default=nRecs-1)
ldt_tower = dt_tower[si_tower: ei_tower + 1]
nRecs_gui = len(ldt_tower)
label_tower_list = l4a["gui"]["series_list"]
for label_tower in label_tower_list:
data_all = {}
label_composite = label_tower + "_composite"
not_enough_points = False
data_composite, _, _ = pfp_utils.GetSeriesasMA(ds_tower, label_composite, si=si_tower, ei=ei_tower)
data_tower, _, _ = pfp_utils.GetSeriesasMA(ds_tower, label_tower, si=si_tower, ei=ei_tower)
mask_composite = numpy.ma.getmaskarray(data_composite)
gapstartend = pfp_utils.contiguous_regions(mask_composite)
if len(gapstartend) == 0:
if mode.lower() != "quiet":
msg = " autocomplete: composite " + label_composite + " has no gaps to fill, skipping ..."
logger.info(msg)
continue
# now check all of the alternate data sources to see if they have anything to contribute
gotdataforgap = [False]*len(gapstartend)
label_output_list = gfalternate_getlabeloutputlist(l4_info, label_tower)
for label_output in label_output_list:
alt_filename = l4a["outputs"][label_output]["file_name"]
ds_alternate = ds_alt[alt_filename]
dt_alternate = ds_alternate.series["DateTime"]["Data"]
si_alternate = pfp_utils.GetDateIndex(dt_alternate, l4a["gui"]["startdate"], ts=ts, default=0)
ei_alternate = pfp_utils.GetDateIndex(dt_alternate, l4a["gui"]["enddate"], ts=ts, default=nRecs-1)
alt_series_list = [item for item in list(ds_alternate.series.keys()) if "_QCFlag" not in item]
alt_series_list = [item for item in alt_series_list if l4a["outputs"][label_output]["target"] in item]
for label_alternate in alt_series_list:
data_alt, _, _ = pfp_utils.GetSeriesasMA(ds_alternate, label_alternate, si=si_alternate, ei=ei_alternate)
data_all[label_alternate] = data_alt
for n, gap in enumerate(gapstartend):
min_points = max([int(((gap[1]-gap[0])+1)*l4a["gui"]["min_percent"]/100),3*l4a["gui"]["nperhr"]])
if numpy.ma.count(data_alt[gap[0]: gap[1]]) >= min_points:
if mode.lower() != "quiet":
msg = " autocomplete: " + label_tower + str(ldt_tower[gap[0]]) + str(ldt_tower[gap[1]]) + " got data to fill gap"
logger.info(msg)
gotdataforgap[n] = True
if numpy.ma.count_masked(data_tower[gap[0]: gap[1]]) == 0:
if mode.lower() != "quiet":
msg = " autocomplete: "+label_tower + str(ldt_tower[gap[0]]) + str(ldt_tower[gap[1]]) + " no gap to fill"
logger.info(msg)
gotdataforgap[n] = False
# finished checking all alternate data sources for data to fill remaining gaps
if mode.lower() != "quiet":
logger.info(" autocomplete: variable %s has %s gaps", label_tower, str(len(gapstartend)))
logger.info(" Auto-complete gap filling for %s (%s gaps)", label_tower, str(gotdataforgap.count(True)))
for n, gap in enumerate(gapstartend):
l4a["gui"]["autoforce"] = False
if not gotdataforgap[n]:
if mode.lower() != "quiet":
gap_startdate = ldt_tower[gap[0]].strftime("%Y-%m-%d %H:%M")
gap_enddate = ldt_tower[gap[1]].strftime("%Y-%m-%d %H:%M")
msg = " autocomplete: no alternate data for " + gap_startdate + " to " + gap_enddate
logger.info(msg)
continue
si = max([0, gap[0]])
ei = min([len(ldt_tower) - 1, gap[1]])
gap_startdate = ldt_tower[si].strftime("%Y-%m-%d %H:%M")
gap_enddate = ldt_tower[ei].strftime("%Y-%m-%d %H:%M")
if mode.lower() != "quiet":
msg = " autocomplete: gap is " + gap_startdate + " to " + gap_enddate
logger.info(msg)
min_points = max([int(((gap[1]-gap[0])+1)*l4a["gui"]["min_percent"]/100), 3*l4a["gui"]["nperhr"]])
num_good_points = 0
num_points_list = list(data_all.keys())
for label in list(data_all.keys()):
if numpy.ma.count(data_all[label][gap[0]:gap[1]]) < min_points:
num_points_list.remove(label)
continue
ngpts = gfalternate_getnumgoodpoints(data_tower[gap[0]:gap[1]], data_all[label][gap[0]:gap[1]])
#ngpts = int(len(data_tower[gap[0]:gap[1]+1])*l4a["gui"]["min_percent"]/100)
num_good_points = max([num_good_points, ngpts])
while num_good_points < min_points:
gap[0] = max(0, gap[0] - l4a["gui"]["nperday"])
gap[1] = min(nRecs_gui - 1, gap[1] + l4a["gui"]["nperday"])
if gap[0] == 0 and gap[1] == nRecs_gui - 1:
msg = " Unable to find enough good points in data set for " + label_tower
logger.warning(msg)
msg = " Replacing missing tower data with unmodified alternate data"
logger.warning(msg)
gap[0] = 0; gap[1] = -1
l4a["gui"]["autoforce"] = True
not_enough_points = True
if not_enough_points: break
min_points = max([int(((gap[1]-gap[0])+1)*l4a["gui"]["min_percent"]/100), 3*l4a["gui"]["nperhr"]])
for label in num_points_list:
ngpts = gfalternate_getnumgoodpoints(data_tower[gap[0]:gap[1]+1], data_all[label][gap[0]:gap[1]+1])
#ngpts = int(len(data_tower[gap[0]:gap[1]+1])*l4a["gui"]["min_percent"]/100)
if ngpts > num_good_points:
num_good_points = ngpts
gapfillperiod_startdate = ldt_tower[gap[0]].strftime("%Y-%m-%d %H:%M")
gapfillperiod_enddate = ldt_tower[gap[1]].strftime("%Y-%m-%d %H:%M")
if mode.lower() != "quiet":
msg = " autocomplete: gap fill period is " + gapfillperiod_startdate + " to " + gapfillperiod_enddate
logger.info(msg)
l4a["run"]["startdate"] = ldt_tower[gap[0]].strftime("%Y-%m-%d %H:%M")
l4a["run"]["enddate"] = ldt_tower[gap[1]].strftime("%Y-%m-%d %H:%M")
gfalternate_main(ds_tower, ds_alt, l4_info, called_by, label_tower_list=[label_tower])
if l4a["info"]["call_mode"] == "interactive":
gfalternate_plotcoveragelines(ds_tower, l4_info, called_by)
if not_enough_points: break
def gfalternate_createdataandstatsdict(ldt_tower, data_tower, attr_tower, l4a):
"""
Purpose:
Creates the data_dict and stat_dict to hold data and statistics during gap filling from
alternate data sources.
Usage:
Side effects:
Called by:
Calls:
Author: PRI
Date: May 2015
"""
data_dict = {}
stat_dict = {}
label_tower = l4a["run"]["label_tower"]
label_composite = l4a["run"]["label_composite"]
data_dict["DateTime"] = {"data": ldt_tower}
data_dict[label_tower] = {"attr": attr_tower,
"output_list": [label_tower, label_composite],
"data": data_tower}
data_dict[label_composite] = {"data": numpy.ma.masked_all_like(data_tower),
"fitcorr": numpy.ma.masked_all_like(data_tower),
"attr": attr_tower}
stat_dict[label_tower] = {"startdate": l4a["run"]["startdate"],
"enddate": l4a["run"]["enddate"]}
stat_dict[label_composite] = {"startdate": l4a["run"]["startdate"],
"enddate":l4a["run"]["enddate"]}
return data_dict, stat_dict
def gfalternate_done(alt_gui):
"""
Purpose:
Finishes up after gap filling from alternate data:
- destroy the GapFillFromAlternate GUI
- plot the summary statistics
- write the summary statistics to an Excel file
Usage:
Side effects:
Author: PRI
Date: August 2014
"""
# plot the summary statistics
#gfalternate_plotsummary(ds,alternate_info)
# close any open plots
if len(plt.get_fignums()) != 0:
for i in plt.get_fignums():
plt.close(i)
# destroy the alternate GUI
alt_gui.close()
# write Excel spreadsheet with fit statistics
pfp_io.xl_write_AlternateStats(alt_gui.ds4, alt_gui.l4_info)
# put the return code into ds.returncodes
alt_gui.ds4.returncodes["message"] = "normal"
def gfalternate_getalternatevaratmaxr(ds_tower, ds_alternate, l4a, mode="verbose"):
"""
Purpose:
Get a list of alternate variable names that are sorted based on correlation
with the tower data.
Usage:
Side effects:
Author: PRI
Date: August 2014
"""
# get a list of alternate variables for this tower variable
label_tower = l4a["run"]["label_tower"]
label_output = l4a["run"]["label_output"]
startdate = l4a["run"]["startdate"]
enddate = l4a["run"]["enddate"]
ts = int(float(ds_tower.globalattributes["time_step"]))
ldt_tower = ds_tower.series["DateTime"]["Data"]
si_tower = pfp_utils.GetDateIndex(ldt_tower, startdate, ts=ts)
ei_tower = pfp_utils.GetDateIndex(ldt_tower, enddate, ts=ts)
data_tower, _, _ = pfp_utils.GetSeriesasMA(ds_tower, label_tower, si=si_tower, ei=ei_tower)
# local pointers to the start and end indices
ldt_alternate = ds_alternate.series["DateTime"]["Data"]
si_alternate = pfp_utils.GetDateIndex(ldt_alternate, startdate, ts=ts)
ei_alternate = pfp_utils.GetDateIndex(ldt_alternate, enddate, ts=ts)
# create an array for the correlations and a list for the alternate variables in order of decreasing correlation
if "usevars" not in l4a["outputs"][label_output]:
altvar_list = gfalternate_getalternatevarlist(ds_alternate, l4a["run"]["label_tower"])
else:
altvar_list = l4a["outputs"][label_output]["usevars"]
r = numpy.zeros(len(altvar_list))
# loop over the variables in the alternate file
for idx, var in enumerate(altvar_list):
# get the alternate data
data_alternate, _, _ = pfp_utils.GetSeriesasMA(ds_alternate, var, si=si_alternate, ei=ei_alternate)
l4a["run"]["gotminpoints_alternate"] = gfalternate_gotminpoints(data_alternate, l4a,
label_tower, mode="quiet")
if numpy.ma.count(data_alternate) > l4a["run"]["min_points"]:
# check the lengths of the tower and alternate data are the same
if len(data_alternate) != len(data_tower):
msg = "gfalternate_getalternatevaratmaxr: alternate data length is " + str(len(data_alternate))
logger.info(msg)
msg = "gfalternate_getalternatevaratmaxr: tower data length is " + str(len(data_tower))
logger.info(msg)
raise ValueError('gfalternate_getalternatevaratmaxr: data_tower and data_alternate lengths differ')
# put the correlation into the r array
rval = numpy.ma.corrcoef(data_tower, data_alternate)[0, 1]
if rval == "nan": rval = float(0)
else:
if mode!="quiet":
msg = " getalternatevaratmaxr: not enough good data in alternate "+var
logger.error(msg)
rval = float(0)
r[idx] = numpy.ma.filled(rval, float(c.missing_value))
# save the correlation array for later plotting
l4a["run"]["r"] = r
# sort the correlation array and the alternate variable list
idx = numpy.flipud(numpy.argsort(r))
altvar_list_sorted = [altvar_list[j] for j in list(idx)]
# return the name of the alternate variable that has the highest correlation with the tower data
if l4a["outputs"][label_output]["source"].lower() == "access":
altvar_list_sorted = altvar_list_sorted[0:1]
return altvar_list_sorted
def gfalternate_getalternatevarlist(ds_alternate, label):
"""
Purpose:
Get a list of alternate variable names from the alternate data structure.
Usage:
Side effects:
Author: PRI
Date: August 2014
"""
alternate_var_list = [item for item in list(ds_alternate.series.keys()) if label in item]
# remove any extraneous Fn labels (alternate has Fn_lw and Fn_sw)
if label=="Fn":
alternate_var_list = [item for item in alternate_var_list if "lw" not in item]
alternate_var_list = [item for item in alternate_var_list if "sw" not in item]
# check the series in the alternate data
    if len(alternate_var_list) == 0:
logger.error("gfalternate_getalternatevarlist: series %s not in alternate data file", label)
return alternate_var_list
def gfalternate_getdataas2d(odt, data, l4a):
"""
Purpose:
     Return data, a 1D array, as a 2D array with days along axis=0 and
     times of day along axis=1 (i.e. shape [ndays, nperday])
Usage:
Side effects:
The 1D array, data, is truncated at the start and end to make whole days.
Author: PRI
Date: August 2014
"""
ts = l4a["info"]["time_step"]
nperday = l4a["gui"]["nperday"]
si = 0
while abs(odt[si].hour + float(odt[si].minute)/60 - float(ts)/60) > c.eps:
si = si + 1
ei = len(odt) - 1
while abs(odt[ei].hour + float(odt[ei].minute)/60) > c.eps:
ei = ei - 1
data_wholedays = data[si: ei + 1]
ndays = len(data_wholedays)//nperday
return numpy.ma.reshape(data_wholedays, [ndays, nperday])
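# Illustrative sketch (not part of the original code): shows how a 1D
# half-hourly series is reshaped to (days, records per day) and then averaged
# over axis=0 to give a diel (time-of-day) average, as done by
# gfalternate_getdataas2d and gfalternate_getdielaverage.  The value of 48
# records per day assumes a 30 minute time step.
def _sketch_diel_average():
    import numpy
    nperday = 48                                        # 30 minute data
    ndays = 5
    # synthetic data: a repeating diurnal cycle
    t = numpy.arange(ndays*nperday)
    data = numpy.ma.masked_array(numpy.sin(2*numpy.pi*t/nperday))
    data_2d = numpy.ma.reshape(data, [ndays, nperday])  # days on axis 0
    diel_avg = numpy.ma.average(data_2d, axis=0)        # average over days
    return diel_avg                                     # length nperday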
def gfalternate_getdielaverage(data_dict, l4a):
odt = data_dict["DateTime"]["data"]
label_tower = l4a["run"]["label_tower"]
output_list = list(data_dict[label_tower]["output_list"])
diel_avg = {}
for label_output in output_list:
diel_avg[label_output] = {}
if "data" in list(data_dict[label_output].keys()):
data_2d = gfalternate_getdataas2d(odt, data_dict[label_output]["data"], l4a)
diel_avg[label_output]["data"] = numpy.ma.average(data_2d, axis=0)
if "fitcorr" in list(data_dict[label_output].keys()):
data_2d = gfalternate_getdataas2d(odt, data_dict[label_output]["fitcorr"], l4a)
diel_avg[label_output]["fitcorr"] = numpy.ma.average(data_2d, axis=0)
return diel_avg
def gfalternate_getfitcorrecteddata(data_dict, stat_dict, l4a):
"""
Wrapper for the various methods of fitting the alternate data to the tower data.
"""
if l4a["run"]["fit_type"].lower() == "ols":
gfalternate_getolscorrecteddata(data_dict, stat_dict, l4a)
if l4a["run"]["fit_type"].lower() == "ols_thru0":
gfalternate_getolscorrecteddata(data_dict, stat_dict, l4a)
if l4a["run"]["fit_type"].lower() == "mrev":
gfalternate_getmrevcorrected(data_dict, stat_dict, l4a)
if l4a["run"]["fit_type"].lower() == "replace":
gfalternate_getreplacedata(data_dict, stat_dict, l4a)
if l4a["run"]["fit_type"].lower() == "rma":
gfalternate_getrmacorrecteddata(data_dict, stat_dict, l4a)
if l4a["run"]["fit_type"].lower() == "odr":
gfalternate_getodrcorrecteddata(data_dict, stat_dict, l4a)
def gfalternate_getlabeloutputlist(l4_info, label_tower):
l4a = l4_info["GapFillFromAlternate"]
l4m = l4_info["MergeSeries"]
olist = [item for item in list(l4a["outputs"].keys()) if l4a["outputs"][item]["target"] == label_tower]
for item in list(l4m.keys()):
if label_tower in list(l4m[item].keys()):
mlist = l4m[item][label_tower]["source"]
label_output_list = []
for item in mlist:
if item in olist: label_output_list.append(item)
return label_output_list
def gfalternate_getcorrecteddata(ds_alternate, data_dict, stat_dict, l4a):
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
if l4a["run"]["nogaps_tower"]:
# tower data has no gaps
stat_dict[label_output][label_alternate]["nLags"] = int(0)
data_dict[label_output][label_alternate]["lagcorr"] = numpy.ma.copy(data_dict[label_output][label_alternate]["data"])
data_dict[label_output][label_alternate]["fitcorr"] = numpy.ma.copy(data_dict[label_output][label_alternate]["data"])
stat_dict[label_output][label_alternate]["slope"] = float(0)
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "No gaps in tower"
elif not l4a["run"]["nogaps_tower"] and l4a["run"]["gotminpoints_both"]:
# got enough good points common to both data series
gfalternate_getlagcorrecteddata(ds_alternate, data_dict, stat_dict, l4a)
gfalternate_getfitcorrecteddata(data_dict, stat_dict, l4a)
elif not l4a["run"]["nogaps_tower"] and not l4a["run"]["gotminpoints_both"]:
stat_dict[label_output][label_alternate]["nLags"] = int(0)
data_dict[label_output][label_alternate]["lagcorr"] = numpy.ma.copy(data_dict[label_output][label_alternate]["data"])
if l4a["run"]["fit_type"].lower() == "replace":
gfalternate_getfitcorrecteddata(data_dict, stat_dict, l4a)
else:
data_dict[label_output][label_alternate]["fitcorr"] = numpy.ma.masked_all_like(data_dict[label_output][label_alternate]["data"])
stat_dict[label_output][label_alternate]["slope"] = float(0)
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "Too few points"
else:
msg = "getcorrecteddata: Unrecognised combination of logical tests"
logger.error(msg)
def gfalternate_getlagcorrecteddata(ds_alternate, data_dict, stat_dict, l4a):
label_tower = l4a["run"]["label_tower"]
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
data_tower = data_dict[label_tower]["data"]
data_alternate = data_dict[label_output][label_alternate]["data"]
ldt_alternate = ds_alternate.series["DateTime"]["Data"]
startdate = l4a["run"]["startdate"]
enddate = l4a["run"]["enddate"]
ts = l4a["info"]["time_step"]
si_alternate = pfp_utils.GetDateIndex(ldt_alternate, startdate, ts=ts)
ei_alternate = pfp_utils.GetDateIndex(ldt_alternate, enddate, ts=ts)
if l4a["run"]["lag"].lower() == "yes":
maxlags = l4a["gui"]["max_lags"]
_, corr = pfp_ts.get_laggedcorrelation(data_tower, data_alternate, maxlags)
nLags = numpy.argmax(corr) - l4a["gui"]["max_lags"]
if nLags > l4a["gui"]["nperhr"]*6:
logger.error("getlagcorrecteddata: lag is more than 6 hours for %s", label_tower)
si_alternate = si_alternate - nLags
ei_alternate = ei_alternate - nLags
data_alternate, _, _ = pfp_utils.GetSeriesasMA(ds_alternate, label_alternate, si=si_alternate, ei=ei_alternate, mode="mirror")
data_dict[label_output][label_alternate]["lagcorr"] = data_alternate
stat_dict[label_output][label_alternate]["nLags"] = nLags
else:
data_dict[label_output][label_alternate]["lagcorr"] = numpy.ma.copy(data_dict[label_output][label_alternate]["data"])
stat_dict[label_output][label_alternate]["nLags"] = int(0)
def gfalternate_getmrevcorrected(data_dict, stat_dict, l4a):
"""
Fit alternate data to tower data by replacing means and equalising variance.
"""
odt = data_dict["DateTime"]["data"]
label_tower = l4a["run"]["label_tower"]
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
# local copies of the data
data_tower = numpy.ma.copy(data_dict[label_tower]["data"])
data_alternate = numpy.ma.copy(data_dict[label_output][label_alternate]["data"])
data_2d = gfalternate_getdataas2d(odt, data_tower, l4a)
data_twr_hravg = numpy.ma.average(data_2d, axis=0)
data_2d = gfalternate_getdataas2d(odt, data_alternate, l4a)
data_alt_hravg = numpy.ma.average(data_2d, axis=0)
# calculate the means
mean_tower = numpy.ma.mean(data_tower)
mean_alternate = numpy.ma.mean(data_alternate)
# calculate the variances
var_twr_hravg = numpy.ma.var(data_twr_hravg)
var_alt_hravg = numpy.ma.var(data_alt_hravg)
var_ratio = var_twr_hravg/var_alt_hravg
# correct the alternate data
data_dict[label_output][label_alternate]["fitcorr"] = ((data_alternate - mean_alternate)*var_ratio) + mean_tower
stat_dict[label_output][label_alternate]["eqnstr"] = "Mean replaced, equal variance"
stat_dict[label_output][label_alternate]["slope"] = float(0)
stat_dict[label_output][label_alternate]["offset"] = float(0)
def gfalternate_getnumgoodpoints(data_tower, data_alternate):
mask = numpy.ma.mask_or(data_tower.mask, data_alternate.mask, copy=True, shrink=False)
return len(numpy.where(mask == False)[0])
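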
def gfalternate_getodrcorrecteddata(data_dict, stat_dict, l4a):
"""
Calculate the orthogonal distance regression fit between 2 1D arrays.
"""
label_tower = l4a["run"]["label_tower"]
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
y_in = numpy.ma.copy(data_dict[label_tower]["data"])
x_in = numpy.ma.copy(data_dict[label_output][label_alternate]["lagcorr"])
mask = numpy.ma.mask_or(x_in.mask, y_in.mask, copy=True, shrink=False)
x = numpy.ma.compressed(numpy.ma.array(x_in ,mask=mask, copy=True))
y = numpy.ma.compressed(numpy.ma.array(y_in, mask=mask, copy=True))
# attempt an ODR fit
linear = scipy.odr.Model(pfp_utils.linear_function)
mydata = scipy.odr.Data(x, y)
myodr = scipy.odr.ODR(mydata, linear, beta0=[1, 0])
myoutput = myodr.run()
odr_slope = myoutput.beta[0]
odr_offset = myoutput.beta[1]
data_dict[label_output][label_alternate]["fitcorr"] = odr_slope * x_in + odr_offset
stat_dict[label_output][label_alternate]["slope"] = odr_slope
stat_dict[label_output][label_alternate]["offset"] = odr_offset
stat_dict[label_output][label_alternate]["eqnstr"] = "y = %.3fx + %.3f"%(odr_slope, odr_offset)
def gfalternate_getolscorrecteddata(data_dict, stat_dict, l4a):
"""
Calculate the ordinary least squares fit between 2 1D arrays.
"""
label_tower = l4a["run"]["label_tower"]
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
y_in = numpy.ma.copy(data_dict[label_tower]["data"])
x_in = numpy.ma.copy(data_dict[label_output][label_alternate]["lagcorr"])
    mask = numpy.ma.mask_or(x_in.mask, y_in.mask, copy=True, shrink=False)
x = numpy.ma.compressed(numpy.ma.array(x_in, mask=mask, copy=True))
y = numpy.ma.compressed(numpy.ma.array(y_in, mask=mask, copy=True))
# attempt an OLS fit
if l4a["run"]["fit_type"].lower() == "ols_thru0":
resols = sm.OLS(y, x).fit()
data_dict[label_output][label_alternate]["fitcorr"] = resols.params[0]*x_in
stat_dict[label_output][label_alternate]["slope"] = resols.params[0]
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "y = %.3fx"%(resols.params[0])
else:
resols = sm.OLS(y, sm.add_constant(x, prepend=False)).fit()
if resols.params.shape[0] == 2:
data_dict[label_output][label_alternate]["fitcorr"] = resols.params[0]*x_in+resols.params[1]
stat_dict[label_output][label_alternate]["slope"] = resols.params[0]
stat_dict[label_output][label_alternate]["offset"] = resols.params[1]
stat_dict[label_output][label_alternate]["eqnstr"] = "y = %.3fx + %.3f"%(resols.params[0], resols.params[1])
else:
data_dict[label_output][label_alternate]["fitcorr"] = numpy.ma.copy(x_in)
stat_dict[label_output][label_alternate]["slope"] = float(0)
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "OLS error, replaced"
def gfalternate_getoutputstatistics(data_dict, stat_dict, l4a):
label_tower = l4a["run"]["label_tower"]
output_list = list(data_dict[label_tower]["output_list"])
if label_tower in output_list:
output_list.remove(label_tower)
for label in output_list:
# OLS slope and offset
if l4a["run"]["fit_type"] != "replace":
x_in = numpy.ma.copy(data_dict[label]["fitcorr"])
y_in = numpy.ma.copy(data_dict[label_tower]["data"])
mask = numpy.ma.mask_or(x_in.mask, y_in.mask, copy=True, shrink=False)
x = numpy.ma.compressed(numpy.ma.array(x_in, mask=mask, copy=True))
y = numpy.ma.compressed(numpy.ma.array(y_in, mask=mask, copy=True))
# get the array lengths
nx = len(x)
# attempt an OLS fit
if nx >= l4a["run"]["min_points"]:
if l4a["run"]["fit_type"].lower() == "ols":
resols = sm.OLS(y, sm.add_constant(x, prepend=False)).fit()
if resols.params.shape[0] == 2:
stat_dict[label]["slope"] = resols.params[0]
stat_dict[label]["offset"] = resols.params[1]
stat_dict[label]["eqnstr"] = "y = %.3fx + %.3f"%(resols.params[0], resols.params[1])
else:
stat_dict[label]["slope"] = float(0)
stat_dict[label]["offset"] = float(0)
stat_dict[label]["eqnstr"] = "OLS error"
else:
resols = sm.OLS(y, x).fit()
stat_dict[label]["slope"] = resols.params[0]
stat_dict[label]["offset"] = float(0)
stat_dict[label]["eqnstr"] = "y = %.3fx"%(resols.params[0])
else:
stat_dict[label]["slope"] = float(0)
stat_dict[label]["offset"] = float(0)
stat_dict[label]["eqnstr"] = "Too few points"
else:
stat_dict[label]["slope"] = float(1)
stat_dict[label]["offset"] = float(0)
stat_dict[label]["eqnstr"] = "Data replaced"
# number of points
stat_dict[label]["No. points"] = len(data_dict[label_tower]["data"])
num = numpy.ma.count(data_dict[label]["fitcorr"])-numpy.ma.count(data_dict[label_tower]["data"])
if num < 0: num = 0
stat_dict[label]["No. filled"] = trap_masked_constant(num)
# correlation coefficient
r = numpy.ma.corrcoef(data_dict[label_tower]["data"], data_dict[label]["fitcorr"])
stat_dict[label]["r"] = trap_masked_constant(r[0,1])
# means
avg = numpy.ma.mean(data_dict[label_tower]["data"])
stat_dict[label]["Avg (Tower)"] = trap_masked_constant(avg)
avg = numpy.ma.mean(data_dict[label]["fitcorr"])
stat_dict[label]["Avg (Alt)"] = trap_masked_constant(avg)
# variances
var_tower = numpy.ma.var(data_dict[label_tower]["data"])
stat_dict[label]["Var (Tower)"] = trap_masked_constant(var_tower)
var_alt = numpy.ma.var(data_dict[label]["fitcorr"])
stat_dict[label]["Var (Alt)"] = trap_masked_constant(var_alt)
if var_alt != 0:
stat_dict[label]["Var ratio"] = trap_masked_constant(var_tower/var_alt)
else:
stat_dict[label]["Var ratio"] = float(c.missing_value)
# RMSE & NMSE
error = (data_dict[label_tower]["data"]-data_dict[label]["fitcorr"])
rmse = numpy.ma.sqrt(numpy.ma.average(error*error))
stat_dict[label]["RMSE"] = trap_masked_constant(rmse)
data_range = numpy.ma.max(data_dict[label_tower]["data"])-numpy.ma.min(data_dict[label_tower]["data"])
data_range = numpy.maximum(data_range, 1)
if numpy.ma.is_masked(data_range) or abs(data_range) < c.eps:
nmse = float(c.missing_value)
else:
nmse = rmse/data_range
stat_dict[label]["NMSE"] = trap_masked_constant(nmse)
# bias & fractional bias
stat_dict[label]["Bias"] = trap_masked_constant(numpy.ma.average(error))
norm_error = (error)/(0.5*(data_dict[label_tower]["data"]+data_dict[label]["fitcorr"]))
stat_dict[label]["Frac Bias"] = trap_masked_constant(numpy.ma.average(norm_error))
def gfalternate_getreplacedata(data_dict, stat_dict, l4a):
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
data_alternate = data_dict[label_output][label_alternate]["lagcorr"]
data_dict[label_output][label_alternate]["fitcorr"] = numpy.ma.copy(data_alternate)
stat_dict[label_output][label_alternate]["slope"] = float(1)
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "No OLS, replaced"
def gfalternate_getrmacorrecteddata(data_dict, stat_dict, l4a):
"""
Calculate the ordinary least squares fit between 2 1D arrays.
"""
label_tower = l4a["run"]["label_tower"]
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
y_in = numpy.ma.copy(data_dict[label_tower]["data"])
x_in = numpy.ma.copy(data_dict[label_output][label_alternate]["lagcorr"])
mask = numpy.ma.mask_or(x_in.mask, y_in.mask, copy=True, shrink=False)
x = numpy.ma.compressed(numpy.ma.array(x_in, mask=mask, copy=True))
y = numpy.ma.compressed(numpy.ma.array(y_in, mask=mask, copy=True))
# attempt an OLS fit
if l4a["run"]["fit_type"].lower() == "ols_thru0":
resols = sm.OLS(y, x).fit()
rma_slope = resols.params[0]/numpy.sqrt(resols.rsquared)
rma_offset = numpy.mean(y) - rma_slope * numpy.mean(x)
data_dict[label_output][label_alternate]["fitcorr"] = rma_slope*x_in
stat_dict[label_output][label_alternate]["slope"] = rma_slope
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "y = %.3fx"%(rma_slope)
else:
resols = sm.OLS(y, sm.add_constant(x, prepend=False)).fit()
if resols.params.shape[0] == 2:
rma_slope = resols.params[0]/numpy.sqrt(resols.rsquared)
rma_offset = numpy.mean(y) - rma_slope * numpy.mean(x)
data_dict[label_output][label_alternate]["fitcorr"] = rma_slope*x_in+rma_offset
stat_dict[label_output][label_alternate]["slope"] = rma_slope
stat_dict[label_output][label_alternate]["offset"] = rma_offset
stat_dict[label_output][label_alternate]["eqnstr"] = "y = %.3fx + %.3f"%(rma_slope, rma_offset)
else:
data_dict[label_output][label_alternate]["fitcorr"] = numpy.ma.copy(x_in)
stat_dict[label_output][label_alternate]["slope"] = float(0)
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "RMA error, replaced"
def gfalternate_gotdataforgaps(data, data_alternate, l4a, mode="verbose"):
"""
Returns true if the alternate series has data where the composite series has gaps.
"""
return_code = True
ind = numpy.where((numpy.ma.getmaskarray(data) == True) & (numpy.ma.getmaskarray(data_alternate) == False))[0]
if len(ind) == 0:
if mode == "verbose":
label_alternate = l4a["run"]["label_alternate"]
msg = " Alternate series " + label_alternate + " has nothing to contribute"
logger.info(msg)
return_code = False
return return_code
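# Illustrative sketch (not part of the original code): the mask test used by
# gfalternate_gotdataforgaps above, i.e. find the indices where the composite
# series is missing but the alternate series has data.
def _sketch_fillable_indices():
    import numpy
    composite = numpy.ma.array([1.0, 0.0, 3.0, 0.0], mask=[0, 1, 0, 1])
    alternate = numpy.ma.array([1.1, 2.2, 0.0, 4.4], mask=[0, 0, 1, 0])
    ind = numpy.where((numpy.ma.getmaskarray(composite) == True) &
                      (numpy.ma.getmaskarray(alternate) == False))[0]
    return ind  # array([1, 3]): these gaps can be filled from the alternate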
def gfalternate_gotnogaps(data, label, mode="verbose"):
"""
Returns true if the data series has no gaps, false if there are gaps
"""
return_code = True
if numpy.ma.count_masked(data) == 0:
if mode == "verbose":
msg = " No gaps in " + label
logger.info(msg)
return_code = True
else:
return_code = False
return return_code
def gfalternate_gotminpoints(data, l4a, label, mode="verbose"):
"""
Returns true if data contains more than the minimum number of points required
or data contains less than the minimum number but the fit type is replace.
"""
return_code = True
if numpy.ma.count(data) < l4a["run"]["min_points"]:
if mode == "verbose":
msg = " Less than " + str(l4a["gui"]["min_percent"]) + " % data in series "
msg = msg + label + ", skipping ..."
logger.info(msg)
msg = "gotminpoints: " + label + " " + str(numpy.ma.count(data))
msg = msg + " " + str(l4a["run"]["min_points"])
logger.info(msg)
return_code = False
return return_code
def gfalternate_gotminpointsboth(data_tower, data_alternate, l4a, label_tower, label_alternate, mode="verbose"):
return_code = True
mask = numpy.ma.mask_or(numpy.ma.getmaskarray(data_tower), numpy.ma.getmaskarray(data_alternate),
copy=True, shrink=False)
if len(numpy.where(mask == False)[0]) < l4a["run"]["min_points"]:
if mode != "quiet":
msg = " Less than " + str(l4a["run"]["min_percent"]) + " % good data common to both series "
logger.info(msg)
msg = "gotminpointsboth: " + label_tower + " " + str(numpy.ma.count(data_tower))
msg = msg + " " + str(l4a["run"]["min_points"])
logger.info(msg)
msg = "gotminpointsboth: " + label_alternate + " " + str(numpy.ma.count(data_alternate))
msg = msg + " " + str(l4a["run"]["min_points"])
logger.info(msg)
return_code = False
return return_code
def gfalternate_initplot(data_dict, l4a, **kwargs):
pd = {"margin_bottom":0.075, "margin_top":0.05, "margin_left":0.075, "margin_right":0.05,
"xy_height":0.25, "xy_width":0.20, "xyts_space":0.05, "xyxy_space":0.05, "ts_width":0.9,
"text_left":0.675, "num_left":0.825, "row_bottom":0.35, "row_space":0.030}
# calculate bottom of the first time series and the height of the time series plots
label_tower = l4a["run"]["label_tower"]
label_composite = l4a["run"]["label_composite"]
output_list = list(data_dict[label_tower]["output_list"])
for item in [label_tower, label_composite]:
if item in output_list: output_list.remove(item)
nts = len(output_list) + 1
pd["ts_bottom"] = pd["margin_bottom"] + pd["xy_height"] + pd["xyts_space"]
pd["ts_height"] = (1.0 - pd["margin_top"] - pd["ts_bottom"])/nts
for key, value in kwargs.items():
pd[key] = value
return pd
def gfalternate_loadoutputdata(ds_tower, data_dict, l4a):
ldt_tower = ds_tower.series["DateTime"]["Data"]
label_output = l4a["run"]["label_output"]
flag_code = l4a["outputs"][label_output]["flag_code"]
label_composite = l4a["run"]["label_composite"]
label_alternate = l4a["run"]["label_alternate"]
ts = l4a["info"]["time_step"]
si = pfp_utils.GetDateIndex(ldt_tower, l4a["run"]["startdate"], ts=ts, default=0)
ei = pfp_utils.GetDateIndex(ldt_tower, l4a["run"]["enddate"], ts=ts, default=len(ldt_tower))
if l4a["gui"]["overwrite"]:
ind1 = numpy.where(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["data"]) == False)[0]
else:
ind1 = numpy.where((numpy.ma.getmaskarray(data_dict[label_output]["data"]) == True)&
(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["data"]) == False))[0]
data_dict[label_output]["data"][ind1] = data_dict[label_output][label_alternate]["data"][ind1]
if l4a["gui"]["overwrite"]:
ind2 = numpy.where(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False)[0]
else:
ind2 = numpy.where((numpy.ma.getmaskarray(data_dict[label_output]["fitcorr"]) == True)&
(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False))[0]
data_dict[label_output]["fitcorr"][ind2] = data_dict[label_output][label_alternate]["fitcorr"][ind2]
if l4a["gui"]["overwrite"]:
ind3 = numpy.where(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["data"]) == False)[0]
else:
ind3 = numpy.where((numpy.ma.getmaskarray(data_dict[label_composite]["data"]) == True)&
(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["data"]) == False))[0]
data_dict[label_composite]["data"][ind3] = data_dict[label_output][label_alternate]["data"][ind3]
if l4a["gui"]["overwrite"]:
ind4 = numpy.where(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False)[0]
else:
ind4 = numpy.where((numpy.ma.getmaskarray(data_dict[label_composite]["fitcorr"]) == True)&
(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False))[0]
data_dict[label_composite]["fitcorr"][ind4] = data_dict[label_output][label_alternate]["fitcorr"][ind4]
if l4a["gui"]["overwrite"]:
ind5 = numpy.where(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False)[0]
else:
ind5 = numpy.where((abs(ds_tower.series[label_composite]["Data"][si:ei+1]-float(c.missing_value)) < c.eps)&
(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False))[0]
ds_tower.series[label_composite]["Data"][si:ei+1][ind5] = numpy.ma.filled(data_dict[label_output][label_alternate]["fitcorr"][ind5], c.missing_value)
ds_tower.series[label_composite]["Flag"][si:ei+1][ind5] = numpy.int32(flag_code)
if l4a["gui"]["overwrite"]:
ind6 = numpy.where(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False)[0]
else:
ind6 = numpy.where((abs(ds_tower.series[label_output]["Data"][si:ei+1]-float(c.missing_value)) < c.eps)&
(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False))[0]
ds_tower.series[label_output]["Data"][si:ei+1][ind6] = numpy.ma.filled(data_dict[label_output][label_alternate]["fitcorr"][ind6], c.missing_value)
ds_tower.series[label_output]["Flag"][si:ei+1][ind6] = numpy.int32(flag_code)
def gfalternate_main(ds_tower, ds_alt, l4_info, called_by, label_tower_list=None):
"""
This is the main routine for using alternate data to gap fill drivers.
"""
l4a = l4_info[called_by]
mode = "quiet" #"quiet" #"verbose"
ts = int(float(ds_tower.globalattributes["time_step"]))
startdate = l4a["run"]["startdate"]
enddate = l4a["run"]["enddate"]
logger.info(" Gap fill with alternate: " + startdate + " to " + enddate)
# get local pointer to the datetime series
dt_tower = ds_tower.series["DateTime"]["Data"]
si_tower = pfp_utils.GetDateIndex(dt_tower, startdate, ts=ts, default=0)
ei_tower = pfp_utils.GetDateIndex(dt_tower, enddate, ts=ts, default=len(dt_tower)-1)
ldt_tower = dt_tower[si_tower:ei_tower + 1]
# now loop over the variables to be gap filled using the alternate data
    if label_tower_list is None:
label_tower_list = l4a["gui"]["series_list"]
for label_tower in label_tower_list:
l4a["run"]["label_tower"] = label_tower
label_composite = label_tower + "_composite"
l4a["run"]["label_composite"] = label_composite
# read the tower data and check for gaps
data_tower, _, attr_tower = pfp_utils.GetSeriesasMA(ds_tower, label_tower, si=si_tower, ei=ei_tower)
l4a["run"]["min_points"] = int(len(data_tower)*l4a["gui"]["min_percent"]/100)
# check to see if we have any gaps to fill
l4a["run"]["nogaps_tower"] = gfalternate_gotnogaps(data_tower, label_tower, mode=mode)
# check to see if we have more than the minimum number of points
l4a["run"]["gotminpoints_tower"] = gfalternate_gotminpoints(data_tower, l4a, label_tower, mode=mode)
# initialise a dictionary to hold the data
data_dict, stat_dict = gfalternate_createdataandstatsdict(ldt_tower, data_tower, attr_tower, l4a)
# get a list of the output names for this tower series
label_output_list = gfalternate_getlabeloutputlist(l4_info, label_tower)
# loop over the outputs for this tower series
for label_output in label_output_list:
l4a["run"]["label_output"] = label_output
l4a["run"]["alternate_name"] = l4a["outputs"][label_output]["alternate_name"]
# update the alternate_info dictionary
gfalternate_update_alternate_info(l4a)
# update the dictionaries
stat_dict[label_output] = {"startdate": startdate,
"enddate": enddate}
data_dict[label_output] = {"data": numpy.ma.masked_all_like(data_tower),
"fitcorr": numpy.ma.masked_all_like(data_tower),
"attr": attr_tower,
"source": l4a["outputs"][label_output]["source"]}
# get a local pointer to the alternate data structure
ds_alternate = ds_alt[l4a["outputs"][label_output]["file_name"]]
ldt_alternate = ds_alternate.series["DateTime"]["Data"]
            # start and end indices for this time range in the alternate data
si_alternate = pfp_utils.GetDateIndex(ldt_alternate, startdate, ts=ts, default=0)
ei_alternate = pfp_utils.GetDateIndex(ldt_alternate, enddate, ts=ts, default=len(ldt_alternate)-1)
# get the alternate series that has the highest correlation with the tower data
label_alternate_list = gfalternate_getalternatevaratmaxr(ds_tower, ds_alternate, l4a, mode=mode)
# loop over alternate variables
for label_alternate in label_alternate_list:
l4a["run"]["label_alternate"] = label_alternate
# get the raw alternate data
data_alternate, _, attr_alternate = pfp_utils.GetSeriesasMA(ds_alternate, label_alternate, si=si_alternate, ei=ei_alternate)
# check this alternate variable to see if there are enough points
l4a["run"]["gotminpoints_alternate"] = gfalternate_gotminpoints(data_alternate, l4a, label_alternate, mode=mode)
l4a["run"]["gotdataforgaps_alternate"] = gfalternate_gotdataforgaps(data_dict[label_output]["data"], data_alternate, l4a, mode=mode)
l4a["run"]["gotminpoints_both"] = gfalternate_gotminpointsboth(data_tower, data_alternate, l4a, label_tower, label_alternate, mode=mode)
                # update the data and stats dictionaries
stat_dict[label_output][label_alternate] = {"startdate": startdate,
"enddate": enddate}
if label_output not in data_dict[label_tower]["output_list"]:
data_dict[label_tower]["output_list"].append(label_output)
data_dict[label_output][label_alternate] = {"data": data_alternate,
"attr": attr_alternate}
gfalternate_getcorrecteddata(ds_alternate, data_dict, stat_dict, l4a)
gfalternate_loadoutputdata(ds_tower, data_dict, l4a)
# check to see if we have alternate data for this whole period, if so there is no reason to continue
ind_tower = numpy.where(abs(ds_tower.series[label_output]["Data"][si_tower:ei_tower+1]-float(c.missing_value)) < c.eps)[0]
if len(ind_tower) == 0:
break
# we have completed the loop over the alternate data for this output
# now do the statistics, diurnal average and daily averages for this output
gfalternate_getoutputstatistics(data_dict, stat_dict, l4a)
for label_output in label_output_list:
for result in l4a["outputs"][label_output]["results"]:
l4a["outputs"][label_output]["results"][result].append(stat_dict[label_output][result])
if l4a["run"]["nogaps_tower"]:
if l4a["gui"]["show_all"]:
pass
else:
continue
# plot the gap filled data
pd = gfalternate_initplot(data_dict, l4a)
diel_avg = gfalternate_getdielaverage(data_dict, l4a)
# reserve figure number 0 for the coverage lines/progress plot
gfalternate_plotcomposite(data_dict, stat_dict, diel_avg, l4a, pd)
def gfalternate_plotcomposite(data_dict, stat_dict, diel_avg, l4a, pd):
# set up some local pointers
label_tower = l4a["run"]["label_tower"]
label_composite = l4a["run"]["label_composite"]
time_step = l4a["info"]["time_step"]
points_test = numpy.ma.count(data_dict[label_tower]["data"]) < l4a["run"]["min_points"]
fit_test = l4a["run"]["fit_type"] != "replace"
if points_test and fit_test: return
# turn on interactive plotting
if l4a["gui"]["show_plots"]:
plt.ion()
else:
plt.ioff()
# create the figure canvas or re-use existing
if plt.fignum_exists(1):
fig = plt.figure(1)
plt.clf()
else:
fig = plt.figure(1, figsize=(13, 8))
fig.canvas.set_window_title(label_tower)
# get the plot title string
title = l4a["info"]["site_name"] + " : Comparison of tower and alternate data for " + label_tower
plt.figtext(0.5, 0.96, title, ha='center', size=16)
# bottom row of XY plots: scatter plot of 30 minute data
rect1 = [0.10, pd["margin_bottom"], pd["xy_width"], pd["xy_height"]]
xyscatter = plt.axes(rect1)
xyscatter.set_ylabel("Tower (" + data_dict[label_tower]["attr"]["units"] + ")")
xyscatter.set_xlabel("Alt (" + data_dict[label_composite]["attr"]["units"] + ")")
text = str(time_step) + " minutes"
xyscatter.text(0.6, 0.075, text, fontsize=10, horizontalalignment="left",
transform=xyscatter.transAxes)
xyscatter.plot(data_dict[label_composite]["fitcorr"], data_dict[label_tower]["data"], 'b.')
    # trap cases where all fitted, corrected data is masked
mamin = numpy.ma.min(data_dict[label_composite]["fitcorr"])
mamax = numpy.ma.max(data_dict[label_composite]["fitcorr"])
if not numpy.ma.is_masked(mamin) and not numpy.ma.is_masked(mamax):
xfit = numpy.array([mamin,mamax])
yfit = xfit*stat_dict[label_composite]["slope"] + stat_dict[label_composite]["offset"]
xyscatter.plot(xfit, yfit, 'g--', linewidth=3)
xyscatter.text(0.5, 0.9, stat_dict[label_composite]["eqnstr"], fontsize=8,
horizontalalignment='center', transform=xyscatter.transAxes, color='green')
# bottom row of XY plots: scatter plot of diurnal averages
ind = numpy.arange(l4a["gui"]["nperday"])/float(l4a["gui"]["nperhr"])
rect2 = [0.40, pd["margin_bottom"], pd["xy_width"], pd["xy_height"]]
diel_axes = plt.axes(rect2)
diel_axes.plot(ind, diel_avg[label_composite]["fitcorr"], 'g-', label="Alt (fit)")
diel_axes.plot(ind, diel_avg[label_composite]["data"], 'b-', label="Alt")
diel_axes.set_ylabel(label_tower + " (" + data_dict[label_tower]["attr"]["units"] + ")")
diel_axes.set_xlim(0, 24)
diel_axes.xaxis.set_ticks([0, 6, 12, 18, 24])
diel_axes.set_xlabel('Hour')
diel_axes.plot(ind, diel_avg[label_tower]["data"], 'ro', label="Tower")
diel_axes.legend(loc='upper right', frameon=False, prop={'size':8})
# top row: time series
ts_axes = []
rect3 = [pd["margin_left"], pd["ts_bottom"], pd["ts_width"], pd["ts_height"]]
ts_axes.append(plt.axes(rect3))
ts_axes[0].plot(data_dict["DateTime"]["data"], data_dict[label_tower]["data"], 'ro', label="Tower")
ts_axes[0].plot(data_dict["DateTime"]["data"], data_dict[label_composite]["fitcorr"], 'g-', label="Alt (fitted)")
ts_axes[0].set_xlim(data_dict["DateTime"]["data"][0], data_dict["DateTime"]["data"][-1])
ts_axes[0].legend(loc='upper right', frameon=False, prop={'size':10})
ts_axes[0].set_ylabel(label_tower + " (" + data_dict[label_tower]["attr"]["units"] + ")")
output_list = list(data_dict[label_tower]["output_list"])
for item in [label_tower, label_composite]:
if item in output_list: output_list.remove(item)
for n, label_output in enumerate(output_list):
n = n + 1
source = data_dict[label_output]["source"]
this_bottom = pd["ts_bottom"] + n*pd["ts_height"]
rect = [pd["margin_left"], this_bottom, pd["ts_width"], pd["ts_height"]]
ts_axes.append(plt.axes(rect, sharex=ts_axes[0]))
ts_axes[n].plot(data_dict["DateTime"]["data"], data_dict[label_output]["data"], 'b-', label=source)
plt.setp(ts_axes[n].get_xticklabels(), visible=False)
ts_axes[n].legend(loc='upper right', frameon=False, prop={'size':10})
ts_axes[n].set_ylabel(label_tower + " (" + data_dict[label_tower]["attr"]["units"] + ")")
# write the comparison statistics
stats_list = ["Var (Alt)", "Var (Tower)", "RMSE", "Bias", "r", "No. filled", "No. points"]
for n, item in enumerate(stats_list):
row_posn = pd["margin_bottom"] + n*pd["row_space"]
plt.figtext(pd["text_left"], row_posn, item)
plt.figtext(pd["num_left"], row_posn, '%.4g'%(stat_dict[label_composite][item]))
# save a hard copy of the plot
sdt = data_dict["DateTime"]["data"][0].strftime("%Y%m%d")
edt = data_dict["DateTime"]["data"][-1].strftime("%Y%m%d")
figname = l4a["info"]["site_name"].replace(" ", "") + "_Alternate_" + label_tower
figname = figname + "_" + sdt + "_" + edt + '.png'
figname = os.path.join(l4a["info"]["plot_path"], figname)
fig.savefig(figname, format='png')
# draw the plot on the screen
if l4a["gui"]["show_plots"]:
plt.draw()
pfp_utils.mypause(1)
plt.ioff()
else:
plt.ion()
def gfalternate_plotcoveragelines(ds_tower, l4_info, called_by):
"""
Purpose:
Plot a line representing the coverage of variables being gap filled.
Usage:
Author: PRI
Date: Back in the day
"""
# local pointer to l4_info["GapFillFromAlternate"]
l4a = l4_info[called_by]
# local pointer to datetime
ldt = ds_tower.series["DateTime"]["Data"]
# get the site name and the start and end date
site_name = ds_tower.globalattributes["site_name"]
start_date = ldt[0].strftime("%Y-%m-%d")
end_date = ldt[-1].strftime("%Y-%m-%d")
# list of targets to plot
targets = [l4a["outputs"][output]["target"] for output in list(l4a["outputs"].keys())]
targets = list(set(targets))
ylabel_list = [""] + targets + [""]
ylabel_right_list = [""]
colors = ["blue", "red", "green", "yellow", "magenta", "black", "cyan", "brown"]
xsize = 15.0
ysize = max([len(targets)*0.2, 1])
if l4a["gui"]["show_plots"]:
plt.ion()
else:
plt.ioff()
if plt.fignum_exists(0):
fig = plt.figure(0)
plt.clf()
ax1 = plt.subplot(111)
else:
fig = plt.figure(0, figsize=(xsize, ysize))
ax1 = plt.subplot(111)
title = "Coverage: " + site_name + " " + start_date + " to " + end_date
fig.canvas.set_window_title(title)
plt.ylim([0, len(targets) + 1])
plt.xlim([ldt[0], ldt[-1]])
for label, n in zip(targets, list(range(1, len(targets) + 1))):
data_series, _, _ = pfp_utils.GetSeriesasMA(ds_tower, label)
percent = 100*numpy.ma.count(data_series)/len(data_series)
ylabel_right_list.append("{0:.0f}%".format(percent))
ind_series = numpy.ma.ones(len(data_series))*float(n)
ind_series = numpy.ma.masked_where(numpy.ma.getmaskarray(data_series) == True, ind_series)
plt.plot(ldt, ind_series, color=colors[numpy.mod(n, 8)], linewidth=1)
if label+"_composite" in list(ds_tower.series.keys()):
data_composite, _, _ = pfp_utils.GetSeriesasMA(ds_tower, label+"_composite")
ind_composite = numpy.ma.ones(len(data_composite))*float(n)
ind_composite = numpy.ma.masked_where(numpy.ma.getmaskarray(data_composite) == True, ind_composite)
plt.plot(ldt, ind_composite, color=colors[numpy.mod(n,8)], linewidth=4)
ylabel_posn = list(range(0, len(targets)+2))
pylab.yticks(ylabel_posn, ylabel_list)
ylabel_right_list.append("")
ax2 = ax1.twinx()
pylab.yticks(ylabel_posn, ylabel_right_list)
fig.tight_layout()
if l4a["gui"]["show_plots"]:
plt.draw()
pfp_utils.mypause(1)
plt.ioff()
else:
plt.ion()
def gfalternate_quit(alt_gui):
""" Quit the GapFillFromAlternate GUI."""
# put the return code into ds.returncodes
alt_gui.ds4.returncodes["message"] = "quit"
alt_gui.ds4.returncodes["value"] = 1
# destroy the alternate GUI
alt_gui.close()
def gfalternate_run_interactive(alt_gui):
"""
Purpose:
Gets settings from the GapFillFromAlternate GUI and loads them
into the l4_info["gui"] dictionary
Usage:
Called when the "Run" button is clicked.
Side effects:
Loads settings into the l4_info["gui"] dictionary.
Author: PRI
Date: Re-written July 2019
"""
# local pointers to useful things
try:
ds_tower = alt_gui.ds4
ds_alt = alt_gui.ds_alt
called_by = alt_gui.called_by
l4_info = alt_gui.l4_info
l4a = l4_info[called_by]
# populate the l4_info["gui"] dictionary with things that will be useful
ts = int(float(ds_tower.globalattributes["time_step"]))
l4a["gui"]["nperhr"] = int(float(60)/ts + 0.5)
l4a["gui"]["nperday"] = int(float(24)*l4a["gui"]["nperhr"] + 0.5)
l4a["gui"]["max_lags"] = int(float(12)*l4a["gui"]["nperhr"] + 0.5)
# window period length
if str(alt_gui.radioButtons.checkedButton().text()) == "Manual":
l4a["gui"]["period_option"] = 1
elif str(alt_gui.radioButtons.checkedButton().text()) == "Months":
l4a["gui"]["period_option"] = 2
l4a["gui"]["number_months"] = int(alt_gui.lineEdit_NumberMonths.text())
elif str(alt_gui.radioButtons.checkedButton().text()) == "Days":
l4a["gui"]["period_option"] = 3
l4a["gui"]["number_days"] = int(alt_gui.lineEdit_NumberDays.text())
# plot settings
l4a["gui"]["overwrite"] = alt_gui.checkBox_Overwrite.isChecked()
l4a["gui"]["show_plots"] = alt_gui.checkBox_ShowPlots.isChecked()
l4a["gui"]["show_all"] = alt_gui.checkBox_PlotAll.isChecked()
# auto-complete settings
l4a["gui"]["auto_complete"] = alt_gui.checkBox_AutoComplete.isChecked()
l4a["gui"]["autoforce"] = False
# minimum percentage of good data required
l4a["gui"]["min_percent"] = max(int(str(alt_gui.lineEdit_MinPercent.text())),1)
# get the start and end datetimes entered in the alternate GUI
if len(str(alt_gui.lineEdit_StartDate.text())) != 0:
l4a["gui"]["startdate"] = str(alt_gui.lineEdit_StartDate.text())
else:
l4a["gui"]["startdate"] = l4a["info"]["startdate"]
if len(str(alt_gui.lineEdit_EndDate.text())) != 0:
l4a["gui"]["enddate"] = str(alt_gui.lineEdit_EndDate.text())
else:
l4a["gui"]["enddate"] = l4a["info"]["enddate"]
# now do the work
gfalternate_run(ds_tower, ds_alt, l4_info, called_by)
except Exception:
msg = " Error running L4, see below for details ..."
logger.error(msg)
error_message = traceback.format_exc()
logger.error(error_message)
return
def gfalternate_run(ds_tower, ds_alt, l4_info, called_by):
"""
Purpose:
Run the main routine for gap filling meteorological data.
Usage:
Side effects:
Author: PRI
Date: Re-written in August 2019
"""
l4a = l4_info[called_by]
# get a list of target variables
series_list = [l4a["outputs"][item]["target"] for item in list(l4a["outputs"].keys())]
l4a["gui"]["series_list"] = sorted(list(set(series_list)))
logger.info(" Gap filling %s using alternate data", l4a["gui"]["series_list"])
# initialise the l4_info["run"] dictionary
l4a["run"] = {"startdate": l4a["gui"]["startdate"],
"enddate": l4a["gui"]["enddate"]}
# run the main gap filling routine depending on window period
if l4a["gui"]["period_option"] == 1:
# manual run, window specified in GUI start and end datetime boxes
logger.info(" Starting manual run ...")
gfalternate_main(ds_tower, ds_alt, l4_info, called_by)
if l4a["info"]["call_mode"] == "interactive":
gfalternate_plotcoveragelines(ds_tower, l4_info, called_by)
logger.info(" Finished manual run ...")
elif l4a["gui"]["period_option"] == 2:
# automated run with window length in months
logger.info(" Starting auto (months) run ...")
startdate = dateutil.parser.parse(l4a["run"]["startdate"])
enddate = startdate + dateutil.relativedelta.relativedelta(months=l4a["gui"]["number_months"])
enddate = min([dateutil.parser.parse(l4a["info"]["enddate"]), enddate])
l4a["run"]["enddate"] = enddate.strftime("%Y-%m-%d %H:%M")
while startdate < enddate:
gfalternate_main(ds_tower, ds_alt, l4_info, called_by)
if l4a["info"]["call_mode"] == "interactive":
gfalternate_plotcoveragelines(ds_tower, l4_info, called_by)
startdate = enddate
l4a["run"]["startdate"] = startdate.strftime("%Y-%m-%d %H:%M")
enddate = startdate + dateutil.relativedelta.relativedelta(months=l4a["gui"]["number_months"])
enddate = min([dateutil.parser.parse(l4a["info"]["enddate"]), enddate])
l4a["run"]["enddate"] = enddate.strftime("%Y-%m-%d %H:%M")
# fill long gaps with autocomplete
gfalternate_autocomplete(ds_tower, ds_alt, l4_info, called_by)
logger.info(" Finished auto (months) run ...")
elif l4a["gui"]["period_option"] == 3:
# automated run with window length in days
logger.info(" Starting auto (days) run ...")
# get the start datetime entered in the alternate GUI
startdate = dateutil.parser.parse(l4a["run"]["startdate"])
# get the end datetime from the start datetime
enddate = startdate + dateutil.relativedelta.relativedelta(days=l4a["gui"]["number_days"])
# clip end datetime to last datetime in tower file
enddate = min([dateutil.parser.parse(l4a["info"]["enddate"]), enddate])
l4a["run"]["enddate"] = enddate.strftime("%Y-%m-%d %H:%M")
while startdate < enddate:
gfalternate_main(ds_tower, ds_alt, l4_info, called_by)
if l4a["info"]["call_mode"] == "interactive":
gfalternate_plotcoveragelines(ds_tower, l4_info, called_by)
startdate = enddate
l4a["run"]["startdate"] = startdate.strftime("%Y-%m-%d %H:%M")
enddate = startdate + dateutil.relativedelta.relativedelta(days=l4a["gui"]["number_days"])
enddate = min([dateutil.parser.parse(l4a["info"]["enddate"]), enddate])
l4a["run"]["enddate"] = enddate.strftime("%Y-%m-%d %H:%M")
gfalternate_autocomplete(ds_tower, ds_alt, l4_info, called_by)
logger.info(" Finished auto (days) run ...")
else:
logger.error("GapFillFromAlternate: unrecognised period option")
def gfalternate_update_alternate_info(l4a):
"""Update the l4_info dictionary."""
label_output = l4a["run"]["label_output"]
l4a["run"]["fit_type"] = l4a["outputs"][label_output]["fit_type"]
l4a["run"]["lag"] = l4a["outputs"][label_output]["lag"]
# autoforce is set true in gfalternate_autocomplete if there is not enough good points
# in the tower data for the whole time series, in this case we will use the alternate
# data "as is" by forcing a "replace" with no lag correction.
if l4a["gui"]["autoforce"]:
l4a["run"]["min_points"] = 0
l4a["run"]["fit_type"] = "replace"
l4a["run"]["lag"] = "no"
def trap_masked_constant(num):
if numpy.ma.is_masked(num):
num = float(c.missing_value)
return num
|
bsd-3-clause
| 3,962,994,394,770,090,000
| 52.792642
| 153
| 0.613529
| false
| 3.332436
| false
| false
| false
|
sravel/scripts
|
local/make_ldhatfiles.py
|
1
|
14611
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# @package make_ldhatfiles.py
# @author Lea Picard, Sebastien RAVEL
"""
The make_ldhatfiles script
==========================
:author: Sebastien Ravel, Lea Picard
:contact: sebastien.ravel@cirad.fr
:date: 08/07/2016
:version: 0.1
Script description
------------------
This Program takes a tab file and returns LDhat .sites and .locs files
Example
-------
>>> make_ldhatfiles.py -wd outPath -t SNP_table.tab -st chomosomeSize.txt
Help Programm
-------------
optional arguments:
- \-h, --help
show this help message and exit
- \-v, --version
display make_ldhatfiles.py version number and exit
Input mandatory infos for running:
- \-wd <path>, --workdir <path>
Path of the directory where files will be created
- \-t <filename>, --tab <filename>
Name of tab file in (input whole path if file is not
in the current working directory
- \-st <filename>, --size_tab <filename>
Name of a tab file containing the identifiers of the
subunits of division (chromosome/scaffold/contig) and
their total size. If some scaffolds are not wanted,
comment the line.
Input infos for running with default values:
- \-dt <int>, --datatype <int>
1 for haplotypic data (default), 2 for genotypic
- \-m <char>, --methode <char>
rhomap or interval (default)
- \-f <char>, --flag <char>
                        L for CO (default), C for gene conversion
"""
##################################################
## Modules
##################################################
## Python modules
from sys import version_info, version
try:
assert version_info <= (3,0)
except AssertionError:
print("You are using version %s but version 2.7.x is require for this script!\n" % version.split(" ")[0])
exit(1)
#Import MODULES_SEB
import sys, os
current_dir = os.path.dirname(os.path.abspath(__file__))+"/"
sys.path.insert(1,current_dir+'../modules/')
from MODULES_SEB import directory, relativeToAbsolutePath, dictDict2txt, existant_file
import argparse
try:
import egglib3 as egglib # USE EGGLIB_3
if int(egglib.version.split(".")[0]) != 3 :
print("You are using not use egglib V3!\n" )
exit(1)
except ImportError:
print("You are not able to load egglib V3!\n" )
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
##################################################
## Variables Globales
version = "0.1"
VERSION_DATE = '15/03/2016'
completeLDhatPATH = "completeLDhat"
#intervalLDhatPATH = "interval"
##intervalLDhatPATH = "rhomap"
statLDhatPATH = "statLDhat"
##################################################
## Functions
def build_sites(paramfilename, dataType):
"""fonction adaptée du script build_concensusV2.py pour fichier .sites LDHAT"""
fichier = open(paramfilename,"r")
outfile = open("temps1.tab", "w")
head = fichier.readline()
outfile.write(head.replace("CHROM\t",""))
dictListPositions = {}
for ligne in fichier:
ligne = ligne.rstrip()
lligne = ligne.split("\t")
nameChro = lligne[0]
posSNP = lligne[1]
dictListPositions.setdefault(nameChro, [posSNP]).append(posSNP)
#if nameChro not in dictListPositions.keys():
#dictListPositions[nameChro] = [posSNP]
#else:
#dictListPositions[nameChro].append(posSNP)
ligneoutput="\t".join(lligne[1:2])
ref=lligne[2]
souchestr="\t".join(lligne[3:]).replace("R",ref)
ligneoutput+="\t"+lligne[2]+"\t"+souchestr+"\n"
outfile.write(ligneoutput)
outfile.close()
############################################################################################
# switch matrice
############################################################################################
fichier = open("temps1.tab","r")
outfile = open("temps.tab", "w")
A = []
for ligne in fichier:
tabligne = ligne.rstrip().split("\t")
A.append(tabligne)
#print(A)
for ligne in list(zip(*A)):
outfile.write("\t".join(ligne)+"\n")
outfile.close()
############################################################################################
# Grep consensus
############################################################################################
	# retrieve the file passed as an argument
genometab = "temps.tab"
if paramfilename.count(".") > 1:
outputfilenameSite = ".".join(paramfilename.split(".")[0:-1])+".sites"
outputfilenameFasta = ".".join(paramfilename.split(".")[0:-1])+".fasta"
else:
outputfilenameSite = paramfilename.split(".")[0]+".sites"
outputfilenameFasta = paramfilename.split(".")[0]+".fasta"
outputfileSite = open(outputfilenameSite, 'w')
outputfileFasta = open(outputfilenameFasta, 'w')
	# use the tab file
TabFile = open(genometab, "r")
dictseq={}
head=TabFile.readline()
orderlist=[]
for tabline in TabFile:
ltab=tabline.rstrip().split("\t")
souche=ltab[0]
if souche not in dictseq.keys():
dictseq[souche]=""
orderlist.append(souche)
#get nb of sequences to add to file header
nbInd = len(orderlist)
seqreftab=ltab[1:]
dictseq[souche]="".join(seqreftab)
#get nb of SNPs in fasta sequence
nbSNP = len(dictseq[souche])
outputfileSite.write("%i %i %i\n" %(nbInd, nbSNP, dataType))
for souche in orderlist:
IDname = souche
seq = dictseq[souche]
record = SeqRecord(Seq(seq),id=IDname,name=IDname, description="")
SeqIO.write(record,outputfileSite, "fasta")
SeqIO.write(record,outputfileFasta, "fasta")
outputfileSite.close()
outputfileFasta.close()
	####### Remove temp files
os.remove(genometab)
os.remove("temps1.tab")
return nbSNP, dictListPositions[nameChro], str(outputfileFasta.name), nbInd
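# Illustrative sketch (not part of the original script): the layout of the
# LDhat .sites and .locs files written by build_sites() and the main code
# below, using made-up values.  The .sites file starts with a
# "n_sequences n_SNPs datatype" header followed by FASTA records of the SNP
# columns; the .locs file starts with "n_SNPs sequence_length flag" followed
# by the SNP positions on one line.
def _sketch_ldhat_files():
	nbInd, nbSNP, dataType = 3, 4, 1
	sites = "%i %i %i\n" % (nbInd, nbSNP, dataType)
	sites += ">isolate1\nACGT\n>isolate2\nACGA\n>isolate3\nTCGT\n"
	scaff_size, flag = "12000", "L"
	locs = "%i %s %s\n" % (nbSNP, scaff_size, flag)
	locs += " ".join(["120", "455", "7800", "11020"]) + "\n"
	return sites, locs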
############
## Main code
############
if __name__ == "__main__":
##
# parameters recovery
##
parser = argparse.ArgumentParser(prog='make_ldhatfiles.py', description='''This Program takes a tab file and returns LDhat .sites and .locs files''')
parser.add_argument('-v', '--version', action='version', version='You are using %(prog)s version: ' + version, help=\
'display make_ldhatfiles version number and exit')
filesreq = parser.add_argument_group('Input mandatory infos for running')
filesreq.add_argument('-wd', '--workdir', metavar="<path>",type=directory, required=True, dest = 'workdir', help = 'Path of the directory where files will be created')
filesreq.add_argument('-t', '--tab', metavar="<filename>",type=existant_file, required=True, dest = 'tabFile', help = 'Name of tab file in (input whole path if file is not in the current working directory')
filesreq.add_argument('-st', '--size_tab', metavar="<filename>",type=existant_file, required=True, dest = 'sizeTab', help = 'Name of a tab file containing the identifiers of the subunits of division (chromosome/scaffold/contig) and their total size. If some scaffolds are not wanted, comment the line.')
files = parser.add_argument_group('Input infos for running with default values')
files.add_argument('-dt', '--datatype', metavar="<int>", default=1, type=int, choices=[1,2], dest = 'datatype', help = '1 for haplotypic data (default), 2 for genotypic')
files.add_argument('-m', '--methode', metavar="<char>", default="interval", choices=["interval","rhomap"], dest = 'methode', help = 'rhomap or interval (default)')
	files.add_argument('-f', '--flag', metavar="<char>", default="L", choices=["L","C"], dest = 'flag', help = 'L for CO (default), C for gene conversion')
# check parameters
args = parser.parse_args()
#Welcome message
print("#################################################################")
print("# Welcome in make_ldhatfiles (Version " + version + ") #")
print("#################################################################")
# get arguments
workingObjDir = args.workdir
tabFile = relativeToAbsolutePath(args.tabFile)
sizeTab = relativeToAbsolutePath(args.sizeTab)
dataType = args.datatype
intervalLDhatPATH = args.methode
flag = args.flag
print("\t - Workink Directory: %s" % workingObjDir.pathDirectory)
print("\t - Input Path matrice is: %s" % tabFile)
print("\t - Input Path size is: %s" % sizeTab)
print("\t - dataType is : %s" % dataType)
print("\t - Working with : %s" % intervalLDhatPATH)
print("\t - flag is: %s\n\n" % flag)
#exit()
##
# code
##
# get basename to build the rest of the filenames
basename = tabFile.split("/")[-1].split(".")[0]
# build dictionary of scaffolds for the file to be split into
dictSizes = {}
with open(sizeTab, "r") as sizeTabOpen:
for sizeLine in sizeTabOpen:
# keys = IDs - values = total size
checkChro = sizeLine.split("\t")[0]
sizeChro = sizeLine.rstrip().split("\t")[1]
dictSizes[checkChro] = sizeChro
listRange = dictSizes.keys()
## split by specified subunits (scaffold/chromosome/contig etc)
# keys = subunits to be split, values = files to be written in
dictFilesOut = {}
with open(tabFile, "r") as tabOpen:
# get the header of the original tab file to rebuild split tab file
header = tabOpen.readline()
# start from second line
for line in tabOpen:
# chro = identifier of the subunit in the first column
chro = line.rstrip().split("\t")[0]
# if chro considered belongs to user-defined range
if chro in listRange:
# create subdirectory for the current scaffold
subdir = workingObjDir.pathDirectory+basename+"/"+chro
if not os.path.exists(subdir):
os.makedirs(subdir)
outputName = workingObjDir.pathDirectory+basename+"/"+chro+"/"+basename+"_"+chro+".tab"
# if chro not encountered yet, create file add header and current line
if chro not in dictFilesOut.keys():
dictFilesOut[chro] = open(outputName, "w")
dictFilesOut[chro].write(header)
dictFilesOut[chro].write(line)
# otherwise just add current line to relevant file
else:
dictFilesOut[chro].write(line)
# keys = names of split files, values = nb of SNPs in said file
dictNbSNP = {}
dictListPos = {}
listFasta = []
# for each split file
for fileOut in dictFilesOut.values():
name = fileOut.name
chroName = name.split("/")[-1].split(".")[0].replace(basename+"_","")
fileOut.close()
# create corresponding .sites file and associate Nb of SNPs
dictNbSNP[chroName], listPos, fasta, nbInd = build_sites(name, dataType)
listFasta.append(fasta)
dictListPos[chroName] = listPos
# for each subunit and its list of SNP positions
for checkChro, listPos in dictListPos.items():
if checkChro in dictFilesOut.keys():
outputLocsName = workingObjDir.pathDirectory+basename+"/"+checkChro+"/"+basename+"_"+checkChro+".locs"
# create .locs file
outputLocs = open(outputLocsName, "w")
# write header as NbSNP ScaffSize Flag
txt = "%i %s %s\n" %(dictNbSNP[checkChro], dictSizes[checkChro], flag)
outputLocs.write(txt)
# write SNP positions underneath
txtLocs = " ".join(dictListPos[checkChro])+"\n"
outputLocs.write(txtLocs)
outputLocs.close()
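            # Illustrative .locs layout produced by the two writes above (the
            # numbers are made-up placeholders, not real data): a header line
            # "NbSNP ScaffSize Flag" followed by the space-separated SNP
            # positions, e.g.
            #   145 1203451 L
            #   12 57 203 941 ...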
## calculate Pi and Theta values
dictThetaInfo = {}
cs = egglib.stats.ComputeStats()
cs.add_stats('Pi','thetaW')
# load alignement
for nameFasta in listFasta:
scaffold = nameFasta.split("/")[-1].replace(".fasta","").replace(basename+"_","")
# use egglib
align = egglib.io.from_fasta(nameFasta, groups=False)
stats = cs.process_align(align) # extract polymorphism data
# get number of SNPs in file
nbSNP = align.ls
#nbSNP = stats['ls_o']
# print results
if scaffold not in dictThetaInfo:
dictThetaInfo[scaffold] = { "Theta_SNP":stats['thetaW']/align.ls,
"Pi":stats['Pi']/align.ls,
"Nb_SNPs":nbSNP,
"Theta_allSNPs":stats['thetaW'],
"Theta_scaffold":stats['thetaW']/int(dictSizes[scaffold])
}
dicoMeanTheta = {}
sommeTheta,sommeSize = 0, 0
for scaffold, dico in dictThetaInfo.iteritems():
sommeTheta += dico["Theta_allSNPs"]
sommeSize += int(dictSizes[scaffold])
thetaCoreGenome = sommeTheta/sommeSize
with open(workingObjDir.pathDirectory+basename+"/"+basename+"_ThetaValues.tab", "w") as ThetaTab:
ThetaTab.write(dictDict2txt(dictThetaInfo))
ThetaTab.write("\nthetaCoreGenome\t%.4f" % thetaCoreGenome)
#MAKE sh script to run LDhat
objDir = directory(workingObjDir.pathDirectory+basename) # list all directory and files in the path
#nbInd = 13
#thetaCoreGenome = 0.007
cmdLoadR = "module load compiler/gcc/4.9.2 bioinfo/geos/3.4.2 bioinfo/gdal/1.9.2 mpi/openmpi/1.6.5 bioinfo/R/3.2.2"
cmdLookTable = completeLDhatPATH+" -n "+str(nbInd)+" -rhomax 100 -n_pts 201 -theta "+str(thetaCoreGenome)+" -prefix "+objDir.pathDirectory+basename
with open(workingObjDir.pathDirectory+basename+"/runLDhat_"+basename+".sh", "w") as runSHFile:
runSHFile.write("%s\n" % cmdLoadR)
runSHFile.write("%s\n" % cmdLookTable)
for scaff in sorted(objDir.listDir):
scaffObjDir = directory(scaff)
            print(repr(scaffObjDir))
            siteFile = [s for s in scaffObjDir.listFiles if ".site" in s][0]
            locsFile = [s for s in scaffObjDir.listFiles if ".locs" in s][0]
            basenameScaff = siteFile.split("/")[-1].split(".")[0]
#print basename
cmdCD = "cd "+scaff
if "rhomap" in intervalLDhatPATH:
cmdInterval = intervalLDhatPATH+" -seq "+siteFile+" -loc "+locsFile+" -lk "+objDir.pathDirectory+basename+"new_lk.txt -its 5000000 -bpen 10 -burn 100000 -samp 5000 -prefix "+scaffObjDir.pathDirectory+basenameScaff
if "interval" in intervalLDhatPATH:
cmdInterval = intervalLDhatPATH+" -seq "+siteFile+" -loc "+locsFile+" -lk "+objDir.pathDirectory+basename+"new_lk.txt -its 5000000 -bpen 10 -samp 5000 -prefix "+scaffObjDir.pathDirectory+basenameScaff
cmdStat = statLDhatPATH+" -input "+scaffObjDir.pathDirectory+basenameScaff+"rates.txt -prefix "+scaffObjDir.pathDirectory+basenameScaff
cmdGraph = "makeLDhatgraphs.R -f "+scaffObjDir.pathDirectory+basenameScaff+"rates.txt -o "+scaffObjDir.pathDirectory+basenameScaff+""
#print "%s\n%s\n%s\n%s\n" % (cmdCD,cmdLookTable,cmdInterval,cmdStat)
runSHFile.write("%s\n%s\n%s\n%s\n" % (cmdCD,cmdInterval,cmdStat,cmdGraph))
os.system("chmod 755 "+workingObjDir.pathDirectory+basename+"/runLDhat_"+basename+".sh")
cmdQsub = "qsub -V -q long.q -N "+basename+" -b Y -pe parallel_smp 4 "+workingObjDir.pathDirectory+basename+"/runLDhat_"+basename+".sh"
print(cmdQsub)
#print("\n\nExecution summary:")
#print(" - Outputting \n\
#- %s\n\
#- %s\n\
#- %s\n\n" % (tabFileOut.name,listKeepFile.name,correspondingCDSDir) )
print("#################################################################")
print("# End of execution #")
print("#################################################################")
|
gpl-3.0
| 7,221,963,836,876,002,000
| 31.826966
| 304
| 0.64629
| false
| 3.119368
| false
| false
| false
|
project-rig/rig
|
rig/place_and_route/place/rcm.py
|
1
|
6479
|
"""Reverse Cuthill-McKee based placement.
"""
from collections import defaultdict, deque
from six import itervalues
from rig.place_and_route.place.sequential import place as sequential_place
from rig.links import Links
from rig.netlist import Net
def _get_vertices_neighbours(nets):
"""Generate a listing of each vertex's immedate neighbours in an undirected
interpretation of a graph.
Returns
-------
{vertex: {vertex: weight, ...}), ...}
"""
zero_fn = (lambda: 0)
vertices_neighbours = defaultdict(lambda: defaultdict(zero_fn))
for net in nets:
if net.weight != 0:
for sink in net.sinks:
vertices_neighbours[net.source][sink] += net.weight
vertices_neighbours[sink][net.source] += net.weight
return vertices_neighbours
def _dfs(vertex, vertices_neighbours):
"""Generate all the vertices connected to the supplied vertex in
depth-first-search order.
"""
visited = set()
to_visit = deque([vertex])
while to_visit:
vertex = to_visit.pop()
if vertex not in visited:
yield vertex
visited.add(vertex)
to_visit.extend(vertices_neighbours[vertex])
def _get_connected_subgraphs(vertices, vertices_neighbours):
"""Break a graph containing unconnected subgraphs into a list of connected
subgraphs.
Returns
-------
[set([vertex, ...]), ...]
"""
remaining_vertices = set(vertices)
subgraphs = []
while remaining_vertices:
subgraph = set(_dfs(remaining_vertices.pop(), vertices_neighbours))
remaining_vertices.difference_update(subgraph)
subgraphs.append(subgraph)
return subgraphs
def _cuthill_mckee(vertices, vertices_neighbours):
"""Yield the Cuthill-McKee order for a connected, undirected graph.
`Wikipedia
<https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_ provides
a good introduction to the Cuthill-McKee algorithm. The RCM algorithm
attempts to order vertices in a graph such that their adjacency matrix's
bandwidth is reduced. In brief the RCM algorithm is a breadth-first search
with the following tweaks:
* The search starts from the vertex with the lowest degree.
* Vertices discovered in each layer of the search are sorted by ascending
order of their degree in the output.
.. warning::
This function must not be called on a disconnected or empty graph.
Returns
-------
[vertex, ...]
"""
vertices_degrees = {v: sum(itervalues(vertices_neighbours[v]))
for v in vertices}
peripheral_vertex = min(vertices, key=(lambda v: vertices_degrees[v]))
visited = set([peripheral_vertex])
cm_order = [peripheral_vertex]
previous_layer = set([peripheral_vertex])
while len(cm_order) < len(vertices):
adjacent = set()
for vertex in previous_layer:
adjacent.update(vertices_neighbours[vertex])
adjacent.difference_update(visited)
visited.update(adjacent)
cm_order.extend(sorted(adjacent, key=(lambda v: vertices_degrees[v])))
previous_layer = adjacent
return cm_order
def rcm_vertex_order(vertices_resources, nets):
"""A generator which iterates over the vertices in Reverse-Cuthill-McKee
order.
For use as a vertex ordering for the sequential placer.
"""
vertices_neighbours = _get_vertices_neighbours(nets)
for subgraph_vertices in _get_connected_subgraphs(vertices_resources,
vertices_neighbours):
cm_order = _cuthill_mckee(subgraph_vertices, vertices_neighbours)
for vertex in reversed(cm_order):
yield vertex
def rcm_chip_order(machine):
"""A generator which iterates over a set of chips in a machine in
Reverse-Cuthill-McKee order.
For use as a chip ordering for the sequential placer.
"""
# Convert the Machine description into a placement-problem-style-graph
# where the vertices are chip coordinate tuples (x, y) and each net
# represents the links leaving each chip. This allows us to re-use the
# rcm_vertex_order function above to generate an RCM ordering of chips in
# the machine.
vertices = list(machine)
nets = []
for (x, y) in vertices:
neighbours = []
for link in Links:
if (x, y, link) in machine:
dx, dy = link.to_vector()
neighbour = ((x + dx) % machine.width,
(y + dy) % machine.height)
# In principle if the link to chip is marked as working, that
# chip should be working. In practice this might not be the
# case (especially for carelessly hand-defined Machine
# objects).
if neighbour in machine:
neighbours.append(neighbour)
nets.append(Net((x, y), neighbours))
return rcm_vertex_order(vertices, nets)
def place(vertices_resources, nets, machine, constraints):
"""Assigns vertices to chips in Reverse-Cuthill-McKee (RCM) order.
The `RCM <https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_
algorithm (in graph-centric terms) is a simple breadth-first-search-like
    heuristic which attempts to produce an ordering of vertices that yields
a 1D placement with low network congestion. Placement is performed by
sequentially assigning vertices in RCM order to chips, also iterated over
in RCM order.
This simple placement scheme is described by Torsten Hoefler and Marc Snir
in their paper entitled 'Generic topology mapping strategies for
large-scale parallel architectures' published in the Proceedings of the
international conference on Supercomputing, 2011.
This is a thin wrapper around the :py:func:`sequential
<rig.place_and_route.place.sequential.place>` placement algorithm which
uses an RCM ordering for iterating over chips and vertices.
Parameters
----------
breadth_first : bool
Should vertices be placed in breadth first order rather than the
iteration order of vertices_resources. True by default.
"""
return sequential_place(vertices_resources, nets,
machine, constraints,
rcm_vertex_order(vertices_resources, nets),
rcm_chip_order(machine))
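# Added usage sketch (not part of the original rig module): a minimal, hedged
# demonstration of rcm_vertex_order on a four-vertex chain, assuming only the
# Net(source, sinks) constructor already used above. Run as a script, it
# should print the chain's vertices in Reverse-Cuthill-McKee order, starting
# from one end of the chain.
if __name__ == "__main__":
    example_vertices = {"a": {}, "b": {}, "c": {}, "d": {}}
    example_nets = [Net("a", ["b"]), Net("b", ["c"]), Net("c", ["d"])]
    print(list(rcm_vertex_order(example_vertices, example_nets)))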
|
gpl-2.0
| -5,137,749,699,306,870,000
| 34.994444
| 79
| 0.656737
| false
| 4.021726
| false
| false
| false
|
electionleaflets/electionleaflets
|
electionleaflets/apps/boundaries/views.py
|
1
|
4341
|
import math
from PIL import Image, ImageDraw
from django.http import HttpResponse, Http404
from django.shortcuts import render_to_response
from boundaries.models import Boundary
from parties.models import Party
from leaflets.models import Leaflet
google_dist = 20037508.34
def leaflet_polygon_options(boundary):
n = Leaflet.objects.filter(leafletconstituency__constituency__boundary = boundary).count()
return {"fill": leaflet_colour(n), "outline": (0,0,0,170)}
def leaflet_popup(boundary):
party_list = [(
p, Leaflet.objects.filter(
leafletconstituency__constituency__boundary=boundary,
publisher_party = p))
for p in Party.objects.filter(
leaflet__leafletconstituency__constituency__boundary=boundary)\
.distinct().order_by('name')]
unclassified_leaflets = Leaflet.objects.filter(
leafletconstituency__constituency__boundary=boundary,
publisher_party = None)
if unclassified_leaflets:
party_list = party_list + [({"name": "Uncategorised"}, unclassified_leaflets)]
return "boundaries/leaflets.html", {"constituency": boundary.constituency,
"party_list": party_list
}
def leaflet_colour(n):
r = math.log((n+1), 2)
return (int(50 + r * 16), int(255 - r * 32), int(100 + r * 16), int(32 + r * 32))
def leaflet_keyvalues():
return [0,2,5,10,20,50,100,200]
maps = {"leaflets": {"polygon_options": leaflet_polygon_options,
"template": leaflet_popup,
"colour": leaflet_colour,
"keyvalues": leaflet_keyvalues()}
}
def getDBzoom(z):
if int(z) > 10:
return 10
else:
return int(z)
def view_key(request, mapname=None, n=None, x=None, y=None):
image = Image.new("RGBA", (int(x), int(y)), maps[mapname]["colour"](int(n)))
response = HttpResponse(mimetype="image/png")
image.save(response, "PNG")
return response
def view_map(request, mapname):
from django.conf import settings
return render_to_response("boundaries/map.html", {"MEDIA_URL":settings.MEDIA_URL, "mapname": mapname, "keyvalues":maps[mapname]["keyvalues"]})
def tile(request, mapname, tz=None, tx=None, ty=None, tilex=256, tiley = 256):
options = maps[str(mapname)]
west, south, east, north = getTileRect(tx, ty, tz)
zoom = 2 ** float(tz)
tx = float(tx)
ty = float(ty)
image = Image.new("RGBA", (256, 256), (0, 0, 0, 0))
draw = ImageDraw.Draw(image)
dbz = getDBzoom(tz)
boundaries_within = Boundary.objects.filter(zoom=dbz, south__lt=north, north__gt=south, east__gt=west, west__lt=east)
for boundary in boundaries_within:
polygon_options = options["polygon_options"](boundary)
coords = eval(boundary.boundary)
l = []
for lng, lat in coords:
x = 256 * (lng - west) / (east - west)
y = 256 * (lat - north) / (south - north)
l.append((int(x), int(y)))
draw.polygon(l, **polygon_options)
del draw
response = HttpResponse(mimetype="image/png")
image.save(response, "PNG")
return response
def popup(request, mapname, x=None, y=None, z=None):
options = maps[str(mapname)]
x = float(x)
y = float(y)
dbz = getDBzoom(z)
possible_boundaries = Boundary.objects.filter(zoom=int(dbz), south__lt=y, north__gt=y, east__gt=x, west__lt=x)
for boundary in possible_boundaries:
coords = eval(boundary.boundary)
inside = False
for (vx0, vy0), (vx1, vy1) in zip(coords, coords[1:] + coords[:1]):
if ((vy0>y) != (vy1>y)) and (x < (vx1-vx0) * (y-vy0) / (vy1-vy0) + vx0):
inside = not(inside)
if inside:
return render_to_response(*options["template"](boundary))
raise Http404
def to_google(x, tilesAtThisZoom):
return google_dist * (1 - 2 * float(x) / tilesAtThisZoom)
def getTileRect(xt, yt, zoomt):
zoom = int(zoomt)
x = int(xt)
y = int(yt)
tilesAtThisZoom = 2 ** zoom
return (-to_google(x, tilesAtThisZoom),
to_google(y + 1, tilesAtThisZoom),
-to_google(x + 1, tilesAtThisZoom),
to_google(y, tilesAtThisZoom))
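# Added worked example (comments only, not in the original module): at zoom
# level 0 there is a single tile covering the whole Web Mercator extent, so
#   getTileRect(0, 0, 0) == (-20037508.34, -20037508.34, 20037508.34, 20037508.34)
# i.e. (west, south, east, north) in projected metres, matching the unpacking
# order used in tile() above.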
|
mit
| 428,706,416,601,348,300
| 36.102564
| 146
| 0.601705
| false
| 3.308689
| false
| false
| false
|
sajuptpm/neutron-ipam
|
neutron/tests/unit/metaplugin/fake_plugin.py
|
1
|
2680
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import l3_gwmode_db
class Fake1(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin):
supported_extension_aliases = ['external-net', 'router']
def fake_func(self):
return 'fake1'
def create_network(self, context, network):
session = context.session
with session.begin(subtransactions=True):
net = super(Fake1, self).create_network(context, network)
self._process_l3_create(context, net, network['network'])
return net
def update_network(self, context, id, network):
session = context.session
with session.begin(subtransactions=True):
net = super(Fake1, self).update_network(context, id,
network)
self._process_l3_update(context, net, network['network'])
return net
def delete_network(self, context, id):
return super(Fake1, self).delete_network(context, id)
def create_port(self, context, port):
port = super(Fake1, self).create_port(context, port)
return port
def create_subnet(self, context, subnet):
subnet = super(Fake1, self).create_subnet(context, subnet)
return subnet
def update_port(self, context, id, port):
port = super(Fake1, self).update_port(context, id, port)
return port
def delete_port(self, context, id, l3_port_check=True):
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
self.disassociate_floatingips(context, id)
return super(Fake1, self).delete_port(context, id)
class Fake2(Fake1):
def fake_func(self):
return 'fake2'
def fake_func2(self):
return 'fake2'
def start_rpc_listener(self):
# return value is only used to confirm this method was called.
return 'OK'
|
apache-2.0
| 3,253,998,093,902,717,000
| 34.263158
| 78
| 0.65
| false
| 3.764045
| false
| false
| false
|
rwatson/chromium-capsicum
|
tools/grit/grit/xtb_reader.py
|
1
|
3972
|
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Fast and efficient parser for XTB files.
'''
import sys
import xml.sax
import xml.sax.handler
class XtbContentHandler(xml.sax.handler.ContentHandler):
'''A content handler that calls a given callback function for each
translation in the XTB file.
'''
def __init__(self, callback, debug=False):
self.callback = callback
self.debug = debug
# 0 if we are not currently parsing a translation, otherwise the message
# ID of that translation.
self.current_id = 0
# Empty if we are not currently parsing a translation, otherwise the
# parts we have for that translation - a list of tuples
# (is_placeholder, text)
self.current_structure = []
# Set to the language ID when we see the <translationbundle> node.
self.language = ''
# Keep track of the if block we're inside. We can't nest ifs.
self.if_expr = None
def startElement(self, name, attrs):
if name == 'translation':
assert self.current_id == 0 and len(self.current_structure) == 0, (
"Didn't expect a <translation> element here.")
self.current_id = attrs.getValue('id')
elif name == 'ph':
assert self.current_id != 0, "Didn't expect a <ph> element here."
self.current_structure.append((True, attrs.getValue('name')))
elif name == 'translationbundle':
self.language = attrs.getValue('lang')
elif name == 'if':
assert self.if_expr is None, "Can't nest <if> in xtb files"
self.if_expr = attrs.getValue('expr')
def endElement(self, name):
if name == 'translation':
assert self.current_id != 0
# If we're in an if block, only call the callback (add the translation)
# if the expression is True.
should_run_callback = True
if self.if_expr:
should_run_callback = eval(self.if_expr, {}, {'os': sys.platform})
if should_run_callback:
self.callback(self.current_id, self.current_structure)
self.current_id = 0
self.current_structure = []
elif name == 'if':
assert self.if_expr is not None
self.if_expr = None
def characters(self, content):
if self.current_id != 0:
# We are inside a <translation> node so just add the characters to our
# structure.
#
# This naive way of handling characters is OK because in the XTB format,
# <ph> nodes are always empty (always <ph name="XXX"/>) and whitespace
# inside the <translation> node should be preserved.
self.current_structure.append((False, content))
class XtbErrorHandler(xml.sax.handler.ErrorHandler):
def error(self, exception):
pass
def fatalError(self, exception):
raise exception
def warning(self, exception):
pass
def Parse(xtb_file, callback_function, debug=False):
'''Parse xtb_file, making a call to callback_function for every translation
in the XTB file.
The callback function must have the signature as described below. The 'parts'
parameter is a list of tuples (is_placeholder, text). The 'text' part is
either the raw text (if is_placeholder is False) or the name of the placeholder
(if is_placeholder is True).
Args:
xtb_file: file('fr.xtb')
callback_function: def Callback(msg_id, parts): pass
Return:
The language of the XTB, e.g. 'fr'
'''
# Start by advancing the file pointer past the DOCTYPE thing, as the TC
# uses a path to the DTD that only works in Unix.
# TODO(joi) Remove this ugly hack by getting the TC gang to change the
# XTB files somehow?
front_of_file = xtb_file.read(1024)
xtb_file.seek(front_of_file.find('<translationbundle'))
handler = XtbContentHandler(callback=callback_function, debug=debug)
xml.sax.parse(xtb_file, handler)
assert handler.language != ''
return handler.language
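# Added usage sketch (not part of the original Chromium tool): parse an XTB
# file named on the command line and print each message ID with its part
# count, using the callback signature documented in Parse() above.
if __name__ == '__main__':
  def _PrintTranslation(msg_id, parts):
    print '%s: %d part(s)' % (msg_id, len(parts))
  if len(sys.argv) > 1:
    print 'Language: %s' % Parse(open(sys.argv[1]), _PrintTranslation)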
|
bsd-3-clause
| -5,619,635,609,388,487,000
| 33.241379
| 81
| 0.674723
| false
| 3.768501
| false
| false
| false
|
mindis/canteen
|
canteen/core/meta.py
|
1
|
9304
|
# -*- coding: utf-8 -*-
'''
canteen meta core
~~~~~~~~~~~~~~~~~
metaclass tools and APIs.
:author: Sam Gammon <sam@keen.io>
:copyright: (c) Keen IO, 2013
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
'''
# utils
from ..util import struct
from ..util import decorators
## Globals
_owner_map = {}
grab = lambda x: x.__func__ if hasattr(x, '__func__') else x
owner = lambda x: intern(x.__owner__ if hasattr(x, '__owner__') else x.__name__)
construct = lambda cls, name, bases, properties: type.__new__(cls, name, bases, properties)
metachain = lambda cls, n, b, p: cls.__metachain__.append(construct(cls, n, b, p)) or cls.__metachain__[-1]
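# Added note on the helpers above: ``grab`` unwraps bound methods to their raw
# function, ``owner`` resolves a class's owning component name, ``construct``
# is a thin alias for ``type.__new__``, and ``metachain`` records a newly
# constructed class on ``cls.__metachain__`` and returns it.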
class MetaFactory(type):
''' '''
__owner__, __metachain__, __root__ = "BaseMeta", [], True
def __new__(cls, name=None, bases=None, properties=None):
''' '''
if not name or not bases or not isinstance(properties, dict): # pragma: nocover
raise NotImplementedError('`MetaFactory` is meta-abstract and cannot be constructed directly.')
# get ready to construct, do so immediately for ``MetaFactory`` itself and other explicit roots
if '__root__' in properties and properties['__root__']:
del properties['__root__'] # treat as a root - init directly and continue
return construct(cls, name, bases, properties)
# construct, yo. then unconditionally apply it to the metachain and return also, defer to the class'
# ``initialize``, or any of its bases if they have ``initialize`, for constructing the actual class.
return ((grab(properties['initialize'] if 'initialize' in properties else
getattr((x for x in bases if hasattr(x, 'initialize')).next(), 'initialize')))(*(
cls, name, bases, properties))) if (
'initialize' in properties or any((hasattr(b, 'initialize') for b in bases))
) else metachain(cls, name, bases, properties)
def mro(cls):
''' '''
# override metaclass MRO to make them superimposable on each other
if not cls.__metachain__:
return type.mro(cls)
# make sure to enforce MRO semantics
seen, tree, order = set(), [], type.mro(cls)
for group in ([order[0]], order[1:-2], cls.__metachain__, order[-2:]):
for base in group:
if base not in seen: seen.add(base), tree.append(base)
return tuple(tree)
__repr__ = lambda cls: "<meta '%s.%s'>" % (cls.__owner__, cls.__name__)
class Base(type):
''' '''
__owner__, __metaclass__, __root__ = "Base", MetaFactory, True
class Proxy(object):
''' '''
class Factory(Base):
''' '''
__hooks__ = []
def initialize(cls, name, bases, properties):
''' '''
def metanew(_cls, _name, _bases, _properties):
''' '''
        # if this metaclass implements the ``Proxy.Registry`` class,
# defer to _cls.register directly after construction
if issubclass(_cls, Proxy.Registry):
return grab(_cls.register)(_cls, construct(_cls, _name, _bases, _properties))
return construct(_cls, _name, _bases, _properties) # pragma: nocover
# drop down if we already have a metachain for this tree
if cls.__metachain__: properties['__new__'] = metanew
# construct, yo. then unconditionally apply it to the metachain and return
return metachain(cls, name, bases, properties)
class Registry(Factory):
''' '''
__chain__ = {}
def iter_children(cls):
''' '''
for obj in cls.__chain__[owner(cls)]:
if obj is cls: continue # skip the parent class
yield obj
def children(cls):
''' '''
# remember to filter-out weakrefs that have died
return [child for child in cls.iter_children()]
@staticmethod
def register(meta, target):
''' '''
_owner = owner(target)
# check to see if bases are only roots, if it is a root create a new metabucket
if not any(((False if x in (object, type) else True) for x in target.__bases__)):
meta.__chain__[_owner] = []
return target
# resolve owner and construct
for base in target.__bases__:
if not base in (object, type):
if _owner not in meta.__chain__: meta.__chain__[_owner] = []
meta.__chain__[_owner].append(target)
return target
class Component(Registry):
''' '''
__target__ = None
__binding__ = None
__injector_cache__ = {}
__map__ = {} # holds map of all platform instances
@decorators.classproperty
def singleton_map(cls):
''' '''
return cls.__map__
@classmethod
def reset_cache(cls):
''' '''
cls.__injector_cache__ = {}
cls.__class__.__injector_cache__ = {}
return
@staticmethod
def collapse(cls, spec=None):
''' '''
# try the injector cache
if (cls, spec) not in Proxy.Component.__injector_cache__:
# otherwise, collapse and build one
property_bucket = {}
for metabucket in Proxy.Registry.__chain__.iterkeys():
for concrete in filter(lambda x: issubclass(x.__class__, Proxy.Component), Proxy.Component.__chain__[metabucket]):
namespace = ''
responder, properties = concrete.inject(concrete, cls.__target__, cls.__delegate__) or (None, {})
if not responder: continue # filter out classes that opt-out of injection
if hasattr(concrete, '__binding__'):
def do_pluck(klass, obj):
''' '''
def pluck(property_name):
''' '''
# dereference property aliases
if hasattr(klass, '__aliases__') and property_name in klass.__aliases__:
return getattr(obj, klass.__aliases__[property_name])
return getattr(obj, property_name) # pragma: nocover
setattr(pluck, 'target', klass)
return pluck
if concrete.__binding__:
property_bucket[concrete.__binding__.__alias__] = struct.CallbackProxy(do_pluck(concrete, responder))
if concrete.__binding__.__namespace__:
namespace = concrete.__binding__.__alias__
for bundle in properties:
# clear vars
prop, alias, _global = None, None, False
if not isinstance(bundle, tuple):
property_bucket['.'.join((namespace, bundle)) if namespace else bundle] = (responder, bundle)
continue
prop, alias, _global = bundle
if _global:
property_bucket['.'.join((namespace, alias)) if namespace else alias] = (responder, prop)
continue
property_bucket[alias] = (responder, prop)
# if it's empty, don't cache
if not property_bucket: return {}
# set in cache, unless empty
Proxy.Component.__injector_cache__[(cls, spec)] = property_bucket
# return from cache
return Proxy.Component.__injector_cache__[(cls, spec)]
@staticmethod
def inject(cls, requestor, delegate):
''' '''
# allow class to "prepare" itself (potentially instantiating a singleton)
concrete = cls.__class__.prepare(cls) if hasattr(cls.__class__, 'prepare') else cls
# allow class to indicate it does not wish to inject
if concrete is None: return
# gather injectable attributes
_injectable = set()
if hasattr(cls, '__bindings__'):
for iterator in (cls.__dict__.iteritems(), cls.__class__.__dict__.iteritems()):
for prop, value in iterator:
if cls.__bindings__:
if prop in cls.__bindings__:
func = cls.__dict__[prop] if not isinstance(cls.__dict__[prop], (staticmethod, classmethod)) else cls.__dict__[prop].__func__
do_namespace = func.__binding__.__namespace__ if cls.__binding__.__namespace__ else False
_injectable.add((prop, func.__binding__.__alias__ or prop, do_namespace))
continue
else:
# if no bindings are in use, bind all non-special stuff
if not prop.startswith('__'):
_injectable.add(prop)
# return bound injectables or the whole set
return concrete, _injectable or set(filter(lambda x: not x.startswith('__'), concrete.__dict__.iterkeys()))
@classmethod
def prepare(cls, target):
''' '''
if (not hasattr(target, '__binding__')) or target.__binding__ is None: return
# resolve name, instantiate and register instance singleton
alias = target.__binding__.__alias__ if (hasattr(target.__binding__, '__alias__') and isinstance(target.__binding__, basestring)) else target.__name__
if hasattr(target, '__singleton__') and target.__singleton__:
# if we already have a singleton, give that
if alias in cls.__map__: return cls.__map__[alias]
# otherwise, startup a new singleton
cls.__map__[alias] = target()
return cls.__map__[alias]
return target # pragma: nocover
__all__ = (
'MetaFactory',
'Base',
'Proxy'
)
|
mit
| -6,218,907,525,932,880,000
| 30.013333
| 156
| 0.576849
| false
| 4.232939
| false
| false
| false
|
pypa/twine
|
tests/test_main.py
|
1
|
1168
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import colorama
from twine import __main__ as dunder_main
def test_exception_handling(monkeypatch):
monkeypatch.setattr(sys, "argv", ["twine", "upload", "missing.whl"])
message = "InvalidDistribution: Cannot find file (or expand pattern): 'missing.whl'"
assert dunder_main.main() == colorama.Fore.RED + message + colorama.Style.RESET_ALL
def test_no_color_exception(monkeypatch):
monkeypatch.setattr(sys, "argv", ["twine", "--no-color", "upload", "missing.whl"])
message = "InvalidDistribution: Cannot find file (or expand pattern): 'missing.whl'"
assert dunder_main.main() == message
|
apache-2.0
| -8,956,839,330,193,934,000
| 39.275862
| 88
| 0.735445
| false
| 3.792208
| false
| false
| false
|
gilliM/MFQ
|
ModisFromQgis/help/source/conf.py
|
1
|
7118
|
# -*- coding: utf-8 -*-
#
# ModisFromQgis documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 12 17:11:03 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ModisFromQgis'
copyright = u'2013, Gillian Milani / RSL (UZH)'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TemplateClassdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ModisFromQgis.tex', u'ModisFromQgis Documentation',
u'Gillian Milani / RSL (UZH)', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'TemplateClass', u'ModisFromQgis Documentation',
[u'Gillian Milani / RSL (UZH)'], 1)
]
|
gpl-2.0
| 1,405,499,702,715,317,000
| 31.953704
| 80
| 0.709609
| false
| 3.711157
| true
| false
| false
|
SavinaRoja/Kerminal
|
kerminal/telemachus_api.py
|
1
|
13310
|
# encoding: utf-8
#The information in this module was gleaned from DataLinkHandlers.cs
#https://github.com/richardbunt/Telemachus/blob/master/Telemachus/src/DataLinkHandlers.cs
#Actions are sent to server, result in one action per message
mj_actions = ['mj.smartassoff', # Smart ASS Off
'mj.node', # Node
'mj.prograde', # Prograde
'mj.retrograde', # Retrograde
'mj.normalplus', # Normal Plus
'mj.normalminus', # Normal Minus
'mj.radialplus', # Radial Plus
'mj.radialminus', # Radial Minus
'mj.targetplus', # Target Plus
'mj.targetminus', # Target Minus
'mj.relativeplus', # Relative Plus
'mj.relativeminus', # Relative Minus
'mj.parallelplus', # Parallel Plus
'mj.parallelminus', # Parallel Minus
'mj.surface', # Surface [float heading, float pitch]
'mj.surface2', # Surface [double heading, double pitch]
]
#FlyByWire Stuff
vessel_actions = ['v.setYaw', # Yaw [float yaw]
'v.setPitch', # Pitch [float pitch]
'v.setRoll', # Roll [float roll]
'v.setFbW', # Set Fly by Wire On or Off [bool state]
'v.setPitchYawRollXYZ', # Set pitch, yaw, roll, X, Y and Z [float pitch, yaw, roll, x, y, z]
]
flight_actions = ['f.stage', # Stage
'f.setThrottle', # Set Throttle [float magnitude]
#'f.throttle', # Throttle (plotable)
'f.throttleUp', # Throttle Up
'f.throttleZero', # Throttle Zero
'f.throttleFull', # Throttle Full
'f.throttleDown', # Throttle Down
'f.rcs', # RCS [optional bool on/off]
'f.sas', # SAS [optional bool on/off]
'f.light', # Light [optional bool on/off]
'f.gear', # Gear [optional bool on/off]
'f.brake', # Brake [optional bool on/off]
'f.abort', # Abort [optional bool on/off]
'f.ag1', # Action Group 1 [optional bool on/off]
'f.ag2', # Action Group 2 [optional bool on/off]
'f.ag3', # Action Group 3 [optional bool on/off]
'f.ag4', # Action Group 4 [optional bool on/off]
'f.ag5', # Action Group 5 [optional bool on/off]
'f.ag6', # Action Group 6 [optional bool on/off]
'f.ag7', # Action Group 7 [optional bool on/off]
'f.ag8', # Action Group 8 [optional bool on/off]
'f.ag9', # Action Group 9 [optional bool on/off]
'f.ag10', # Action Group 10 [optional bool on/off]
]
time_warp_actions = ['t.timeWarp', # Time Warp [int rate]
]
#MapView here refers to the in-game orbital map, not the google maps hook
mapview_actions = ['m.toggleMapView', # Toggle Map View
'm.enterMapView', # Enter Map View
'm.exitMapView', # Exit Map View
]
#Plotables are things you can subscribe to; will be sent at each pulse
flight_plotables = ['f.throttle', # Throttle
'v.rcsValue', # Query RCS value
'v.sasValue', # Query SAS value
'v.lightValue', # Query light value
'v.brakeValue', # Query brake value
'v.gearValue', # Query gear value
]
target_plotables = ['tar.o.sma', # Target Semimajor Axis
'tar.o.lan', # Target Longitude of Ascending Node
'tar.o.maae', # Target Mean Anomaly at Epoch
'tar.name', # Target Name
'tar.type', # Target Type
'tar.distance', # Target Distance
'tar.o.velocity', # Target Velocity
'tar.o.PeA', # Target Periapsis
'tar.o.ApA', # Target Apoapsis
'tar.o.timeToAp', # Target Time to Apoapsis
'tar.o.timeToPe', # Target Time to Periapsis
'tar.o.inclination', # Target Inclination
'tar.o.eccentricity', # Target Eccentricity
'tar.o.period', # Target Orbital Period
'tar.o.relativeVelocity', # Target Relative Velocity
#Sends improperly encoded text back!
#'tar.o.trueAnomaly', # Target True Anomaly
'tar.o.orbitingBody', # Target Orbiting Body
'tar.o.argumentOfPeriapsis', # Target Argument of Periapsis
'tar.o.timeToTransition1', # Target Time to Transition 1
'tar.o.timeToTransition2', # Target Time to Transition 2
'tar.o.timeOfPeriapsisPassage', # Target Time of Periapsis Passage
]
docking_plotables = ['dock.ax', # Docking x Angle
'dock.ay', # Relative Pitch Angle
'dock.az', # Docking z Angle
'dock.x', # Target x Distance
'dock.y', # Target y Distance
]
#In my tests, none of these can be used. Breaks the connection
#body_plotables = ['b.name', # Body Name
#'b.maxAtmosphere', # Body Max Atmosphere
#'b.radius', # Body Radius
#'b.number', # Number of Bodies
#'b.o.gravParameter', # Body Gravitational Parameter
#'b.o.relativeVelocity', # Relative Velocity
#'b.o.PeA', # Periapsis
#'b.o.ApA', # Apoapsis
#'b.o.timeToAp', # Time to Apoapsis
#'b.o.timeToPe', # Time to Periapsis
#'b.o.inclination', # Inclination
#'b.o.eccentricity', # Eccentricity
#'b.o.period', # Orbital Period
#'b.o.argumentOfPeriapsis', # Argument of Periapsis
#'b.o.timeToTransition1', # Time to Transition 1
#'b.o.timeToTransition2', # Time to Transition 2
#'b.o.sma', # Semimajor Axis
#'b.o.lan', # Longitude of Ascending Node
#'b.o.maae', # Mean Anomaly at Epoch
#'b.o.timeOfPeriapsisPassage', # Time of Periapsis Passage
#'b.o.trueAnomaly', # True Anomaly
#'b.o.phaseAngle', # Phase Angle
#]
navball_plotables = ['n.heading', # Heading
'n.pitch', # Pitch
'n.roll', # Roll
'n.rawheading', # Raw Heading
'n.rawpitch', # Raw Pitch
'n.rawroll', # Raw Roll
]
vessel_plotables = ['v.altitude', # Altitude
'v.heightFromTerrain', # Height from Terrain
'v.terrainHeight', # Terrain Height
'v.missionTime', # Mission Time
'v.surfaceVelocity', # Surface Velocity
'v.surfaceVelocityx', # Surface Velocity x
'v.surfaceVelocityy', # Surface Velocity y
'v.surfaceVelocityz', # Surface Velocity z
'v.angularVelocity', # Angular Velocity
'v.orbitalVelocity', # Orbital Velocity
'v.surfaceSpeed', # Surface Speed
'v.verticalSpeed', # Vertical Speed
'v.geeForce', # G-Force
'v.atmosphericDensity', # Atmospheric Density
'v.long', # Longitude
'v.lat', # Latitude
'v.dynamicPressure', # Dynamic Pressure
'v.name', # Name
'v.body', # Body Name
'v.angleToPrograde', # Angle to Prograde
]
orbit_plotables = ['o.relativeVelocity', # Relative Velocity
'o.PeA', # Periapsis
'o.ApA', # Apoapsis
'o.timeToAp', # Time to Apoapsis
'o.timeToPe', # Time to Periapsis
'o.inclination', # Inclination
'o.eccentricity', # Eccentricity
'o.epoch', # Epoch
'o.period', # Orbital Period
'o.argumentOfPeriapsis', # Argument of Periapsis
'o.timeToTransition1', # Time to Transition 1
'o.timeToTransition2', # Time to Transition 2
'o.sma', # Semimajor Axis
'o.lan', # Longitude of Ascending Node
'o.maae', # Mean Anomaly at Epoch
'o.timeOfPeriapsisPassage', # Time of Periapsis Passage
'o.trueAnomaly', # True Anomaly'
]
orbit_plots_names = {'o.relativeVelocity': 'Relative Velocity',
'o.PeA': 'Periapsis',
'o.ApA': 'Apoapsis',
'o.timeToAp': 'Time to Apoapsis',
'o.timeToPe': 'Time to Periapsis',
'o.inclination': 'Inclination',
'o.eccentricity': 'Eccentricity',
'o.epoch': 'Epoch',
'o.period': 'Orbital Period',
'o.argumentOfPeriapsis': 'Argument of Periapsis',
'o.timeToTransition1': 'Time to Transition 1',
'o.timeToTransition2': 'Time to Transition 2',
'o.sma': 'Semimajor Axis',
'o.lan': 'Longitude of Ascending Node',
'o.maae': 'Mean Anomaly at Epoch',
'o.timeOfPeriapsisPassage': 'Time of Periapsis Passage',
'o.trueAnomaly': 'True Anomaly',
}
sensor_plotables = [#'s.sensor', # Sensor Information [string sensor type]
's.sensor.temp', # Temperature sensor information
's.sensor.pres', # Pressure sensor information
's.sensor.grav', # Gravity sensor information
's.sensor.acc', # Acceleration sensor information
]
paused_plotables = ['p.paused', # Paused
]
api_plotables = ['a.version', # Telemachus Version
]
time_warp_plotables = ['t.universalTime', # Universal Time
]
resource_plotables = ['r.resourceMax[ElectricCharge]',
'r.resourceCurrent[ElectricCharge]',
'r.resource[ElectricCharge]',
'r.resourceMax[LiquidFuel]',
'r.resourceCurrent[LiquidFuel]',
'r.resource[LiquidFuel]',
'r.resourceMax[Oxidizer]',
'r.resourceCurrent[Oxidizer]',
'r.resource[Oxidizer]',
'r.resourceCurrent[MonoPropellant]',
'r.resource[MonoPropellant]',
'r.resourceMax[XenonGas]',
'r.resourceCurrent[XenonGas]',
'r.resource[XenonGas]',
'r.resourceMax[IntakeAir]',
'r.resourceCurrent[IntakeAir]',
'r.resource[IntakeAir]']
#These constitute the safe set of plotable values to work with
plotables = flight_plotables + target_plotables + docking_plotables + \
navball_plotables + vessel_plotables + orbit_plotables + \
sensor_plotables + api_plotables + time_warp_plotables + \
resource_plotables
resources = ['r.resource', # Resource Information [string resource type]
'r.resourceCurrent', # Resource Information for Current Stage [string resource type]
'r.resourceMax', # Max Resource Information [string resource type]
]
apis = ['a.api', # API Listing
'a.ip', # IP Addresses
'a.apiSubSet', # Subset of the API Listing [string api1, string api2, ... , string apiN]
'a.version', # Telemachus Version
]
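# Added, hedged helper sketch (not part of the original module). It assumes
# the Telemachus websocket subscription format used by Kerminal-style clients
# (a JSON object whose '+' key lists the APIs to subscribe to and whose 'rate'
# is the update interval in milliseconds); verify against the Telemachus
# documentation before relying on it.
def build_subscription(api_strings, rate=500):
    """Return a JSON subscribe message for the given plotable API strings."""
    import json
    unknown = [s for s in api_strings if s not in plotables]
    if unknown:
        raise ValueError('Not in the safe plotable set: {0}'.format(unknown))
    return json.dumps({'+': list(api_strings), 'rate': rate})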
|
gpl-3.0
| 2,619,396,659,809,475,000
| 52.24
| 111
| 0.456123
| false
| 4.066606
| false
| false
| false
|
YoungKwonJo/mlxtend
|
mlxtend/plotting/remove_chartchunk.py
|
1
|
1051
|
# Sebastian Raschka 08/13/2014
# mlxtend Machine Learning Library Extensions
# matplotlib utilities for removing chartchunk
def remove_borders(axes, left=False, bottom=False, right=True, top=True):
"""
A function to remove chartchunk from matplotlib plots, such as axes
spines, ticks, and labels.
Keyword arguments:
axes: An iterable containing plt.gca() or plt.subplot() objects, e.g. [plt.gca()].
left, bottom, right, top: Boolean to specify which plot axes to hide.
"""
for ax in axes:
ax.spines["top"].set_visible(not top)
ax.spines["right"].set_visible(not right)
ax.spines["bottom"].set_visible(not bottom)
ax.spines["left"].set_visible(not left)
if bottom:
ax.tick_params(bottom="off", labelbottom="off")
if top:
ax.tick_params(top="off")
if left:
ax.tick_params(left="off", labelleft="off")
if right:
ax.tick_params(right="off")
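# Added usage sketch (not part of the original mlxtend module, and assuming a
# matplotlib version contemporary with it): strip the top and right spines
# from a simple line plot, passing the current axes in a list as the
# docstring above suggests.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    plt.plot([0, 1, 2], [0, 1, 4])
    remove_borders([plt.gca()], left=False, bottom=False, right=True, top=True)
    plt.show()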
|
bsd-3-clause
| -849,386,927,102,821,400
| 35.241379
| 94
| 0.591817
| false
| 3.849817
| false
| false
| false
|
johnaparker/dynamics
|
examples/old/leap_semistable.py
|
1
|
8602
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import animation
from scipy import constants
def rotation_transform(axis, angle, ax = None):
if ax is None: ax = plt.gca()
t_scale = ax.transData
t_rotate = mpl.transforms.Affine2D().rotate_deg_around(axis[0], axis[1], angle*180/np.pi)
return t_rotate + t_scale
def animate(spheres, radius, T):
r = radius*1e9
for sphere in spheres:
sphere.x *= 1e9
sphere.y *= 1e9
xmin = np.inf; xmax = -np.inf
ymin = np.inf; ymax = -np.inf
fig,axes = plt.subplots(1,2)
plt.subplots_adjust(hspace=0.3)
plt.subplot(axes[0])
circles = []
lines = []
for i,sphere in enumerate(spheres):
xmin = min(np.min(sphere.x), xmin)
xmax = max(np.max(sphere.x), xmax)
ymin = min(np.min(sphere.y), ymin)
ymax = max(np.max(sphere.y), ymax)
circles.append(plt.Circle([sphere.x[0], sphere.y[0]], r, animated=True, edgecolor='C{}'.format(i), fc='white', lw=2))
lines.append(plt.Line2D([sphere.x[0]-r, sphere.x[0]+r], [sphere.y[0], sphere.y[0]], lw=1.5, color='black', animated=True))
plt.gca().add_artist(circles[-1])
plt.gca().add_line(lines[-1])
plt.xlim([xmin-r, xmax+r])
plt.ylim([ymin-r, ymax+r])
plt.xlabel("x (nm)")
plt.ylabel("y (nm)")
plt.gca().set_aspect('equal')
ax = plt.gca()
title = ax.text(.97,0.03, r"{0:.2f} $\mu$s".format(0.0), transform=ax.transAxes, horizontalalignment='right', fontsize=13, animated=True)
def update(t):
for i,sphere in enumerate(spheres):
circles[i].center = (sphere.x[t], sphere.y[t])
lines[i].set_data([sphere.x[t]-r, sphere.x[t]+r], [sphere.y[t], sphere.y[t]])
lines[i].set_transform(rotation_transform([sphere.x[t], sphere.y[t]], sphere.phi[t], ax=ax))
title.set_text(r"{0:.2f} $\mu$s".format(dt*t*1.e6))
return circles + lines + [title]
anim = animation.FuncAnimation(plt.gcf(), update, frames=np.arange(0,len(spheres[0].x),1), interval=6, blit=True, repeat=True)
# plt.figure()
# plt.plot(time, phi)
# Writer = animation.writers['ffmpeg']
# writer = Writer(fps=30, bitrate=7800)
# anim.save('test.mp4', writer=writer, dpi=200)
# plt.plot(x, y)
# plt.figure()
# rad_data = np.array([(sphere.x**2 + sphere.y**2)**0.5 for sphere in spheres])
# plt.hist(rad_data.flatten())
# plt.figure()
# phi_data = np.array([sphere.phi[4000:] % 2*np.pi for sphere in spheres])
# plt.hist(phi_data.flatten())
plt.subplot(axes[1])
plt.plot(spheres[0].x, spheres[0].y)
plt.plot(spheres[1].x, spheres[1].y)
plt.xlabel("x (nm)")
plt.ylabel("y (nm)")
plt.xlim([xmin-r, xmax+r])
plt.ylim([ymin-r, ymax+r])
plt.gca().set_aspect('equal')
plt.suptitle(r"time step: {0:.1f} ns, T = {1} K, $\mu$ = {2:.2f} mPa$\cdot$s".format(dt*1e9, T, mu*1e3))
plt.figure()
kT = constants.k*T
KE_x = 0.5*spheres[0].mass*spheres[0].vx**2/kT
KE_y = 0.5*spheres[0].mass*spheres[0].vy**2/kT
KE_r = 0.5*spheres[0].Iz*spheres[0].omega**2/kT
plt.hist(KE_r[np.isfinite(KE_r)], color = 'C2', bins=np.linspace(0,2.5,80), alpha=0.5)
plt.hist(KE_x[np.isfinite(KE_x)], color = 'C0', bins=np.linspace(0,2.5,80), alpha=0.5)
plt.hist(KE_y[np.isfinite(KE_y)], color = 'C1', bins=np.linspace(0,2.5,80), alpha=0.5)
plt.axvline(x = 0.5*constants.k*T/kT, color='black')
plt.figtext(.85,.8, r"$\frac{{1}}{{2}}kT$", horizontalalignment='right', verticalalignment='top', fontsize=14)
plt.figtext(.85,.70, r"$\left< \frac{{1}}{{2}}mv_x^2 \right>$: {0:.3f} $kT$".format(np.average(KE_x)) , horizontalalignment='right', verticalalignment='top', fontsize=14, color='C0')
plt.figtext(.85,.6, r"$\left< \frac{{1}}{{2}}mv_y^2 \right>$: {0:.3f} $kT$".format(np.average(KE_y)) , horizontalalignment='right', verticalalignment='top', fontsize=14, color='C1')
plt.figtext(.85,.5, r"$\left< \frac{{1}}{{2}}I_z \omega_z^2 \right>$: {0:.3f} $kT$".format(np.average(KE_r)) , horizontalalignment='right', verticalalignment='top', fontsize=14, color='C2')
plt.xlim([0,3*0.5])
plt.xlabel("energy (kT)")
plt.ylabel("count")
plt.suptitle(r"time step: {0:.1f} ns, T = {1} K, $\mu$ = {2:.2f} mPa$\cdot$s".format(dt*1e9, T, mu*1e3))
plt.figure()
plt.plot(time*1e3, spheres[0].phi*180/np.pi)
plt.plot(time*1e3, spheres[1].phi*180/np.pi)
plt.xlabel("time (ms)")
plt.ylabel("angle (deg.)")
plt.show()
class rigid_body:
def __init__(self, pos, mass, Iz):
self.pos = pos
self.angle = 0
self.mass = mass
self.Iz = Iz
self.velocity = np.zeros(2)
self.angular_velocity = 0
self.F = np.zeros(2)
self.prevF = np.zeros(2)
self.T = 0
self.prevT = 0
self.predicted_velocity = np.zeros(2)
self.predicted_angular_velocity = 0
self.x = []
self.y = []
self.vx = []
self.vy = []
self.omega = []
self.phi = []
def predict(self, dt):
self.velocity += (self.prevF/self.mass)*dt/2
self.angular_velocity += (self.prevT/self.Iz)*dt/2
self.pos += self.velocity*dt
self.angle += self.angular_velocity*dt
def correct(self, F, T, dt):
self.velocity += (F/self.mass)*dt/2
self.angular_velocity += (T/self.Iz)*dt/2
self.prevF = F
self.prevT = T
def push(self, F, dt):
self.velocity += (F/self.mass)*dt
self.pos += self.velocity*dt
def twist(self, T, dt):
self.angular_velocity += (T/self.Iz)*dt
self.angle += self.angular_velocity*dt
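# Added illustration (comments only): one integration step with the
# predict/correct pair above looks like
#   body = rigid_body(np.array([0.0, 0.0]), mass, Iz)
#   body.predict(dt)        # half-kick with the previous force, then drift
#   body.correct(F, T, dt)  # half-kick with the freshly evaluated force/torque
# which is the velocity-Verlet-style update the time loop below applies to
# every sphere.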
# final time and time step
tf = 300e-6
dt = 172e-9
tf = 100000*dt
time = np.arange(0,tf,dt)
# sphere properties
radius = 150e-9
density = 10490
mass = 4/3*np.pi*radius**3*density
Iz = 2/5*mass*radius**2
# initial conditions
spheres = [ rigid_body(np.array([-200e-9,0.0]), mass, Iz),
rigid_body(np.array([200e-9,0.0]), mass, Iz) ]
# spheres = [rigid_body(100e-9*np.array([x,0.0]), mass, Iz) for x in np.arange(-8,8,4)]
# spheres = [rigid_body(100e-9*np.array([x,4.0]), mass, Iz) for x in np.arange(-8,8,4)]
# spheres.extend([rigid_body(100e-9*np.array([x,-4.0]), mass, Iz) for x in np.arange(-8,8,4)])
# spheres.extend([rigid_body(100e-9*np.array([x,-8.0]), mass, Iz) for x in np.arange(-8,8,4)])
# fluid properties
mu = 0.6e-3 # liquid viscosity
temp = 320 # temperature
alpha_T = 6*np.pi*mu*radius
alpha_R = 8*np.pi*mu*radius**3
beta_T = (2*alpha_T*constants.k*temp/dt)**0.5
beta_R = (2*alpha_R*constants.k*temp/dt)**0.5
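# Added note: the drag coefficients follow Stokes' law (6*pi*mu*r for
# translation, 8*pi*mu*r**3 for rotation), and beta_T/beta_R scale the random
# kicks so that the per-axis variance of the random force is 2*alpha*k_B*T/dt,
# the discrete fluctuation-dissipation relation used in the time loop below.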
# Electrostatic repulsion
estatic = 0
# beam properties
r0 = 400e-9 # radius
I = 0e-12 # intensity
l = 0e-12 # angular drive
center = np.array([0,-200e-9]) # beam center
for t in time:
# Fadd = np.zeros((len(spheres),2))
# for i in range(0,len(spheres)):
# for j in range(i+1,len(spheres)):
# d = spheres[i].pos - spheres[j].pos
# r = np.linalg.norm(d)
# Fadd[i] += estatic*d/r**3
# Fadd[j] += -estatic*d/r**3
# if r < 2*radius:
# dv = spheres[i].velocity - spheres[j].velocity
# spheres[i].velocity -= np.dot(dv, d)/r**2 * d
# spheres[j].velocity += np.dot(dv, d)/r**2 * d
for i,sphere in enumerate(spheres):
sphere.predict(dt)
F = -alpha_T*sphere.velocity + beta_T*np.random.normal(size=2)
n = sphere.pos - center
dist = np.linalg.norm(n)
n /= dist
that = np.array([n[1], -n[0]])
# F += I*1*dist*np.exp(dist**2/r0**2)*(1 - dist**2/r0**2)*n
# F += l*that# *np.sin(t/59)
# F += Fadd[i]
T = -alpha_R*sphere.angular_velocity + beta_R*np.random.normal(size=1)[0]
sphere.correct(F,T,dt)
sphere.x.append(sphere.pos[0])
sphere.y.append(sphere.pos[1])
sphere.vx.append(sphere.velocity[0])
sphere.vy.append(sphere.velocity[1])
sphere.omega.append(sphere.angular_velocity)
sphere.phi.append(sphere.angle)
for sphere in spheres:
sphere.x = np.asarray(sphere.x)
sphere.y = np.asarray(sphere.y)
sphere.vx = np.asarray(sphere.vx)
sphere.vy = np.asarray(sphere.vy)
sphere.phi = np.asarray(sphere.phi)
sphere.omega = np.asarray(sphere.omega)
animate(spheres, radius=radius, T=temp)
# plt.plot(1e9*spheres[0].x, 1e9*spheres[0].y, '.-')
# plt.plot(1e9*spheres[1].x, 1e9*spheres[1].y, '.-')
plt.show()
|
mit
| 8,537,241,411,429,405,000
| 33.270916
| 194
| 0.58126
| false
| 2.625763
| false
| false
| false
|
bendk/thesquirrel
|
docs/forms.py
|
1
|
1624
|
# thesquirrel.org
#
# Copyright (C) 2015 Flying Squirrel Community Space
#
# thesquirrel.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
# thesquirrel.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with thesquirrel.org. If not, see <http://www.gnu.org/licenses/>.
from django import forms
from django.utils.translation import ugettext as _
from docs.models import Document
class DocumentForm(forms.ModelForm):
public = forms.ChoiceField(label=_('Access'), choices=(
(False, 'Members Only'),
(True, 'Public'),
))
slug = forms.CharField(label=_('URL'), widget=forms.TextInput(attrs={
'data-slug-for': 'title'
}))
class Meta:
model = Document
fields = ( 'title', 'slug', 'public', 'body', )
labels = {
'body': '',
}
def __init__(self, author, *args, **kwargs):
self.author = author
super(DocumentForm, self).__init__(*args, **kwargs)
def save(self, commit=True):
document = super(DocumentForm, self).save(commit=False)
document.author = self.author
if commit:
document.save()
return document
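# Added, hypothetical view-level usage sketch (the request object and POST
# data are assumptions, not part of this module):
#   form = DocumentForm(request.user, data=request.POST)
#   if form.is_valid():
#       document = form.save()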
|
agpl-3.0
| -4,625,286,531,913,107,000
| 33.553191
| 78
| 0.666256
| false
| 4.009877
| false
| false
| false
|
kenik/pyrcp
|
pyrcp/user.py
|
1
|
2065
|
# -* coding: utf-8 -*-
from hashlib import md5
from pyrcp import *
import pyrcp.db as pydb
from flask import Flask, request, redirect, render_template
from flask_login import (LoginManager, login_required, login_user,
current_user, logout_user, UserMixin, AnonymousUserMixin)
from itsdangerous import URLSafeTimedSerializer
login_serializer = URLSafeTimedSerializer(app.secret_key)
class User(UserMixin):
def __init__(self, userid, user_pass, account_id):
self.id = userid
self.user_pass = user_pass
self.account_id = account_id
def get_auth_token(self):
"""
Encode a secure token for cookie
"""
data = [str(self.id), self.user_pass]
return login_serializer.dumps(data)
@staticmethod
def get(userid):
"""
Static method to search the database and see if userid exists. If it
does exist then return a User Object. If not then return None as
required by Flask-Login.
"""
        # Look the userid up in the `login` table; the query is parameterized
        # so the raw userid string cannot inject SQL.
        db = pydb.get_db()
        cursor = db.cursor()
        sql = "SELECT `userid`, `user_pass`, `account_id` FROM `login` WHERE `userid`=%s"
        cursor.execute(sql, (userid,))
users = cursor.fetchall()
for user in users:
if user[0] == userid:
return User(user[0], user[1], user[2])
return None
def get_acc_id(self):
return self.account_id
def get_id(self):
return self.id
def is_anonymous(self):
return False
def is_authenticated(self):
return True
class AnonymousUser(AnonymousUserMixin):
def __init__(self):
self.id = 'Guest'
def get_id(self):
return self.id
def is_anonymous(self):
return True
def is_authenticated(self):
return False
def hash_pass(password):
return md5(password).hexdigest()
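# A minimal sketch (not part of the original module) of how User.get and
# get_auth_token plug into Flask-Login. pyrcp presumably configures its own
# LoginManager elsewhere; the instance below exists purely for illustration.
_example_login_manager = LoginManager()
_example_login_manager.anonymous_user = AnonymousUser
@_example_login_manager.user_loader
def _load_user(userid):
    # Flask-Login calls this with the id it stored in the session cookie
    return User.get(userid)
@_example_login_manager.token_loader
def _load_token(token):
    # Older Flask-Login releases pair get_auth_token() with a token_loader;
    # max_age (in seconds) is an assumed remember-cookie lifetime.
    data = login_serializer.loads(token, max_age=86400)
    user = User.get(data[0])
    if user is not None and user.user_pass == data[1]:
        return user
    return None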
def main():
pass
if __name__ == '__main__':
main()
|
gpl-2.0
| 2,577,444,483,230,044,700
| 25.818182
| 91
| 0.607264
| false
| 3.903592
| false
| false
| false
|
NoNameYet07/421_521_final_project
|
BEERdunio_modules_kg1_sp.py
|
1
|
6285
|
#!/usr/bin/python
# Setting up GPIO pins
from time import sleep
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD) # Identifies the pin numbers to the pi
GPIO.setwarnings(False)
GPIO.setup(3, GPIO.OUT)   # Sets pin #3 as an output
GPIO.output(3, GPIO.LOW)  # Turns the initial output for pin 3 off
import time
timestr = time.strftime("%Y%m%d %H%M%S")
# Import functions to analyze license validity from CheckLicense.py
from CheckLicense import check_license, calc_BAC
import getpass
import sys
import re
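# Illustrative helper (a sketch, not used by the original flow below): all three
# modes repeat the same "open the solenoid, wait, close it, append a log line"
# steps, which could be shared like this. The pin number, pour time and the
# comma-separated log format are assumptions taken from the code below.
def dispense_and_log(logfile, fields, pin=3, open_time=2):
    GPIO.output(pin, GPIO.HIGH)   # energize the solenoid to start the pour
    print 'Beer time!'
    sleep(open_time)
    GPIO.output(pin, GPIO.LOW)    # de-energize to stop the pour
    with open(logfile, "a") as log_file:
        log_file.write(",".join(fields) + "," + time.strftime("%Y%m%d %H%M%S") + "\n")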
# Operating modes
mode = None  # no mode selected until a password is accepted below
while True:
#try:
mode_req=raw_input("Enter Mode(normal, party, barkeep): ")
if mode_req=="party":
passwd=getpass.getpass("Enter password: ")
if passwd=="admin":
mode="party"
if mode_req=="normal":
passwd=getpass.getpass("Enter password: ")
if passwd=="admin":
mode="normal"
if mode_req=="barkeep":
passwd=getpass.getpass("Enter password: ")
if passwd=="admin":
mode="barkeep"
#Normal mode operations--------------------------------------------------------------------------------------------
while mode=='normal':
#try:
print '{0} mode!' .format(mode)
raw_text=getpass.getpass('Swipe card now: ').strip()
check_license_out=check_license(raw_text)
valid_license=check_license_out[0]
first_name=check_license_out[1]
last_name=check_license_out[2]
DL_num=check_license_out[3]
        # Check to see if person is registered user
users=open("users_list.txt", 'r')
hit=0
print DL_num
if valid_license=='Yes':
for line in users:
if re.search(DL_num, line, re.IGNORECASE):
hit=hit+1
if hit>=1:
valid_license='Yes'
else:
print 'Not registered user'
valid_license='No'
        # Calculating the BAC
BAC=calc_BAC(raw_text)
print BAC
        # Opening the solenoid
        if valid_license=='Yes':
            GPIO.output(3, GPIO.HIGH)
            print 'Beer time!'
            sleep(2)
            GPIO.output(3, GPIO.LOW)
with open("swipes.txt", "a") as myfile:
myfile.write(last_name+","+first_name+" ")
myfile.write(DL_num+" ")
myfile.write(mode+" ")
myfile.write(time.strftime("%Y-%m-%d")+" ")
myfile.write(str(time.time())+"\n")
# except (NameError, IndexError, ValueError):
# print "error"
# continue
#Party mode operations--------------------------------------------------------------------------------------------
while mode=="party":
try:
print '{0} mode!' .format(mode)
raw_license_text=getpass.getpass('Swipe card now: ').strip()
check_license_out=check_license(raw_license_text)
valid_license=check_license_out[0]
first_name=check_license_out[1]
last_name=check_license_out[2]
# Opening the solenoid
if valid_license=='Yes':
                GPIO.output(3, GPIO.HIGH)
                print 'Beer time!'
                sleep(2)
                GPIO.output(3, GPIO.LOW)
with open("swipes_normal.txt", "a") as myfile:
myfile.write(last_name)
myfile.write(",")
myfile.write(first_name)
myfile.write(",")
myfile.write(time.strftime("%Y%m%d%H%M%S\n"))
except (NameError, IndexError, ValueError):
print "error"
continue
#Barkeep mode operations-------------------------------------------------------------------------------------------
while mode=="barkeep":
try:
print '{0} mode!' .format(mode)
            raw_license_text=getpass.getpass('Swipe card now: ').strip()
            check_license_out=check_license(raw_license_text)
valid_license=check_license_out[0]
first_name=check_license_out[1]
last_name=check_license_out[2]
#usr_chksum = #chksum(firstname_lastname)
#'{0}beer_score' .format(usr_chksum)
#Check to see if person is blacklisted
blacklist=open("blacklist.txt", 'r')
hit=0
if valid_license=='Yes':
for line in blacklist:
if re.search(last_name, line, re.IGNORECASE):
hit=hit+1
if re.search(first_name, line, re.IGNORECASE):
hit=hit+1
if hit>=2:
print "We don't serve your kind here."
blacklisted='Yes'
else:
blacklisted='No'
            # Calculate BAC and compare it against a cutoff; 0.08 is assumed
            # here as the legal limit.
            BAC=calc_BAC(raw_license_text)
            intoxicated=0.08
            # Opening the solenoid
            if blacklisted=='No':
                if BAC < intoxicated:
                    GPIO.output(3, GPIO.HIGH)
                    print 'Beer time!'
                    print BAC
                    sleep(2)
                    GPIO.output(3, GPIO.LOW)
with open("swipes_barkeep.txt", "a") as myfile:
myfile.write(last_name)
myfile.write(",")
myfile.write(first_name)
myfile.write("_")
myfile.write(time.strftime("%Y-%m-%d %H:%M%S\n"))
else:
print 'Your BAC is {0}' .format(BAC)
print "You are too drunk, beer time is over"
except (NameError, IndexError, ValueError):
print "error"
continue
# except (NameError, IndexError, ValueError):
#     print "error"
# continue
#end ---------------------------------------------------------------------------
|
gpl-2.0
| -4,553,099,314,612,198,000
| 32.972973
| 119
| 0.449642
| false
| 4.435427
| false
| false
| false
|