| repo_name (string, 5–92 chars) | path (string, 4–221 chars) | copies (string, 19 classes) | size (string, 4–6 chars) | content (string, 766–896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
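The columns above are per-file metadata for the code samples that follow. As a minimal sketch of how such a dump might be consumed, the snippet below filters rows by a few of those columns; it assumes the rows have been exported to a parquet file, and the file name `code_rows.parquet` as well as the chosen thresholds are placeholders, not part of this dump.

```python
import pandas as pd

# Hypothetical export of the rows below; any loader that yields the columns
# listed in the header (license, alpha_frac, autogenerated, ...) would work.
df = pd.read_parquet("code_rows.parquet")

# Keep human-written files under permissive licenses whose metadata falls in
# the ranges advertised by the header row.
mask = (
    ~df["autogenerated"]
    & df["license"].isin(["mit", "bsd-3-clause", "apache-2.0"])
    & df["alpha_frac"].between(0.25, 0.96)
    & (df["line_max"] < 1000)
)
print(df.loc[mask, ["repo_name", "path", "size"]].head())
```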
| matiboy/django_safari_notifications | django_safari_notifications/apps.py | 1 | 1111 |
# -*- coding: utf-8
from django.apps import AppConfig
import logging


class DjangoSafariNotificationsConfig(AppConfig):
    name = 'django_safari_notifications'
    verbose_name = 'Safari Push Notifications'
    version = 'v1'
    service_base = 'push'
    userinfo_key = 'userinfo'
    logger = logging.getLogger('django_safari_notifications')
    # Provide path to a pem file containing the certificate, the key as well as Apple's WWDRCA
    cert = 'path/to/your/cert'
    passphrase = 'pass:xxxx'  # this will be used with -passin in the openssl command so could be with pass, env etc
    # If single site, just set these values. Otherwise create Domain entries
    website_conf = None
    # sample single site: do not include the authenticationToken
    """
    website_conf = {
        "websiteName": "Bay Airlines",
        "websitePushID": "web.com.example.domain",
        "allowedDomains": ["http://domain.example.com"],
        "urlFormatString": "http://domain.example.com/%@/?flight=%@",
        "webServiceURL": "https://example.com/push"
    }
    """
    iconset_folder = '/path/to/your/iconset'
| mit | -6,515,032,378,796,969,000 | 38.678571 | 115 | 0.673267 | false | 3.715719 | false | false | false |
| TomSkelly/MatchAnnot | showAnnot.py | 1 | 2299 |
#!/usr/bin/env python
# Read annotation file, print selected stuff in human-readable format.
# AUTHOR: Tom Skelly (thomas.skelly@fnlcr.nih.gov)
import os
import sys
import optparse
import re # regular expressions
import cPickle as pickle
from tt_log import logger
import Annotations as anno
VERSION = '20150417.01'
def main ():
logger.debug('version %s starting' % VERSION)
opt, args = getParms()
if opt.gtfpickle is not None:
handle = open (opt.gtfpickle, 'r')
pk = pickle.Unpickler (handle)
annotList = pk.load()
handle.close()
else:
annotList = anno.AnnotationList (opt.gtf)
geneList = annotList.getGene (opt.gene)
if geneList is None:
print 'gene %s not found in annotations' % opt.gene
elif len(geneList) != 1:
print 'there are %d occurrences of gene %s in annotations' % (len(geneList), opt.gene)
else:
geneEnt = geneList[0]
print 'gene: ',
printEnt (geneEnt)
for transEnt in geneEnt.getChildren():
print '\ntr: ',
printTran (transEnt)
for exonEnt in transEnt.getChildren():
print 'exon: ',
printEnt (exonEnt)
logger.debug('finished')
return
def printEnt (ent):
print '%-15s %9d %9d %6d' % (ent.name, ent.start, ent.end, ent.end-ent.start+1)
return
def printTran (ent):
print '%-15s %9d %9d %6d' % (ent.name, ent.start, ent.end, ent.end-ent.start+1),
if hasattr (ent, 'startcodon'):
print ' start: %9d' % ent.startcodon,
if hasattr (ent, 'stopcodon'):
print ' stop: %9d' % ent.stopcodon,
print
return
def getParms (): # use default input sys.argv[1:]
parser = optparse.OptionParser(usage='%prog [options] <fasta_file> ... ')
parser.add_option ('--gtf', help='annotations in gtf format')
parser.add_option ('--gtfpickle', help='annotations in pickled gtf format')
parser.add_option ('--gene', help='gene to print')
parser.set_defaults (gtf=None,
gtfpickle=None,
gene=None,
)
opt, args = parser.parse_args()
return opt, args
if __name__ == "__main__":
main()
| gpl-3.0 | -1,754,153,406,302,757,400 | 24.263736 | 94 | 0.579382 | false | 3.467572 | false | false | false |
| Midrya/chromium | rietveld.py | 1 | 26054 |
# coding: utf-8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines class Rietveld to easily access a rietveld instance.
Security implications:
The following hypotheses are made:
- Rietveld enforces:
- Nobody else than issue owner can upload a patch set
- Verifies the issue owner credentials when creating new issues
- An issue's owner can't change once the issue is created
- A patch set cannot be modified
"""
import copy
import errno
import json
import logging
import re
import socket
import ssl
import sys
import time
import urllib
import urllib2
import urlparse
import patch
from third_party import upload
import third_party.oauth2client.client as oa2client
from third_party import httplib2
# Appengine replies with 302 when authentication fails (sigh.)
oa2client.REFRESH_STATUS_CODES.append(302)
upload.LOGGER.setLevel(logging.WARNING) # pylint: disable=E1103
class Rietveld(object):
"""Accesses rietveld."""
def __init__(
self, url, auth_config, email=None, extra_headers=None, maxtries=None):
self.url = url.rstrip('/')
self.rpc_server = upload.GetRpcServer(self.url, auth_config, email)
self._xsrf_token = None
self._xsrf_token_time = None
self._maxtries = maxtries or 40
def xsrf_token(self):
if (not self._xsrf_token_time or
(time.time() - self._xsrf_token_time) > 30*60):
self._xsrf_token_time = time.time()
self._xsrf_token = self.get(
'/xsrf_token',
extra_headers={'X-Requesting-XSRF-Token': '1'})
return self._xsrf_token
def get_pending_issues(self):
"""Returns an array of dict of all the pending issues on the server."""
# TODO: Convert this to use Rietveld::search(), defined below.
return json.loads(
self.get('/search?format=json&commit=2&closed=3&'
'keys_only=True&limit=1000&order=__key__'))['results']
def close_issue(self, issue):
"""Closes the Rietveld issue for this changelist."""
logging.info('closing issue %d' % issue)
self.post("/%d/close" % issue, [('xsrf_token', self.xsrf_token())])
def get_description(self, issue):
"""Returns the issue's description.
Converts any CRLF into LF and strip extraneous whitespace.
"""
return '\n'.join(self.get('/%d/description' % issue).strip().splitlines())
def get_issue_properties(self, issue, messages):
"""Returns all the issue's metadata as a dictionary."""
url = '/api/%d' % issue
if messages:
url += '?messages=true'
data = json.loads(self.get(url, retry_on_404=True))
data['description'] = '\n'.join(data['description'].strip().splitlines())
return data
def get_depends_on_patchset(self, issue, patchset):
"""Returns the patchset this patchset depends on if it exists."""
url = '/%d/patchset/%d/get_depends_on_patchset' % (issue, patchset)
resp = None
try:
resp = json.loads(self.post(url, []))
except (urllib2.HTTPError, ValueError):
# The get_depends_on_patchset endpoint does not exist on this Rietveld
# instance yet. Ignore the error and proceed.
# TODO(rmistry): Make this an error when all Rietveld instances have
# this endpoint.
pass
return resp
def get_patchset_properties(self, issue, patchset):
"""Returns the patchset properties."""
url = '/api/%d/%d' % (issue, patchset)
return json.loads(self.get(url))
def get_file_content(self, issue, patchset, item):
"""Returns the content of a new file.
Throws HTTP 302 exception if the file doesn't exist or is not a binary file.
"""
# content = 0 is the old file, 1 is the new file.
content = 1
url = '/%d/binary/%d/%d/%d' % (issue, patchset, item, content)
return self.get(url)
def get_file_diff(self, issue, patchset, item):
"""Returns the diff of the file.
Returns a useless diff for binary files.
"""
url = '/download/issue%d_%d_%d.diff' % (issue, patchset, item)
return self.get(url)
def get_patch(self, issue, patchset):
"""Returns a PatchSet object containing the details to apply this patch."""
props = self.get_patchset_properties(issue, patchset) or {}
out = []
for filename, state in props.get('files', {}).iteritems():
logging.debug('%s' % filename)
# If not status, just assume it's a 'M'. Rietveld often gets it wrong and
# just has status: null. Oh well.
status = state.get('status') or 'M'
if status[0] not in ('A', 'D', 'M', 'R'):
raise patch.UnsupportedPatchFormat(
filename, 'Change with status \'%s\' is not supported.' % status)
svn_props = self.parse_svn_properties(
state.get('property_changes', ''), filename)
if state.get('is_binary'):
if status[0] == 'D':
if status[0] != status.strip():
raise patch.UnsupportedPatchFormat(
filename, 'Deleted file shouldn\'t have property change.')
out.append(patch.FilePatchDelete(filename, state['is_binary']))
else:
content = self.get_file_content(issue, patchset, state['id'])
if not content:
# As a precaution due to a bug in upload.py for git checkout, refuse
# empty files. If it's empty, it's not a binary file.
raise patch.UnsupportedPatchFormat(
filename,
'Binary file is empty. Maybe the file wasn\'t uploaded in the '
'first place?')
out.append(patch.FilePatchBinary(
filename,
content,
svn_props,
is_new=(status[0] == 'A')))
continue
try:
diff = self.get_file_diff(issue, patchset, state['id'])
except urllib2.HTTPError, e:
if e.code == 404:
raise patch.UnsupportedPatchFormat(
filename, 'File doesn\'t have a diff.')
raise
# FilePatchDiff() will detect file deletion automatically.
p = patch.FilePatchDiff(filename, diff, svn_props)
out.append(p)
if status[0] == 'A':
# It won't be set for empty file.
p.is_new = True
if (len(status) > 1 and
status[1] == '+' and
not (p.source_filename or p.svn_properties)):
raise patch.UnsupportedPatchFormat(
filename, 'Failed to process the svn properties')
return patch.PatchSet(out)
@staticmethod
def parse_svn_properties(rietveld_svn_props, filename):
"""Returns a list of tuple [('property', 'newvalue')].
rietveld_svn_props is the exact format from 'svn diff'.
"""
rietveld_svn_props = rietveld_svn_props.splitlines()
svn_props = []
if not rietveld_svn_props:
return svn_props
# 1. Ignore svn:mergeinfo.
# 2. Accept svn:eol-style and svn:executable.
# 3. Refuse any other.
# \n
# Added: svn:ignore\n
# + LF\n
spacer = rietveld_svn_props.pop(0)
if spacer or not rietveld_svn_props:
# svn diff always put a spacer between the unified diff and property
# diff
raise patch.UnsupportedPatchFormat(
filename, 'Failed to parse svn properties.')
while rietveld_svn_props:
# Something like 'Added: svn:eol-style'. Note the action is localized.
# *sigh*.
action = rietveld_svn_props.pop(0)
match = re.match(r'^(\w+): (.+)$', action)
if not match or not rietveld_svn_props:
raise patch.UnsupportedPatchFormat(
filename,
'Failed to parse svn properties: %s, %s' % (action, svn_props))
if match.group(2) == 'svn:mergeinfo':
# Silently ignore the content.
rietveld_svn_props.pop(0)
continue
if match.group(1) not in ('Added', 'Modified'):
# Will fail for our French friends.
raise patch.UnsupportedPatchFormat(
filename, 'Unsupported svn property operation.')
if match.group(2) in ('svn:eol-style', 'svn:executable', 'svn:mime-type'):
# ' + foo' where foo is the new value. That's fragile.
content = rietveld_svn_props.pop(0)
match2 = re.match(r'^ \+ (.*)$', content)
if not match2:
raise patch.UnsupportedPatchFormat(
filename, 'Unsupported svn property format.')
svn_props.append((match.group(2), match2.group(1)))
return svn_props
def update_description(self, issue, description):
"""Sets the description for an issue on Rietveld."""
logging.info('new description for issue %d' % issue)
self.post('/%d/description' % issue, [
('description', description),
('xsrf_token', self.xsrf_token())])
def add_comment(self, issue, message, add_as_reviewer=False):
max_message = 10000
tail = '…\n(message too large)'
if len(message) > max_message:
message = message[:max_message-len(tail)] + tail
logging.info('issue %d; comment: %s' % (issue, message.strip()[:300]))
return self.post('/%d/publish' % issue, [
('xsrf_token', self.xsrf_token()),
('message', message),
('message_only', 'True'),
('add_as_reviewer', str(bool(add_as_reviewer))),
('send_mail', 'True'),
('no_redirect', 'True')])
def add_inline_comment(
self, issue, text, side, snapshot, patchset, patchid, lineno):
logging.info('add inline comment for issue %d' % issue)
return self.post('/inline_draft', [
('issue', str(issue)),
('text', text),
('side', side),
('snapshot', snapshot),
('patchset', str(patchset)),
('patch', str(patchid)),
('lineno', str(lineno))])
def set_flag(self, issue, patchset, flag, value):
return self.post('/%d/edit_flags' % issue, [
('last_patchset', str(patchset)),
('xsrf_token', self.xsrf_token()),
(flag, str(value))])
def search(
self,
owner=None, reviewer=None,
base=None,
closed=None, private=None, commit=None,
created_before=None, created_after=None,
modified_before=None, modified_after=None,
per_request=None, keys_only=False,
with_messages=False):
"""Yields search results."""
# These are expected to be strings.
string_keys = {
'owner': owner,
'reviewer': reviewer,
'base': base,
'created_before': created_before,
'created_after': created_after,
'modified_before': modified_before,
'modified_after': modified_after,
}
# These are either None, False or True.
three_state_keys = {
'closed': closed,
'private': private,
'commit': commit,
}
url = '/search?format=json'
# Sort the keys mainly to ease testing.
for key in sorted(string_keys):
value = string_keys[key]
if value:
url += '&%s=%s' % (key, urllib2.quote(value))
for key in sorted(three_state_keys):
value = three_state_keys[key]
if value is not None:
url += '&%s=%d' % (key, int(value) + 1)
if keys_only:
url += '&keys_only=True'
if with_messages:
url += '&with_messages=True'
if per_request:
url += '&limit=%d' % per_request
cursor = ''
while True:
output = self.get(url + cursor)
if output.startswith('<'):
# It's an error message. Return as no result.
break
data = json.loads(output) or {}
if not data.get('results'):
break
for i in data['results']:
yield i
cursor = '&cursor=%s' % data['cursor']
def trigger_try_jobs(
self, issue, patchset, reason, clobber, revision, builders_and_tests,
master=None, category='cq'):
"""Requests new try jobs.
|builders_and_tests| is a map of builders: [tests] to run.
|master| is the name of the try master the builders belong to.
|category| is used to distinguish regular jobs and experimental jobs.
Returns the keys of the new TryJobResult entites.
"""
params = [
('reason', reason),
('clobber', 'True' if clobber else 'False'),
('builders', json.dumps(builders_and_tests)),
('xsrf_token', self.xsrf_token()),
('category', category),
]
if revision:
params.append(('revision', revision))
if master:
# Temporarily allow empty master names for old configurations. The try
# job will not be associated with a master name on rietveld. This is
# going to be deprecated.
params.append(('master', master))
return self.post('/%d/try/%d' % (issue, patchset), params)
def trigger_distributed_try_jobs(
self, issue, patchset, reason, clobber, revision, masters,
category='cq'):
"""Requests new try jobs.
|masters| is a map of masters: map of builders: [tests] to run.
|category| is used to distinguish regular jobs and experimental jobs.
"""
for (master, builders_and_tests) in masters.iteritems():
self.trigger_try_jobs(
issue, patchset, reason, clobber, revision, builders_and_tests,
master, category)
def get_pending_try_jobs(self, cursor=None, limit=100):
"""Retrieves the try job requests in pending state.
Returns a tuple of the list of try jobs and the cursor for the next request.
"""
url = '/get_pending_try_patchsets?limit=%d' % limit
extra = ('&cursor=' + cursor) if cursor else ''
data = json.loads(self.get(url + extra))
return data['jobs'], data['cursor']
def get(self, request_path, **kwargs):
kwargs.setdefault('payload', None)
return self._send(request_path, **kwargs)
def post(self, request_path, data, **kwargs):
ctype, body = upload.EncodeMultipartFormData(data, [])
return self._send(request_path, payload=body, content_type=ctype, **kwargs)
def _send(self, request_path, retry_on_404=False, **kwargs):
"""Sends a POST/GET to Rietveld. Returns the response body."""
# rpc_server.Send() assumes timeout=None by default; make sure it's set
# to something reasonable.
kwargs.setdefault('timeout', 15)
logging.debug('POSTing to %s, args %s.', request_path, kwargs)
try:
# Sadly, upload.py calls ErrorExit() which does a sys.exit(1) on HTTP
# 500 in AbstractRpcServer.Send().
old_error_exit = upload.ErrorExit
def trap_http_500(msg):
"""Converts an incorrect ErrorExit() call into a HTTPError exception."""
m = re.search(r'(50\d) Server Error', msg)
if m:
# Fake an HTTPError exception. Cheezy. :(
raise urllib2.HTTPError(
request_path, int(m.group(1)), msg, None, None)
old_error_exit(msg)
upload.ErrorExit = trap_http_500
for retry in xrange(self._maxtries):
try:
logging.debug('%s' % request_path)
result = self.rpc_server.Send(request_path, **kwargs)
# Sometimes GAE returns a HTTP 200 but with HTTP 500 as the content.
# How nice.
return result
except urllib2.HTTPError, e:
if retry >= (self._maxtries - 1):
raise
flake_codes = [500, 502, 503]
if retry_on_404:
flake_codes.append(404)
if e.code not in flake_codes:
raise
except urllib2.URLError, e:
if retry >= (self._maxtries - 1):
raise
if (not 'Name or service not known' in e.reason and
not 'EOF occurred in violation of protocol' in e.reason and
# On windows we hit weird bug http://crbug.com/537417
# with message '[Errno 10060] A connection attempt failed...'
not (sys.platform.startswith('win') and
isinstance(e.reason, socket.error) and
e.reason.errno == errno.ETIMEDOUT
)
):
# Usually internal GAE flakiness.
raise
except ssl.SSLError, e:
if retry >= (self._maxtries - 1):
raise
if not 'timed out' in str(e):
raise
# If reaching this line, loop again. Uses a small backoff.
time.sleep(min(10, 1+retry*2))
except urllib2.HTTPError as e:
print 'Request to %s failed: %s' % (e.geturl(), e.read())
raise
finally:
upload.ErrorExit = old_error_exit
# DEPRECATED.
Send = get
class OAuthRpcServer(object):
def __init__(self,
host,
client_email,
client_private_key,
private_key_password='notasecret',
user_agent=None,
timeout=None,
extra_headers=None):
"""Wrapper around httplib2.Http() that handles authentication.
client_email: email associated with the service account
client_private_key: encrypted private key, as a string
private_key_password: password used to decrypt the private key
"""
# Enforce https
host_parts = urlparse.urlparse(host)
if host_parts.scheme == 'https': # fine
self.host = host
elif host_parts.scheme == 'http':
upload.logging.warning('Changing protocol to https')
self.host = 'https' + host[4:]
else:
msg = 'Invalid url provided: %s' % host
upload.logging.error(msg)
raise ValueError(msg)
self.host = self.host.rstrip('/')
self.extra_headers = extra_headers or {}
if not oa2client.HAS_OPENSSL:
logging.error("No support for OpenSSL has been found, "
"OAuth2 support requires it.")
logging.error("Installing pyopenssl will probably solve this issue.")
raise RuntimeError('No OpenSSL support')
self.creds = oa2client.SignedJwtAssertionCredentials(
client_email,
client_private_key,
'https://www.googleapis.com/auth/userinfo.email',
private_key_password=private_key_password,
user_agent=user_agent)
self._http = self.creds.authorize(httplib2.Http(timeout=timeout))
def Send(self,
request_path,
payload=None,
content_type='application/octet-stream',
timeout=None,
extra_headers=None,
**kwargs):
"""Send a POST or GET request to the server.
Args:
request_path: path on the server to hit. This is concatenated with the
value of 'host' provided to the constructor.
payload: request is a POST if not None, GET otherwise
timeout: in seconds
extra_headers: (dict)
"""
# This method signature should match upload.py:AbstractRpcServer.Send()
method = 'GET'
headers = self.extra_headers.copy()
headers.update(extra_headers or {})
if payload is not None:
method = 'POST'
headers['Content-Type'] = content_type
prev_timeout = self._http.timeout
try:
if timeout:
self._http.timeout = timeout
# TODO(pgervais) implement some kind of retry mechanism (see upload.py).
url = self.host + request_path
if kwargs:
url += "?" + urllib.urlencode(kwargs)
# This weird loop is there to detect when the OAuth2 token has expired.
# This is specific to appengine *and* rietveld. It relies on the
# assumption that a 302 is triggered only by an expired OAuth2 token. This
# prevents any usage of redirections in pages accessed this way.
# This variable is used to make sure the following loop runs only twice.
redirect_caught = False
while True:
try:
ret = self._http.request(url,
method=method,
body=payload,
headers=headers,
redirections=0)
except httplib2.RedirectLimit:
if redirect_caught or method != 'GET':
logging.error('Redirection detected after logging in. Giving up.')
raise
redirect_caught = True
logging.debug('Redirection detected. Trying to log in again...')
self.creds.access_token = None
continue
break
return ret[1]
finally:
self._http.timeout = prev_timeout
class JwtOAuth2Rietveld(Rietveld):
"""Access to Rietveld using OAuth authentication.
This class is supposed to be used only by bots, since this kind of
access is restricted to service accounts.
"""
# The parent__init__ is not called on purpose.
# pylint: disable=W0231
def __init__(self,
url,
client_email,
client_private_key_file,
private_key_password=None,
extra_headers=None,
maxtries=None):
if private_key_password is None: # '' means 'empty password'
private_key_password = 'notasecret'
self.url = url.rstrip('/')
bot_url = self.url
if self.url.endswith('googleplex.com'):
bot_url = self.url + '/bots'
with open(client_private_key_file, 'rb') as f:
client_private_key = f.read()
logging.info('Using OAuth login: %s' % client_email)
self.rpc_server = OAuthRpcServer(bot_url,
client_email,
client_private_key,
private_key_password=private_key_password,
extra_headers=extra_headers or {})
self._xsrf_token = None
self._xsrf_token_time = None
self._maxtries = maxtries or 40
class CachingRietveld(Rietveld):
"""Caches the common queries.
Not to be used in long-standing processes, like the commit queue.
"""
def __init__(self, *args, **kwargs):
super(CachingRietveld, self).__init__(*args, **kwargs)
self._cache = {}
def _lookup(self, function_name, args, update):
"""Caches the return values corresponding to the arguments.
It is important that the arguments are standardized, like None vs False.
"""
function_cache = self._cache.setdefault(function_name, {})
if args not in function_cache:
function_cache[args] = update(*args)
return copy.deepcopy(function_cache[args])
def get_description(self, issue):
return self._lookup(
'get_description',
(issue,),
super(CachingRietveld, self).get_description)
def get_issue_properties(self, issue, messages):
"""Returns the issue properties.
Because in practice the presubmit checks often ask without messages first
and then with messages, always ask with messages and strip off if not asked
for the messages.
"""
# It's a tad slower to request with the message but it's better than
# requesting the properties twice.
data = self._lookup(
'get_issue_properties',
(issue, True),
super(CachingRietveld, self).get_issue_properties)
if not messages:
# Assumes self._lookup uses deepcopy.
del data['messages']
return data
def get_patchset_properties(self, issue, patchset):
return self._lookup(
'get_patchset_properties',
(issue, patchset),
super(CachingRietveld, self).get_patchset_properties)
class ReadOnlyRietveld(object):
"""
Only provides read operations, and simulates writes locally.
Intentionally do not inherit from Rietveld to avoid any write-issuing
logic to be invoked accidentally.
"""
# Dictionary of local changes, indexed by issue number as int.
_local_changes = {}
def __init__(self, *args, **kwargs):
# We still need an actual Rietveld instance to issue reads, just keep
# it hidden.
self._rietveld = Rietveld(*args, **kwargs)
@classmethod
def _get_local_changes(cls, issue):
"""Returns dictionary of local changes for |issue|, if any."""
return cls._local_changes.get(issue, {})
@property
def url(self):
return self._rietveld.url
def get_pending_issues(self):
pending_issues = self._rietveld.get_pending_issues()
# Filter out issues we've closed or unchecked the commit checkbox.
return [issue for issue in pending_issues
if not self._get_local_changes(issue).get('closed', False) and
self._get_local_changes(issue).get('commit', True)]
def close_issue(self, issue): # pylint:disable=R0201
logging.info('ReadOnlyRietveld: closing issue %d' % issue)
ReadOnlyRietveld._local_changes.setdefault(issue, {})['closed'] = True
def get_issue_properties(self, issue, messages):
data = self._rietveld.get_issue_properties(issue, messages)
data.update(self._get_local_changes(issue))
return data
def get_patchset_properties(self, issue, patchset):
return self._rietveld.get_patchset_properties(issue, patchset)
def get_depends_on_patchset(self, issue, patchset):
return self._rietveld.get_depends_on_patchset(issue, patchset)
def get_patch(self, issue, patchset):
return self._rietveld.get_patch(issue, patchset)
def update_description(self, issue, description): # pylint:disable=R0201
logging.info('ReadOnlyRietveld: new description for issue %d: %s' %
(issue, description))
def add_comment(self, # pylint:disable=R0201
issue,
message,
add_as_reviewer=False):
logging.info('ReadOnlyRietveld: posting comment "%s" to issue %d' %
(message, issue))
def set_flag(self, issue, patchset, flag, value): # pylint:disable=R0201
logging.info('ReadOnlyRietveld: setting flag "%s" to "%s" for issue %d' %
(flag, value, issue))
ReadOnlyRietveld._local_changes.setdefault(issue, {})[flag] = value
def trigger_try_jobs( # pylint:disable=R0201
self, issue, patchset, reason, clobber, revision, builders_and_tests,
master=None, category='cq'):
logging.info('ReadOnlyRietveld: triggering try jobs %r for issue %d' %
(builders_and_tests, issue))
def trigger_distributed_try_jobs( # pylint:disable=R0201
self, issue, patchset, reason, clobber, revision, masters,
category='cq'):
logging.info('ReadOnlyRietveld: triggering try jobs %r for issue %d' %
(masters, issue))
| bsd-3-clause | 759,426,349,750,002,600 | 34.253045 | 80 | 0.620183 | false | 3.801547 | false | false | false |
| gonicus/gosa | backend/src/gosa/backend/plugins/samba/logonhours.py | 1 | 2755 |
# This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import time
from gosa.backend.objects.types import AttributeType
class SambaLogonHoursAttribute(AttributeType):
"""
This is a special object-attribute-type for sambaLogonHours.
This call can convert sambaLogonHours to a UnicodeString and vice versa.
It is used in the samba-object definition file.
"""
__alias__ = "SambaLogonHours"
def values_match(self, value1, value2):
return str(value1) == str(value2)
def is_valid_value(self, value):
if len(value):
try:
# Check if each week day contains 24 values.
if type(value[0]) is not str or len(value[0]) != 168 or len(set(value[0]) - set('01')):
return False
return True
except:
return False
def _convert_to_unicodestring(self, value):
"""
This method is a converter used when values get read from or written to the backend.
Converts the 'SambaLogonHours' object-type into a 'UnicodeString'-object.
"""
if len(value):
# Combine the binary strings
lstr = value[0]
# Now reverse every 8-bit part and toggle the high and low halves (4 bits each)
new = ""
for i in range(0, 21):
n = lstr[i * 8:((i + 1) * 8)]
n = n[0:4] + n[4:]
n = n[::-1]
n = str(hex(int(n, 2)))[2::].rjust(2, '0')
new += n
value = [new.upper()]
return value
def _convert_from_string(self, value):
return self._convert_from_unicodestring(value)
def _convert_from_unicodestring(self, value):
"""
This method is a converter used when values get read from or written to the backend.
Converts a 'UnicodeString' attribute into the 'SambaLogonHours' object-type.
"""
if len(value):
# Convert each hex-pair into binary values.
# Then reverse the binary result and switch high and low pairs.
value = value[0]
lstr = ""
for i in range(0, 42, 2):
n = (bin(int(value[i:i + 2], 16))[2::]).rjust(8, '0')
n = n[::-1]
lstr += n[0:4] + n[4:]
# Shift lstr by the timezone offset
shift_by = int((168 + (time.timezone/3600)) % 168)
lstr = lstr[shift_by:] + lstr[:shift_by]
# Parse result into more readable value
value = [lstr]
return value
| lgpl-2.1 | 5,620,265,410,477,604,000 | 29.611111 | 103 | 0.549546 | false | 3.837047 | false | false | false |
| mice-software/maus | tests/integration/test_simulation/test_beam_maker/binomial_beam_config.py | 1 | 4151 |
# This file is part of MAUS: http://micewww.pp.rl.ac.uk:8080/projects/maus
#
# MAUS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAUS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAUS. If not, see <http://www.gnu.org/licenses/>.
"""
Configuration to generate a beam distribution with binomial distribution in
the spill and various distributions for difference particle types
"""
#pylint: disable = C0103, R0801
import os
mrd = os.environ["MAUS_ROOT_DIR"]
simulation_geometry_filename = os.path.join(
mrd, "tests", "integration", "test_simulation", "test_beam_maker",
"BeamTest.dat"
)
output_root_file_name = os.path.join(mrd, "tmp", "test_beammaker_output.root")
input_root_file_name = output_root_file_name # for conversion
spill_generator_number_of_spills = 1000
verbose_level = 1
beam = {
"particle_generator":"binomial", # routine for generating empty primaries
"binomial_n":20, # number of coin tosses
"binomial_p":0.1, # probability of making a particle on each toss
"random_seed":5, # random seed for beam generation; controls also how the MC
# seeds are generated
"definitions":[
##### MUONS #######
{
"reference":{
"position":{"x":0.0, "y":0.0, "z":3.0},
"momentum":{"x":0.0, "y":0.0, "z":1.0},
"spin":{"x":0.0, "y":0.0, "z":1.0},
"particle_id":-13,
"energy":226.0,
"time":2.e6,
"random_seed":0
}, # reference particle
"random_seed_algorithm":"incrementing_random", # algorithm for seeding MC
"weight":90., # probability of generating a particle
"transverse":{
"transverse_mode":"penn",
"emittance_4d":6.,
"beta_4d":333.,
"alpha_4d":1.,
"normalised_angular_momentum":2.,
"bz":4.e-3
},
"longitudinal":{
"longitudinal_mode":"sawtooth_time",
"momentum_variable":"p",
"sigma_p":25.,
"t_start":-1.e6,
"t_end":+1.e6},
"coupling":{"coupling_mode":"none"}
},
##### PIONS #####
{ # as above...
"reference":{
"position":{"x":0.0, "y":-0.0, "z":0.0},
"momentum":{"x":0.0, "y":0.0, "z":1.0},
"spin":{"x":0.0, "y":0.0, "z":1.0},
"particle_id":211, "energy":285.0, "time":0.0, "random_seed":10
},
"random_seed_algorithm":"incrementing_random",
"weight":2.,
"transverse":{"transverse_mode":"constant_solenoid", "emittance_4d":6.,
"normalised_angular_momentum":0.1, "bz":4.e-3},
"longitudinal":{"longitudinal_mode":"uniform_time",
"momentum_variable":"p",
"sigma_p":25.,
"t_start":-1.e6,
"t_end":+1.e6},
"coupling":{"coupling_mode":"none"}
},
##### ELECTRONS #####
{ # as above...
"reference":{
"position":{"x":0.0, "y":-0.0, "z":0.0},
"momentum":{"x":0.0, "y":0.0, "z":1.0},
"spin":{"x":0.0, "y":0.0, "z":1.0},
"particle_id":-11, "energy":200.0, "time":0.0, "random_seed":10
},
"random_seed_algorithm":"incrementing_random",
"weight":8.,
"transverse":{"transverse_mode":"constant_solenoid", "emittance_4d":6.,
"normalised_angular_momentum":0.1, "bz":4.e-3},
"longitudinal":{"longitudinal_mode":"uniform_time",
"momentum_variable":"p",
"sigma_p":25.,
"t_start":-2.e6,
"t_end":+1.e6},
"coupling":{"coupling_mode":"none"}
}]
}
| gpl-3.0 | 8,497,676,633,273,780,000 | 37.082569 | 80 | 0.544688 | false | 3.245504 | false | false | false |
| amagnus/pulsegig | app/models.py | 1 | 1894 |
from django.db import models
from django.contrib.auth.models import User
class Guy(models.Model):
    user = models.OneToOneField(User, primary_key=True)
    cell = models.CharField(max_length=15)
    metroarea_name = models.CharField(max_length=30, default=None, null=True)
    metroareaID = models.IntegerField(default=None, null=True)
    created = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.user


class Band(models.Model):
    name = models.CharField(max_length=100)
    genre = models.CharField(max_length=100)
    skID = models.IntegerField()
    created = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.name


class SimilarBand(models.Model):
    band_input = models.ForeignKey(Band, related_name='band_input')
    band_suggest = models.ForeignKey(Band, related_name='band_suggest')
    disabled = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.band_input.name


class Alert(models.Model):
    user = models.ForeignKey(User)
    band = models.ForeignKey(Band)
    disabled = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.band.name


class AlertLog(models.Model):
    user = models.ForeignKey(User)
    band = models.ForeignKey(Band)
    eventskID = models.IntegerField(default=None)
    showDate = models.DateField()
    showURL = models.CharField(max_length=255)
    is_similar = models.BooleanField(default=False)
    send_on = models.DateTimeField()
    has_sent = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.band.name
| mit | -6,112,383,756,835,243,000 | 30.566667 | 77 | 0.705913 | false | 3.743083 | false | false | false |
| PyBossa/pybossa | pybossa/auth/token.py | 1 | 1271 |
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
class TokenAuth(object):

    _specific_actions = []

    @property
    def specific_actions(self):
        return self._specific_actions

    def can(self, user, action, _, token=None):
        action = ''.join(['_', action])
        return getattr(self, action)(user, token)

    def _create(self, user, token=None):
        return False

    def _read(self, user, token=None):
        return not user.is_anonymous()

    def _update(self, user, token):
        return False

    def _delete(self, user, token):
        return False
| agpl-3.0 | 7,030,215,194,736,983,000 | 30 | 77 | 0.683714 | false | 3.875 | false | false | false |
| ekumenlabs/terminus | terminus/generators/rndf_id_mapper.py | 1 | 2695 |
"""
Copyright (C) 2017 Open Source Robotics Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from city_visitor import CityVisitor
from models.polyline_geometry import PolylineGeometry
class RNDFIdMapper(CityVisitor):
"""Simple city visitor that generates the RNDF ids for segments,
lanes and waypoints. Ids and objects are stored in two dictionaries,
so we can later perform lookups in either way"""
# Note: For the time being we treat streets and trunks in the same way,
# hence generating a single lane for any of them. This will change in the
# future, when we properly support multi-lanes trunks.
def run(self):
self.segment_id = 0
self.waypoint_id = 0
self.lane_id = 0
self.object_to_id_level_1 = {}
self.object_to_id_level_2 = {}
self.id_to_object = {}
super(RNDFIdMapper, self).run()
def id_for(self, object):
try:
return self.object_to_id_level_1[id(object)]
except KeyError:
return self.object_to_id_level_2[object]
def object_for(self, id):
return self.id_to_object[id]
def map_road(self, road):
self.segment_id = self.segment_id + 1
self.lane_id = 0
self._register(str(self.segment_id), road)
def start_street(self, street):
self.map_road(street)
def start_trunk(self, trunk):
self.map_road(trunk)
def start_lane(self, lane):
self.lane_id = self.lane_id + 1
rndf_lane_id = str(self.segment_id) + '.' + str(self.lane_id)
self._register(rndf_lane_id, lane)
self.waypoint_id = 0
for waypoint in lane.waypoints_for(PolylineGeometry):
self.waypoint_id = self.waypoint_id + 1
rndf_waypoint_id = rndf_lane_id + '.' + str(self.waypoint_id)
self._register(rndf_waypoint_id, waypoint)
def _register(self, rndf_id, object):
"""We do some caching by id, to avoid computing hashes if they are
expensive, but keep the hash-based dict as a fallback"""
self.object_to_id_level_1[id(object)] = rndf_id
self.object_to_id_level_2[object] = rndf_id
self.id_to_object[rndf_id] = object
| apache-2.0 | -5,815,910,434,938,483,000 | 35.418919 | 77 | 0.661224 | false | 3.550725 | false | false | false |
| wkia/kodi-addon-repo | plugin.audio.openlast/default.py | 1 | 6672 |
# -*- coding: utf-8 -*-
import os
import sys
import urllib
import urlparse
import xbmc  # used below for xbmc.Keyboard() and xbmc.executebuiltin()
import xbmcaddon
import xbmcgui
import xbmcplugin
if sys.version_info < (2, 7):
import simplejson as json
else:
import json
from logging import log
from util import build_url
__addon__ = xbmcaddon.Addon()
#__addonid__ = __addon__.getAddonInfo('id')
#__settings__ = xbmcaddon.Addon(id='xbmc-vk.svoka.com')
#__language__ = __settings__.getLocalizedString
#LANGUAGE = __addon__.getLocalizedString
ADDONVERSION = __addon__.getAddonInfo('version')
CWD = __addon__.getAddonInfo('path').decode("utf-8")
log('start -----------------------------------------------------')
log('script version %s started' % ADDONVERSION)
#xbmc.log(str(sys.argv))
addonUrl = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
#my_addon = xbmcaddon.Addon()
# lastfmUser = my_addon.getSetting('lastfm_username')
xbmcplugin.setContent(addon_handle, 'audio')
lastfmApi = 'http://ws.audioscrobbler.com/2.0/'
lastfmApiKey = '47608ece2138b2edae9538f83f703457' # TODO use Openlast key
lastfmAddon = None
lastfmUser = ''
try:
lastfmAddon = xbmcaddon.Addon('service.scrobbler.lastfm')
lastfmUser = lastfmAddon.getSetting('lastfmuser')
except RuntimeError:
pass
#xbmc.log(str(args))
action = args.get('action', None)
folder = args.get('folder', None)
#xbmc.log('openlast: folder=' + str(folder)) #, xbmc.LOGDEBUG)
#xbmc.log('openlast: action=' + str(action)) #, xbmc.LOGDEBUG)
if folder is None:
url = build_url(addonUrl, {'folder': 'similarArtist'})
li = xbmcgui.ListItem('Similar artist radio', iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
if '' != lastfmUser:
url = build_url(addonUrl, {'folder': 'lastfm', 'username': lastfmUser})
# xbmc.log(url)
li = xbmcgui.ListItem('Personal radio for Last.fm user: ' + lastfmUser, iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
url = build_url(addonUrl, {'folder': 'lastfm'})
li = xbmcgui.ListItem('Personal radio for Last.fm user...', iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif folder[0] == 'lastfm':
username = ''
if None != args.get('username'):
username = args.get('username')[0]
playcount = 0
if None != args.get('playcount'):
playcount = int(args.get('playcount')[0])
if username == '':
user_keyboard = xbmc.Keyboard()
user_keyboard.setHeading('Last.FM user name') # __language__(30001))
user_keyboard.setHiddenInput(False)
user_keyboard.setDefault(lastfmUser)
user_keyboard.doModal()
if user_keyboard.isConfirmed():
username = user_keyboard.getText()
else:
raise Exception("Login input was cancelled.")
if action is None:
url = build_url(lastfmApi, {'method': 'user.getInfo', 'user': username,
'format': 'json', 'api_key': lastfmApiKey})
reply = urllib.urlopen(url)
resp = json.load(reply)
if "error" in resp:
raise Exception("Error! DATA: " + str(resp))
else:
# xbmc.log(str(resp))
pass
playcount = int(resp['user']['playcount'])
img = resp['user']['image'][2]['#text']
if '' == img:
img = 'DefaultAudio.png'
url = build_url(addonUrl, {'folder': folder[0], 'action': 'lovedTracks', 'username': username})
li = xbmcgui.ListItem('Listen to loved tracks', iconImage=img)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
url = build_url(addonUrl, {'folder': folder[0], 'action': 'topTracks', 'username': username, 'playcount': playcount})
li = xbmcgui.ListItem('Listen to track library', iconImage=img)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
url = build_url(addonUrl, {'folder': folder[0], 'action': 'topArtists', 'username': username, 'playcount': playcount})
li = xbmcgui.ListItem('Listen to artist library', iconImage=img)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
url = build_url(addonUrl, {'folder': folder[0], 'action': 'syncLibrary', 'username': username, 'playcount': playcount})
li = xbmcgui.ListItem('[EXPERIMENTAL] Syncronize library to folder', iconImage=img)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
xbmcplugin.endOfDirectory(addon_handle)
elif action[0] == 'lovedTracks':
script = os.path.join(CWD, "run_app.py")
log('running script %s...' % script)
xbmc.executebuiltin('XBMC.RunScript(%s, %s, %s)' % (script, action[0], username))
elif action[0] == 'topTracks':
script = os.path.join(CWD, "run_app.py")
log('running script %s...' % script)
xbmc.executebuiltin('XBMC.RunScript(%s, %s, %s, %s)' % (script, action[0], username, playcount))
elif action[0] == 'topArtists':
script = os.path.join(CWD, "run_app.py")
log('running script %s...' % script)
xbmc.executebuiltin('XBMC.RunScript(%s, %s, %s, %s)' % (script, action[0], username, playcount))
elif action[0] == 'syncLibrary':
script = os.path.join(CWD, "run_app.py")
log('running script %s...' % script)
xbmc.executebuiltin('XBMC.RunScript(%s, %s, %s)' % (script, action[0], username))
elif folder[0] == 'similarArtist':
if action is None:
url = build_url(lastfmApi, {'method': 'chart.getTopArtists',
'format': 'json', 'api_key': lastfmApiKey})
reply = urllib.urlopen(url)
resp = json.load(reply)
if "error" in resp:
raise Exception("Error! DATA: " + str(resp))
else:
#log(str(resp))
pass
for a in resp['artists']['artist']:
url = build_url(addonUrl, {'folder': folder[0], 'action': a['name'].encode('utf-8')})
li = xbmcgui.ListItem(a['name'])
li.setArt({'icon': a['image'][2]['#text'], 'thumb': a['image'][2]['#text'], 'fanart': a['image'][4]['#text']})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
pass
xbmcplugin.endOfDirectory(addon_handle)
log('end -----------------------------------------------------')
| gpl-2.0 | 4,180,069,975,239,429,000 | 37.566474 | 127 | 0.618855 | false | 3.439175 | false | false | false |
| mozman/ezdxf | tests/test_06_math/test_630b_bezier4p_functions.py | 1 | 4662 |
# Copyright (c) 2010-2020 Manfred Moitzi
# License: MIT License
import pytest
import random
from ezdxf.math import (
cubic_bezier_interpolation, Vec3, Bezier3P, quadratic_to_cubic_bezier,
Bezier4P, have_bezier_curves_g1_continuity, bezier_to_bspline,
)
def test_vertex_interpolation():
points = [(0, 0), (3, 1), (5, 3), (0, 8)]
result = list(cubic_bezier_interpolation(points))
assert len(result) == 3
c1, c2, c3 = result
p = c1.control_points
assert p[0].isclose((0, 0))
assert p[1].isclose((0.9333333333333331, 0.3111111111111111))
assert p[2].isclose((1.8666666666666663, 0.6222222222222222))
assert p[3].isclose((3, 1))
p = c2.control_points
assert p[0].isclose((3, 1))
assert p[1].isclose((4.133333333333334, 1.3777777777777778))
assert p[2].isclose((5.466666666666667, 1.822222222222222))
assert p[3].isclose((5, 3))
p = c3.control_points
assert p[0].isclose((5, 3))
assert p[1].isclose((4.533333333333333, 4.177777777777778))
assert p[2].isclose((2.2666666666666666, 6.088888888888889))
assert p[3].isclose((0, 8))
def test_quadratic_to_cubic_bezier():
r = random.Random(0)
def random_vec() -> Vec3:
return Vec3(r.uniform(-10, 10), r.uniform(-10, 10), r.uniform(-10, 10))
for i in range(1000):
quadratic = Bezier3P((random_vec(), random_vec(), random_vec()))
quadratic_approx = list(quadratic.approximate(10))
cubic = quadratic_to_cubic_bezier(quadratic)
cubic_approx = list(cubic.approximate(10))
assert len(quadratic_approx) == len(cubic_approx)
for p1, p2 in zip(quadratic_approx, cubic_approx):
assert p1.isclose(p2)
# G1 continuity: normalized end-tangent == normalized start-tangent of next curve
B1 = Bezier4P([(0, 0), (1, 1), (2, 1), (3, 0)])
# B1/B2 has G1 continuity:
B2 = Bezier4P([(3, 0), (4, -1), (5, -1), (6, 0)])
# B1/B3 has no G1 continuity:
B3 = Bezier4P([(3, 0), (4, 1), (5, 1), (6, 0)])
# B1/B4 G1 continuity off tolerance:
B4 = Bezier4P([(3, 0), (4, -1.03), (5, -1.0), (6, 0)])
# B1/B5 has a gap between B1 end and B5 start:
B5 = Bezier4P([(4, 0), (5, -1), (6, -1), (7, 0)])
def test_g1_continuity_for_bezier_curves():
assert have_bezier_curves_g1_continuity(B1, B2) is True
assert have_bezier_curves_g1_continuity(B1, B3) is False
assert have_bezier_curves_g1_continuity(B1, B4, g1_tol=1e-4) is False, \
"should be outside of tolerance "
assert have_bezier_curves_g1_continuity(B1, B5) is False, \
"end- and start point should match"
D1 = Bezier4P([(0, 0), (1, 1), (3, 0), (3, 0)])
D2 = Bezier4P([(3, 0), (3, 0), (5, -1), (6, 0)])
def test_g1_continuity_for_degenerated_bezier_curves():
assert have_bezier_curves_g1_continuity(D1, B2) is False
assert have_bezier_curves_g1_continuity(B1, D2) is False
assert have_bezier_curves_g1_continuity(D1, D2) is False
@pytest.mark.parametrize('curve', [D1, D2])
def test_flatten_degenerated_bezier_curves(curve):
# Degenerated Bezier curves behave like regular curves!
assert len(list(curve.flattening(0.1))) > 4
@pytest.mark.parametrize("b1,b2", [
(B1, B2), # G1 continuity, the common case
(B1, B3), # without G1 continuity is also a regular B-spline
(B1, B5), # regular B-spline, but first control point of B5 is lost
], ids=["G1", "without G1", "gap"])
def test_bezier_curves_to_bspline(b1, b2):
bspline = bezier_to_bspline([b1, b2])
# Remove duplicate control point between two adjacent curves:
expected = list(b1.control_points) + list(b2.control_points)[1:]
assert bspline.degree == 3, "should be a cubic B-spline"
assert bspline.control_points == tuple(expected)
def test_quality_of_bezier_to_bspline_conversion_1():
# This test shows the close relationship between cubic Bézier- and
# cubic B-spline curves.
points0 = B1.approximate(10)
points1 = bezier_to_bspline([B1]).approximate(10)
for p0, p1 in zip(points0, points1):
assert p0.isclose(p1) is True, "conversion should be perfect"
def test_quality_of_bezier_to_bspline_conversion_2():
# This test shows the close relationship between cubic Bézier- and
# cubic B-spline curves.
# Remove duplicate point between the two curves:
points0 = list(B1.approximate(10)) + list(B2.approximate(10))[1:]
points1 = bezier_to_bspline([B1, B2]).approximate(20)
for p0, p1 in zip(points0, points1):
assert p0.isclose(p1) is True, "conversion should be perfect"
def test_bezier_curves_to_bspline_error():
with pytest.raises(ValueError):
bezier_to_bspline([]) # one or more curves expected
| mit | 2,218,089,063,526,213,400 | 35.40625 | 81 | 0.65794 | false | 2.729936 | true | false | false |
| fdouetteau/PyBabe | pybabe/format_csv.py | 1 | 3107 |
from base import BabeBase, StreamHeader, StreamFooter
import csv
from charset import UTF8Recoder, UTF8RecoderWithCleanup, PrefixReader, UnicodeCSVWriter
import codecs
import logging
log = logging.getLogger("csv")
def linepull(stream, dialect, kwargs):
it = iter(stream)
fields = kwargs.get('fields', None)
if not fields:
fields = [it.next().rstrip('\r\n')]
metainfo = StreamHeader(**dict(kwargs, fields=fields))
yield metainfo
for row in it:
yield metainfo.t._make([row.rstrip('\r\n')])
yield StreamFooter()
def build_value(x, null_value):
if x == null_value:
return None
else:
return unicode(x, "utf-8")
def csvpull(stream, dialect, kwargs):
reader = csv.reader(stream, dialect)
fields = kwargs.get('fields', None)
null_value = kwargs.get('null_value', "")
ignore_malformed = kwargs.get('ignore_bad_lines', False)
if not fields:
fields = reader.next()
metainfo = StreamHeader(**dict(kwargs, fields=fields))
yield metainfo
for row in reader:
try:
yield metainfo.t._make([build_value(x, null_value) for x in row])
except Exception, e:
if ignore_malformed:
log.warn("Malformed line: %s, %s" % (row, e))
else:
raise e
yield StreamFooter()
def pull(format, stream, kwargs):
if kwargs.get('utf8_cleanup', False):
stream = UTF8RecoderWithCleanup(stream, kwargs.get('encoding', 'utf-8'))
elif codecs.getreader(kwargs.get('encoding', 'utf-8')) != codecs.getreader('utf-8'):
stream = UTF8Recoder(stream, kwargs.get('encoding', None))
else:
pass
delimiter = kwargs.get('delimiter', None)
sniff_read = stream.next()
stream = PrefixReader(sniff_read, stream, linefilter=kwargs.get("linefilter", None))
dialect = csv.Sniffer().sniff(sniff_read)
if sniff_read.endswith('\r\n'):
dialect.lineterminator = '\r\n'
else:
dialect.lineterminator = '\n'
if dialect.delimiter.isalpha() and not delimiter:
# http://bugs.python.org/issue2078
for row in linepull(stream, dialect, kwargs):
yield row
return
if delimiter:
dialect.delimiter = delimiter
for row in csvpull(stream, dialect, kwargs):
yield row
class default_dialect(csv.Dialect):
lineterminator = '\n'
delimiter = ','
doublequote = False
escapechar = '\\'
quoting = csv.QUOTE_MINIMAL
quotechar = '"'
def push(format, metainfo, instream, outfile, encoding, delimiter=None, **kwargs):
if not encoding:
encoding = "utf8"
dialect = kwargs.get('dialect', default_dialect)
if delimiter:
dialect.delimiter = delimiter
writer = UnicodeCSVWriter(outfile, dialect=dialect, encoding=encoding)
writer.writerow(metainfo.fields)
for k in instream:
if isinstance(k, StreamFooter):
break
else:
writer.writerow(k)
BabeBase.addPullPlugin('csv', ['csv', 'tsv', 'txt'], pull)
BabeBase.addPushPlugin('csv', ['csv', 'tsv', 'txt'], push)
| bsd-3-clause | -8,952,105,549,496,500,000 | 30.07 | 88 | 0.631799 | false | 3.703218 | false | false | false |
| filippog/pysnmp | examples/hlapi/asyncore/sync/agent/ntforg/v3-trap.py | 1 | 1601 |
"""
SNMPv3 TRAP: auth SHA, privacy: AES128
++++++++++++++++++++++++++++++++++++++
Send SNMP notification using the following options:
* SNMPv3
* with authoritative snmpEngineId = 0x8000000001020304
(USM must be configured at the Receiver accordingly)
* with user 'usr-sha-aes128', auth: SHA, priv: AES128
* over IPv4/UDP
* send TRAP notification
* with TRAP ID 'authenticationFailure' specified as a MIB symbol
* do not include any additional managed object information
SNMPv3 TRAPs requires pre-sharing the Notification Originator's
value of SnmpEngineId with Notification Receiver. To facilitate that
we will use static (e.g. not autogenerated) version of snmpEngineId.
Functionally similar to:
| $ snmptrap -v3 -e 8000000001020304 -l authPriv -u usr-sha-aes -A authkey1 -X privkey1 -a SHA -x AES demo.snmplabs.com 12345 1.3.6.1.4.1.20408.4.1.1.2 1.3.6.1.2.1.1.1.0 s "my system"
"""#
from pysnmp.hlapi import *
errorIndication, errorStatus, errorIndex, varBinds = next(
sendNotification(SnmpEngine(OctetString(hexValue='8000000001020304')),
UsmUserData('usr-sha-aes128', 'authkey1', 'privkey1',
authProtocol=usmHMACSHAAuthProtocol,
privProtocol=usmAesCfb128Protocol),
UdpTransportTarget(('demo.snmplabs.com', 162)),
ContextData(),
'trap',
NotificationType(
ObjectIdentity('SNMPv2-MIB', 'authenticationFailure')
)
)
)
if errorIndication:
print(errorIndication)
| bsd-3-clause | 7,296,825,608,207,418,000 | 38.04878 | 183 | 0.647096 | false | 3.573661 | false | true | false |
| SU-ECE-17-7/hotspotter | hsviz/draw_func2.py | 1 | 54605 |
''' Lots of functions for drawing and plotting visiony things '''
# TODO: New naming scheme
# viz_<func_name> will clear everything. The current axes and fig: clf, cla. # Will add annotations
# interact_<func_name> will clear everything and start user interactions.
# show_<func_name> will always clear the current axes, but not fig: cla # Might # add annotates?
# plot_<func_name> will not clear the axes or figure. More useful for graphs
# draw_<func_name> same as plot for now. More useful for images
from __future__ import division, print_function
from hscom import __common__
(print, print_, print_on, print_off, rrr, profile,
printDBG) = __common__.init(__name__, '[df2]', DEBUG=False, initmpl=True)
# Python
from itertools import izip
from os.path import splitext, split, join, normpath, exists
import colorsys
import itertools
import pylab
import sys
import textwrap
import time
import warnings
# Matplotlib / Qt
import matplotlib
import matplotlib as mpl # NOQA
from matplotlib.collections import PatchCollection, LineCollection
from matplotlib.font_manager import FontProperties
from matplotlib.patches import Rectangle, Circle, FancyArrow
from matplotlib.transforms import Affine2D
from matplotlib.backends import backend_qt4
import matplotlib.pyplot as plt
# Qt
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
# Scientific
import numpy as np
import scipy.stats
import cv2
# HotSpotter
from hscom import helpers
from hscom import tools
from hscom.Printable import DynStruct
#================
# GLOBALS
#================
TMP_mevent = None
QT4_WINS = []
plotWidget = None
# GENERAL FONTS
SMALLER = 8
SMALL = 10
MED = 12
LARGE = 14
#fpargs = dict(family=None, style=None, variant=None, stretch=None, fname=None)
FONTS = DynStruct()
FONTS.small = FontProperties(weight='light', size=SMALL)
FONTS.smaller = FontProperties(weight='light', size=SMALLER)
FONTS.med = FontProperties(weight='light', size=MED)
FONTS.large = FontProperties(weight='light', size=LARGE)
FONTS.medbold = FontProperties(weight='bold', size=MED)
FONTS.largebold = FontProperties(weight='bold', size=LARGE)
# SPECIFIC FONTS
FONTS.legend = FONTS.small
FONTS.figtitle = FONTS.med
FONTS.axtitle = FONTS.med
FONTS.subtitle = FONTS.med
FONTS.xlabel = FONTS.smaller
FONTS.ylabel = FONTS.small
FONTS.relative = FONTS.smaller
# COLORS
ORANGE = np.array((255, 127, 0, 255)) / 255.0
RED = np.array((255, 0, 0, 255)) / 255.0
GREEN = np.array(( 0, 255, 0, 255)) / 255.0
BLUE = np.array(( 0, 0, 255, 255)) / 255.0
YELLOW = np.array((255, 255, 0, 255)) / 255.0
BLACK = np.array(( 0, 0, 0, 255)) / 255.0
WHITE = np.array((255, 255, 255, 255)) / 255.0
GRAY = np.array((127, 127, 127, 255)) / 255.0
DEEP_PINK = np.array((255, 20, 147, 255)) / 255.0
PINK = np.array((255, 100, 100, 255)) / 255.0
FALSE_RED = np.array((255, 51, 0, 255)) / 255.0
TRUE_GREEN = np.array(( 0, 255, 0, 255)) / 255.0
DARK_ORANGE = np.array((127, 63, 0, 255)) / 255.0
DARK_YELLOW = np.array((127, 127, 0, 255)) / 255.0
PURPLE = np.array((102, 0, 153, 255)) / 255.0
UNKNOWN_PURP = PURPLE
# FIGURE GEOMETRY
DPI = 80
#DPI = 160
#FIGSIZE = (24) # default windows fullscreen
FIGSIZE_MED = (12, 6)
FIGSIZE_SQUARE = (12, 12)
FIGSIZE_BIGGER = (24, 12)
FIGSIZE_HUGE = (32, 16)
FIGSIZE = FIGSIZE_MED
# Quality drawings
#FIGSIZE = FIGSIZE_SQUARE
#DPI = 120
tile_within = (-1, 30, 969, 1041)
if helpers.get_computer_name() == 'Ooo':
TILE_WITHIN = (-1912, 30, -969, 1071)
# DEFAULTS. (TODO: Can these be cleaned up?)
DISTINCT_COLORS = True # and False
DARKEN = None
ELL_LINEWIDTH = 1.5
if DISTINCT_COLORS:
ELL_ALPHA = .6
LINE_ALPHA = .35
else:
ELL_ALPHA = .4
LINE_ALPHA = .4
LINE_ALPHA_OVERRIDE = helpers.get_arg('--line-alpha-override', type_=float, default=None)
ELL_ALPHA_OVERRIDE = helpers.get_arg('--ell-alpha-override', type_=float, default=None)
#LINE_ALPHA_OVERRIDE = None
#ELL_ALPHA_OVERRIDE = None
ELL_COLOR = BLUE
LINE_COLOR = RED
LINE_WIDTH = 1.4
SHOW_LINES = True # True
SHOW_ELLS = True
POINT_SIZE = 2
base_fnum = 9001
def next_fnum():
global base_fnum
base_fnum += 1
return base_fnum
def my_prefs():
global LINE_COLOR
global ELL_COLOR
global ELL_LINEWIDTH
global ELL_ALPHA
LINE_COLOR = (1, 0, 0)
ELL_COLOR = (0, 0, 1)
ELL_LINEWIDTH = 2
ELL_ALPHA = .5
def execstr_global():
execstr = ['global' + key for key in globals().keys()]
return execstr
def register_matplotlib_widget(plotWidget_):
'talks to PyQt4 guis'
global plotWidget
plotWidget = plotWidget_
#fig = plotWidget.figure
#axes_list = fig.get_axes()
#ax = axes_list[0]
#plt.sca(ax)
def unregister_qt4_win(win):
global QT4_WINS
if win == 'all':
QT4_WINS = []
def register_qt4_win(win):
global QT4_WINS
QT4_WINS.append(win)
def OooScreen2():
nRows = 1
nCols = 1
x_off = 30 * 4
y_off = 30 * 4
x_0 = -1920
y_0 = 30
w = (1912 - x_off) / nRows
h = (1080 - y_off) / nCols
return dict(num_rc=(1, 1), wh=(w, h), xy_off=(x_0, y_0), wh_off=(0, 10),
row_first=True, no_tile=False)
def deterministic_shuffle(list_):
randS = int(np.random.rand() * np.uint(0 - 2) / 2)
np.random.seed(len(list_))
np.random.shuffle(list_)
np.random.seed(randS)
def distinct_colors(N, brightness=.878):
# http://blog.jianhuashao.com/2011/09/generate-n-distinct-colors.html
sat = brightness
val = brightness
HSV_tuples = [(x * 1.0 / N, sat, val) for x in xrange(N)]
RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)
deterministic_shuffle(RGB_tuples)
return RGB_tuples
def add_alpha(colors):
return [list(color) + [1] for color in colors]
def _axis_xy_width_height(ax, xaug=0, yaug=0, waug=0, haug=0):
'gets geometry of a subplot'
autoAxis = ax.axis()
xy = (autoAxis[0] + xaug, autoAxis[2] + yaug)
width = (autoAxis[1] - autoAxis[0]) + waug
height = (autoAxis[3] - autoAxis[2]) + haug
return xy, width, height
def draw_border(ax, color=GREEN, lw=2, offset=None):
'draws rectangle border around a subplot'
xy, width, height = _axis_xy_width_height(ax, -.7, -.2, 1, .4)
if offset is not None:
xoff, yoff = offset
xy = [xoff, yoff]
height = - height - yoff
width = width - xoff
rect = matplotlib.patches.Rectangle(xy, width, height, lw=lw)
rect = ax.add_patch(rect)
rect.set_clip_on(False)
rect.set_fill(False)
rect.set_edgecolor(color)
def draw_roi(roi, label=None, bbox_color=(1, 0, 0),
lbl_bgcolor=(0, 0, 0), lbl_txtcolor=(1, 1, 1), theta=0, ax=None):
if ax is None:
ax = gca()
(rx, ry, rw, rh) = roi
#cos_ = np.cos(theta)
#sin_ = np.sin(theta)
#rot_t = Affine2D([( cos_, -sin_, 0),
#( sin_, cos_, 0),
#( 0, 0, 1)])
#scale_t = Affine2D([( rw, 0, 0),
#( 0, rh, 0),
#( 0, 0, 1)])
#trans_t = Affine2D([( 1, 0, rx + rw / 2),
#( 0, 1, ry + rh / 2),
#( 0, 0, 1)])
#t_end = scale_t + rot_t + trans_t + t_start
# Transformations are specified in backwards order.
trans_roi = Affine2D()
trans_roi.scale(rw, rh)
trans_roi.rotate(theta)
trans_roi.translate(rx + rw / 2, ry + rh / 2)
t_end = trans_roi + ax.transData
bbox = matplotlib.patches.Rectangle((-.5, -.5), 1, 1, lw=2, transform=t_end)
arw_x, arw_y, arw_dx, arw_dy = (-0.5, -0.5, 1.0, 0.0)
arrowargs = dict(head_width=.1, transform=t_end, length_includes_head=True)
arrow = FancyArrow(arw_x, arw_y, arw_dx, arw_dy, **arrowargs)
bbox.set_fill(False)
#bbox.set_transform(trans)
bbox.set_edgecolor(bbox_color)
arrow.set_edgecolor(bbox_color)
arrow.set_facecolor(bbox_color)
ax.add_patch(bbox)
ax.add_patch(arrow)
#ax.add_patch(arrow2)
if label is not None:
ax_absolute_text(rx, ry, label, ax=ax,
horizontalalignment='center',
verticalalignment='center',
color=lbl_txtcolor,
backgroundcolor=lbl_bgcolor)
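# Example usage (illustrative sketch; the image and roi values are hypothetical):
# draw a rotated ROI over a blank image on the current axes. The roi tuple is
# (x, y, w, h) in image coordinates and theta is in radians.
def _example_draw_roi():
    imshow(np.zeros((100, 100), dtype=np.uint8), fnum=next_fnum())
    draw_roi((20, 30, 40, 25), label='roi1', bbox_color=(1, 0, 0), theta=np.pi / 8)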
# ---- GENERAL FIGURE COMMANDS ----
def sanatize_img_fname(fname):
fname_clean = fname
search_replace_list = [(' ', '_'), ('\n', '--'), ('\\', ''), ('/', '')]
for old, new in search_replace_list:
fname_clean = fname_clean.replace(old, new)
fname_noext, ext = splitext(fname_clean)
fname_clean = fname_noext + ext.lower()
# Check for correct extensions
if not ext.lower() in helpers.IMG_EXTENSIONS:
fname_clean += '.png'
return fname_clean
def sanatize_img_fpath(fpath):
[dpath, fname] = split(fpath)
fname_clean = sanatize_img_fname(fname)
fpath_clean = join(dpath, fname_clean)
fpath_clean = normpath(fpath_clean)
return fpath_clean
def set_geometry(fnum, x, y, w, h):
fig = get_fig(fnum)
qtwin = fig.canvas.manager.window
qtwin.setGeometry(x, y, w, h)
def get_geometry(fnum):
fig = get_fig(fnum)
qtwin = fig.canvas.manager.window
(x1, y1, x2, y2) = qtwin.geometry().getCoords()
(x, y, w, h) = (x1, y1, x2 - x1, y2 - y1)
return (x, y, w, h)
def get_screen_info():
from PyQt4 import Qt, QtGui # NOQA
desktop = QtGui.QDesktopWidget()
mask = desktop.mask() # NOQA
layout_direction = desktop.layoutDirection() # NOQA
screen_number = desktop.screenNumber() # NOQA
normal_geometry = desktop.normalGeometry() # NOQA
num_screens = desktop.screenCount() # NOQA
avail_rect = desktop.availableGeometry() # NOQA
screen_rect = desktop.screenGeometry() # NOQA
QtGui.QDesktopWidget().availableGeometry().center() # NOQA
normal_geometry = desktop.normalGeometry() # NOQA
def get_all_figures():
all_figures_ = [manager.canvas.figure for manager in
matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
all_figures = []
# Make sure you dont show figures that this module closed
for fig in iter(all_figures_):
if not 'df2_closed' in fig.__dict__.keys() or not fig.df2_closed:
all_figures.append(fig)
# Return all the figures sorted by their number
all_figures = sorted(all_figures, key=lambda fig: fig.number)
return all_figures
def get_all_qt4_wins():
return QT4_WINS
def all_figures_show():
if plotWidget is not None:
plotWidget.figure.show()
plotWidget.figure.canvas.draw()
for fig in iter(get_all_figures()):
time.sleep(.1)
fig.show()
fig.canvas.draw()
def all_figures_tight_layout():
for fig in iter(get_all_figures()):
fig.tight_layout()
#adjust_subplots()
time.sleep(.1)
def get_monitor_geom(monitor_num=0):
from PyQt4 import QtGui # NOQA
desktop = QtGui.QDesktopWidget()
rect = desktop.availableGeometry()
geom = (rect.x(), rect.y(), rect.width(), rect.height())
return geom
def golden_wh(x):
'returns a width / height with a golden aspect ratio'
return map(int, map(round, (x * .618, x * .312)))
def all_figures_tile(num_rc=(3, 4), wh=1000, xy_off=(0, 0), wh_off=(0, 10),
row_first=True, no_tile=False, override1=False):
'Lays out all figures in a grid. if wh is a scalar, a golden ratio is used'
# RCOS TODO:
# I want this function to layout all the figures and qt windows within the
# bounds of a rectangle. (taken from the get_monitor_geom, or specified by
# the user i.e. left half of monitor 0). It should lay them out
# rectangularly and choose figure sizes such that all of them will fit.
if no_tile:
return
if not np.iterable(wh):
wh = golden_wh(wh)
all_figures = get_all_figures()
all_qt4wins = get_all_qt4_wins()
if override1:
if len(all_figures) == 1:
fig = all_figures[0]
win = fig.canvas.manager.window
win.setGeometry(0, 0, 900, 900)
update()
return
#nFigs = len(all_figures) + len(all_qt4_wins)
num_rows, num_cols = num_rc
w, h = wh
x_off, y_off = xy_off
w_off, h_off = wh_off
x_pad, y_pad = (0, 0)
printDBG('[df2] Tile all figures: ')
printDBG('[df2] wh = %r' % ((w, h),))
printDBG('[df2] xy_offsets = %r' % ((x_off, y_off),))
printDBG('[df2] wh_offsets = %r' % ((w_off, h_off),))
printDBG('[df2] xy_pads = %r' % ((x_pad, y_pad),))
if sys.platform == 'win32':
h_off += 0
w_off += 40
x_off += 40
y_off += 40
x_pad += 0
y_pad += 100
def position_window(i, win):
isqt4_mpl = isinstance(win, backend_qt4.MainWindow)
isqt4_back = isinstance(win, QtGui.QMainWindow)
if not isqt4_mpl and not isqt4_back:
raise NotImplementedError('%r-th Backend %r is not a Qt Window' % (i, win))
if row_first:
y = (i % num_rows) * (h + h_off) + 40
x = (int(i / num_rows)) * (w + w_off) + x_pad
else:
x = (i % num_cols) * (w + w_off) + 40
y = (int(i / num_cols)) * (h + h_off) + y_pad
x += x_off
y += y_off
win.setGeometry(x, y, w, h)
ioff = 0
for i, win in enumerate(all_qt4wins):
position_window(i, win)
ioff += 1
for i, fig in enumerate(all_figures):
win = fig.canvas.manager.window
position_window(i + ioff, win)
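# Example usage (illustrative sketch): tile all open figure windows into a
# 2x3 grid of 400x300 pixel windows, offset 50 pixels from the monitor origin.
def _example_tile_figures():
    all_figures_tile(num_rc=(2, 3), wh=(400, 300), xy_off=(50, 50))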
def all_figures_bring_to_front():
all_figures = get_all_figures()
for fig in iter(all_figures):
bring_to_front(fig)
def close_all_figures():
all_figures = get_all_figures()
for fig in iter(all_figures):
close_figure(fig)
def close_figure(fig):
fig.clf()
fig.df2_closed = True
qtwin = fig.canvas.manager.window
qtwin.close()
def bring_to_front(fig):
#what is difference between show and show normal?
qtwin = fig.canvas.manager.window
qtwin.raise_()
qtwin.activateWindow()
qtwin.setWindowFlags(Qt.WindowStaysOnTopHint)
qtwin.setWindowFlags(Qt.WindowFlags(0))
qtwin.show()
def show():
all_figures_show()
all_figures_bring_to_front()
plt.show()
def reset():
close_all_figures()
def draw():
all_figures_show()
def update():
draw()
all_figures_bring_to_front()
def present(*args, **kwargs):
'execing present should cause IPython magic'
print('[df2] Presenting figures...')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
all_figures_tile(*args, **kwargs)
all_figures_show()
all_figures_bring_to_front()
# Return an exec string
execstr = helpers.ipython_execstr()
execstr += textwrap.dedent('''
if not embedded:
print('[df2] Presenting in normal shell.')
print('[df2] ... plt.show()')
plt.show()
''')
return execstr
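# Example usage (illustrative sketch): present() returns a string that is meant
# to be exec'd in the caller's namespace, so IPython embedding / plt.show()
# happens there rather than inside this module.
def _example_present():
    execstr = present(num_rc=(2, 2), wh=500)
    exec(execstr)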
def save_figure(fnum=None, fpath=None, usetitle=False, overwrite=True):
#import warnings
#warnings.simplefilter("error")
# Find the figure
if fnum is None:
fig = gcf()
else:
fig = plt.figure(fnum, figsize=FIGSIZE, dpi=DPI)
# Enforce inches and DPI
fig.set_size_inches(FIGSIZE[0], FIGSIZE[1])
fnum = fig.number
if fpath is None:
# Find the title
fpath = sanatize_img_fname(fig.canvas.get_window_title())
if usetitle:
title = sanatize_img_fname(fig.canvas.get_window_title())
fpath = join(fpath, title)
# Add in DPI information
fpath_noext, ext = splitext(fpath)
size_suffix = '_DPI=%r_FIGSIZE=%d,%d' % (DPI, FIGSIZE[0], FIGSIZE[1])
fpath = fpath_noext + size_suffix + ext
# Sanatize the filename
fpath_clean = sanatize_img_fpath(fpath)
#fname_clean = split(fpath_clean)[1]
print('[df2] save_figure() %r' % (fpath_clean,))
#adjust_subplots()
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
if not exists(fpath_clean) or overwrite:
fig.savefig(fpath_clean, dpi=DPI)
def set_ticks(xticks, yticks):
ax = gca()
ax.set_xticks(xticks)
ax.set_yticks(yticks)
def set_xticks(tick_set):
ax = gca()
ax.set_xticks(tick_set)
def set_yticks(tick_set):
ax = gca()
ax.set_yticks(tick_set)
def set_xlabel(lbl, ax=None):
if ax is None:
ax = gca()
ax.set_xlabel(lbl, fontproperties=FONTS.xlabel)
def set_title(title, ax=None):
if ax is None:
ax = gca()
ax.set_title(title, fontproperties=FONTS.axtitle)
def set_ylabel(lbl):
ax = gca()
ax.set_ylabel(lbl, fontproperties=FONTS.xlabel)
def plot(*args, **kwargs):
return plt.plot(*args, **kwargs)
def plot2(x_data, y_data, marker='o', title_pref='', x_label='x', y_label='y', *args,
**kwargs):
do_plot = True
ax = gca()
if len(x_data) != len(y_data):
warnstr = '[df2] ! Warning: len(x_data) != len(y_data). Cannot plot2'
warnings.warn(warnstr)
draw_text(warnstr)
do_plot = False
if len(x_data) == 0:
warnstr = '[df2] ! Warning: len(x_data) == 0. Cannot plot2'
warnings.warn(warnstr)
draw_text(warnstr)
do_plot = False
if do_plot:
ax.plot(x_data, y_data, marker, *args, **kwargs)
min_ = min(x_data.min(), y_data.min())
max_ = max(x_data.max(), y_data.max())
# Equal aspect ratio
ax.set_xlim(min_, max_)
ax.set_ylim(min_, max_)
ax.set_aspect('equal')
ax.set_xlabel(x_label, fontproperties=FONTS.xlabel)
ax.set_ylabel(y_label, fontproperties=FONTS.xlabel)
ax.set_title(title_pref + ' ' + x_label + ' vs ' + y_label,
fontproperties=FONTS.axtitle)
def adjust_subplots_xlabels():
adjust_subplots(left=.03, right=.97, bottom=.2, top=.9, hspace=.15)
def adjust_subplots_xylabels():
adjust_subplots(left=.03, right=1, bottom=.1, top=.9, hspace=.15)
def adjust_subplots_safe(left=.1, right=.9, bottom=.1, top=.9, wspace=.3, hspace=.5):
adjust_subplots(left, bottom, right, top, wspace, hspace)
def adjust_subplots(left=0.02, bottom=0.02,
right=0.98, top=0.90,
wspace=0.1, hspace=0.15):
'''
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
hspace = 0.2
'''
#print('[df2] adjust_subplots(%r)' % locals())
plt.subplots_adjust(left, bottom, right, top, wspace, hspace)
#=======================
# TEXT FUNCTIONS
# TODO: I have too many of these. Need to consolidate
#=======================
def upperleft_text(txt):
txtargs = dict(horizontalalignment='left',
verticalalignment='top',
#fontsize='smaller',
#fontweight='ultralight',
backgroundcolor=(0, 0, 0, .5),
color=ORANGE)
ax_relative_text(.02, .02, txt, **txtargs)
def upperright_text(txt, offset=None):
txtargs = dict(horizontalalignment='right',
verticalalignment='top',
#fontsize='smaller',
#fontweight='ultralight',
backgroundcolor=(0, 0, 0, .5),
color=ORANGE,
offset=offset)
ax_relative_text(.98, .02, txt, **txtargs)
def lowerright_text(txt):
txtargs = dict(horizontalalignment='right',
verticalalignment='top',
#fontsize='smaller',
#fontweight='ultralight',
backgroundcolor=(0, 0, 0, .5),
color=ORANGE)
ax_relative_text(.98, .92, txt, **txtargs)
def absolute_lbl(x_, y_, txt, roffset=(-.02, -.02), **kwargs):
txtargs = dict(horizontalalignment='right',
verticalalignment='top',
backgroundcolor=(0, 0, 0, .5),
color=ORANGE,
**kwargs)
ax_absolute_text(x_, y_, txt, roffset=roffset, **txtargs)
def ax_relative_text(x, y, txt, ax=None, offset=None, **kwargs):
if ax is None:
ax = gca()
xy, width, height = _axis_xy_width_height(ax)
x_, y_ = ((xy[0]) + x * width, (xy[1] + height) - y * height)
if offset is not None:
xoff, yoff = offset
x_ += xoff
y_ += yoff
ax_absolute_text(x_, y_, txt, ax=ax, **kwargs)
def ax_absolute_text(x_, y_, txt, ax=None, roffset=None, **kwargs):
if ax is None:
ax = gca()
    if 'fontproperties' not in kwargs:
kwargs['fontproperties'] = FONTS.relative
if roffset is not None:
xroff, yroff = roffset
xy, width, height = _axis_xy_width_height(ax)
x_ += xroff * width
y_ += yroff * height
ax.text(x_, y_, txt, **kwargs)
def fig_relative_text(x, y, txt, **kwargs):
kwargs['horizontalalignment'] = 'center'
kwargs['verticalalignment'] = 'center'
fig = gcf()
#xy, width, height = _axis_xy_width_height(ax)
#x_, y_ = ((xy[0]+width)+x*width, (xy[1]+height)-y*height)
fig.text(x, y, txt, **kwargs)
def draw_text(text_str, rgb_textFG=(0, 0, 0), rgb_textBG=(1, 1, 1)):
ax = gca()
xy, width, height = _axis_xy_width_height(ax)
text_x = xy[0] + (width / 2)
text_y = xy[1] + (height / 2)
ax.text(text_x, text_y, text_str,
horizontalalignment='center',
verticalalignment='center',
color=rgb_textFG,
backgroundcolor=rgb_textBG)
def set_figtitle(figtitle, subtitle='', forcefignum=True, incanvas=True):
if figtitle is None:
figtitle = ''
fig = gcf()
if incanvas:
if subtitle != '':
subtitle = '\n' + subtitle
fig.suptitle(figtitle + subtitle, fontsize=14, fontweight='bold')
#fig.suptitle(figtitle, x=.5, y=.98, fontproperties=FONTS.figtitle)
#fig_relative_text(.5, .96, subtitle, fontproperties=FONTS.subtitle)
else:
fig.suptitle('')
window_figtitle = ('fig(%d) ' % fig.number) + figtitle
fig.canvas.set_window_title(window_figtitle)
def convert_keypress_event_mpl_to_qt4(mevent):
global TMP_mevent
TMP_mevent = mevent
# Grab the key from the mpl.KeyPressEvent
key = mevent.key
print('[df2] convert event mpl -> qt4')
print('[df2] key=%r' % key)
# dicts modified from backend_qt4.py
mpl2qtkey = {'control': Qt.Key_Control, 'shift': Qt.Key_Shift,
'alt': Qt.Key_Alt, 'super': Qt.Key_Meta,
'enter': Qt.Key_Return, 'left': Qt.Key_Left, 'up': Qt.Key_Up,
'right': Qt.Key_Right, 'down': Qt.Key_Down,
'escape': Qt.Key_Escape, 'f1': Qt.Key_F1, 'f2': Qt.Key_F2,
'f3': Qt.Key_F3, 'f4': Qt.Key_F4, 'f5': Qt.Key_F5,
'f6': Qt.Key_F6, 'f7': Qt.Key_F7, 'f8': Qt.Key_F8,
'f9': Qt.Key_F9, 'f10': Qt.Key_F10, 'f11': Qt.Key_F11,
'f12': Qt.Key_F12, 'home': Qt.Key_Home, 'end': Qt.Key_End,
'pageup': Qt.Key_PageUp, 'pagedown': Qt.Key_PageDown}
# Reverse the control and super (aka cmd/apple) keys on OSX
if sys.platform == 'darwin':
mpl2qtkey.update({'super': Qt.Key_Control, 'control': Qt.Key_Meta, })
# Try to reconstruct QtGui.KeyEvent
type_ = QtCore.QEvent.Type(QtCore.QEvent.KeyPress) # The type should always be KeyPress
text = ''
# Try to extract the original modifiers
modifiers = QtCore.Qt.NoModifier # initialize to no modifiers
if key.find(u'ctrl+') >= 0:
modifiers = modifiers | QtCore.Qt.ControlModifier
key = key.replace(u'ctrl+', u'')
print('[df2] has ctrl modifier')
text += 'Ctrl+'
if key.find(u'alt+') >= 0:
modifiers = modifiers | QtCore.Qt.AltModifier
key = key.replace(u'alt+', u'')
print('[df2] has alt modifier')
text += 'Alt+'
if key.find(u'super+') >= 0:
modifiers = modifiers | QtCore.Qt.MetaModifier
key = key.replace(u'super+', u'')
print('[df2] has super modifier')
text += 'Super+'
if key.isupper():
modifiers = modifiers | QtCore.Qt.ShiftModifier
print('[df2] has shift modifier')
text += 'Shift+'
# Try to extract the original key
try:
if key in mpl2qtkey:
key_ = mpl2qtkey[key]
else:
key_ = ord(key.upper()) # Qt works with uppercase keys
text += key.upper()
except Exception as ex:
print('[df2] ERROR key=%r' % key)
print('[df2] ERROR %r' % ex)
raise
autorep = False # default false
count = 1 # default 1
text = QtCore.QString(text) # The text is somewhat arbitrary
# Create the QEvent
print('----------------')
print('[df2] Create event')
print('[df2] type_ = %r' % type_)
print('[df2] text = %r' % text)
print('[df2] modifiers = %r' % modifiers)
print('[df2] autorep = %r' % autorep)
print('[df2] count = %r ' % count)
print('----------------')
qevent = QtGui.QKeyEvent(type_, key_, modifiers, text, autorep, count)
return qevent
def test_build_qkeyevent():
import draw_func2 as df2
qtwin = df2.QT4_WINS[0]
# This reconstructs an test mplevent
canvas = df2.figure(1).canvas
mevent = matplotlib.backend_bases.KeyEvent('key_press_event', canvas, u'ctrl+p', x=672, y=230.0)
qevent = df2.convert_keypress_event_mpl_to_qt4(mevent)
app = qtwin.backend.app
    app.sendEvent(qtwin.ui, qevent)
#type_ = QtCore.QEvent.Type(QtCore.QEvent.KeyPress) # The type should always be KeyPress
#text = QtCore.QString('A') # The text is somewhat arbitrary
#modifiers = QtCore.Qt.NoModifier # initialize to no modifiers
#modifiers = modifiers | QtCore.Qt.ControlModifier
#modifiers = modifiers | QtCore.Qt.AltModifier
#key_ = ord('A') # Qt works with uppercase keys
#autorep = False # default false
#count = 1 # default 1
#qevent = QtGui.QKeyEvent(type_, key_, modifiers, text, autorep, count)
return qevent
# This actually doesn't matter
def on_key_press_event(event):
'redirects keypress events to main window'
global QT4_WINS
print('[df2] %r' % event)
print('[df2] %r' % str(event.__dict__))
for qtwin in QT4_WINS:
qevent = convert_keypress_event_mpl_to_qt4(event)
app = qtwin.backend.app
print('[df2] attempting to send qevent to qtwin')
app.sendEvent(qtwin, qevent)
# TODO: FINISH ME
#PyQt4.QtGui.QKeyEvent
#qtwin.keyPressEvent(event)
#fig.canvas.manager.window.keyPressEvent()
def customize_figure(fig, docla):
if not 'user_stat_list' in fig.__dict__.keys() or docla:
fig.user_stat_list = []
fig.user_notes = []
# We dont need to catch keypress events because you just need to set it as
# an application level shortcut
# Catch key press events
#key_event_cbid = fig.__dict__.get('key_event_cbid', None)
#if key_event_cbid is not None:
#fig.canvas.mpl_disconnect(key_event_cbid)
#fig.key_event_cbid = fig.canvas.mpl_connect('key_press_event', on_key_press_event)
fig.df2_closed = False
def gcf():
if plotWidget is not None:
#print('is plotwidget visible = %r' % plotWidget.isVisible())
fig = plotWidget.figure
return fig
return plt.gcf()
def gca():
if plotWidget is not None:
#print('is plotwidget visible = %r' % plotWidget.isVisible())
axes_list = plotWidget.figure.get_axes()
current = 0
ax = axes_list[current]
return ax
return plt.gca()
def cla():
return plt.cla()
def clf():
return plt.clf()
def get_fig(fnum=None):
printDBG('[df2] get_fig(fnum=%r)' % fnum)
fig_kwargs = dict(figsize=FIGSIZE, dpi=DPI)
if plotWidget is not None:
return gcf()
if fnum is None:
try:
fig = gcf()
except Exception as ex:
printDBG('[df2] get_fig(): ex=%r' % ex)
fig = plt.figure(**fig_kwargs)
fnum = fig.number
else:
try:
fig = plt.figure(fnum, **fig_kwargs)
except Exception as ex:
print(repr(ex))
warnings.warn(repr(ex))
fig = gcf()
return fig
def get_ax(fnum=None, pnum=None):
figure(fnum=fnum, pnum=pnum)
ax = gca()
return ax
def figure(fnum=None, docla=False, title=None, pnum=(1, 1, 1), figtitle=None,
doclf=False, **kwargs):
'''
fnum = fignum = figure number
pnum = plotnum = plot tuple
'''
#matplotlib.pyplot.xkcd()
fig = get_fig(fnum)
axes_list = fig.get_axes()
# Ensure my customized settings
customize_figure(fig, docla)
# Convert pnum to tuple format
if tools.is_int(pnum):
nr = pnum // 100
nc = pnum // 10 - (nr * 10)
px = pnum - (nr * 100) - (nc * 10)
pnum = (nr, nc, px)
if doclf: # a bit hacky. Need to rectify docla and doclf
fig.clf()
# Get the subplot
if docla or len(axes_list) == 0:
printDBG('[df2] *** NEW FIGURE %r.%r ***' % (fnum, pnum))
if not pnum is None:
#ax = plt.subplot(*pnum)
ax = fig.add_subplot(*pnum)
ax.cla()
else:
ax = gca()
else:
printDBG('[df2] *** OLD FIGURE %r.%r ***' % (fnum, pnum))
if not pnum is None:
ax = plt.subplot(*pnum) # fig.add_subplot fails here
#ax = fig.add_subplot(*pnum)
else:
ax = gca()
#ax = axes_list[0]
# Set the title
if not title is None:
ax = gca()
ax.set_title(title, fontproperties=FONTS.axtitle)
# Add title to figure
if figtitle is None and pnum == (1, 1, 1):
figtitle = title
if not figtitle is None:
set_figtitle(figtitle, incanvas=False)
return fig
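# Example usage (illustrative sketch): fnum picks the figure window and pnum
# picks the subplot slot; an integer pnum such as 224 is unpacked into (2, 2, 4).
def _example_figure_grid():
    figure(fnum=1, pnum=(2, 2, 1), title='top left')
    figure(fnum=1, pnum=(2, 2, 2), title='top right')
    figure(fnum=1, pnum=224, title='bottom right', figtitle='2x2 demo')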
def plot_pdf(data, draw_support=True, scale_to=None, label=None, color=0,
nYTicks=3):
fig = gcf()
ax = gca()
data = np.array(data)
if len(data) == 0:
warnstr = '[df2] ! Warning: len(data) = 0. Cannot visualize pdf'
warnings.warn(warnstr)
draw_text(warnstr)
return
bw_factor = .05
if isinstance(color, (int, float)):
colorx = color
line_color = plt.get_cmap('gist_rainbow')(colorx)
else:
line_color = color
# Estimate a pdf
data_pdf = estimate_pdf(data, bw_factor)
# Get probability of seen data
prob_x = data_pdf(data)
# Get probability of unseen data data
x_data = np.linspace(0, data.max(), 500)
y_data = data_pdf(x_data)
# Scale if requested
if not scale_to is None:
scale_factor = scale_to / y_data.max()
y_data *= scale_factor
prob_x *= scale_factor
#Plot the actual datas on near the bottom perterbed in Y
if draw_support:
pdfrange = prob_x.max() - prob_x.min()
perb = (np.random.randn(len(data))) * pdfrange / 30.
preb_y_data = np.abs([pdfrange / 50. for _ in data] + perb)
ax.plot(data, preb_y_data, 'o', color=line_color, figure=fig, alpha=.1)
# Plot the pdf (unseen data)
ax.plot(x_data, y_data, color=line_color, label=label)
if nYTicks is not None:
yticks = np.linspace(min(y_data), max(y_data), nYTicks)
ax.set_yticks(yticks)
def estimate_pdf(data, bw_factor):
try:
data_pdf = scipy.stats.gaussian_kde(data, bw_factor)
data_pdf.covariance_factor = bw_factor
except Exception as ex:
print('[df2] ! Exception while estimating kernel density')
print('[df2] data=%r' % (data,))
print('[df2] ex=%r' % (ex,))
raise
return data_pdf
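# Example usage (illustrative sketch): plot a smoothed gaussian-KDE curve over
# some randomly generated scores, with the support drawn near the baseline.
def _example_plot_pdf():
    data = np.random.randn(200) * 2 + 5
    figure(fnum=next_fnum(), doclf=True, title='pdf demo')
    plot_pdf(data, draw_support=True, label='demo data', color=0)
    legend()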
def show_histogram(data, bins=None, **kwargs):
print('[df2] show_histogram()')
dmin = int(np.floor(data.min()))
dmax = int(np.ceil(data.max()))
if bins is None:
bins = dmax - dmin
fig = figure(**kwargs)
ax = gca()
ax.hist(data, bins=bins, range=(dmin, dmax))
#help(np.bincount)
fig.show()
def show_signature(sig, **kwargs):
fig = figure(**kwargs)
plt.plot(sig)
fig.show()
def plot_stems(x_data=None, y_data=None):
    if y_data is not None and x_data is None:
        x_data = np.arange(len(y_data))
if len(x_data) != len(y_data):
print('[df2] WARNING plot_stems(): len(x_data)!=len(y_data)')
if len(x_data) == 0:
print('[df2] WARNING plot_stems(): len(x_data)=len(y_data)=0')
x_data_ = np.array(x_data)
y_data_ = np.array(y_data)
x_data_sort = x_data_[y_data_.argsort()[::-1]]
y_data_sort = y_data_[y_data_.argsort()[::-1]]
markerline, stemlines, baseline = pylab.stem(x_data_sort, y_data_sort, linefmt='-')
pylab.setp(markerline, 'markerfacecolor', 'b')
pylab.setp(baseline, 'linewidth', 0)
ax = gca()
ax.set_xlim(min(x_data) - 1, max(x_data) + 1)
ax.set_ylim(min(y_data) - 1, max(max(y_data), max(x_data)) + 1)
def plot_sift_signature(sift, title='', fnum=None, pnum=None):
figure(fnum=fnum, pnum=pnum)
ax = gca()
plot_bars(sift, 16)
ax.set_xlim(0, 128)
ax.set_ylim(0, 256)
space_xticks(9, 16)
space_yticks(5, 64)
ax.set_title(title)
dark_background(ax)
return ax
def dark_background(ax=None, doubleit=False):
if ax is None:
ax = gca()
xy, width, height = _axis_xy_width_height(ax)
if doubleit:
halfw = (doubleit) * (width / 2)
halfh = (doubleit) * (height / 2)
xy = (xy[0] - halfw, xy[1] - halfh)
width *= (doubleit + 1)
height *= (doubleit + 1)
rect = matplotlib.patches.Rectangle(xy, width, height, lw=0, zorder=0)
rect.set_clip_on(True)
rect.set_fill(True)
rect.set_color(BLACK * .9)
rect = ax.add_patch(rect)
def space_xticks(nTicks=9, spacing=16, ax=None):
if ax is None:
ax = gca()
ax.set_xticks(np.arange(nTicks) * spacing)
small_xticks(ax)
def space_yticks(nTicks=9, spacing=32, ax=None):
if ax is None:
ax = gca()
ax.set_yticks(np.arange(nTicks) * spacing)
small_yticks(ax)
def small_xticks(ax=None):
    if ax is None:
        ax = gca()
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(8)
def small_yticks(ax=None):
    if ax is None:
        ax = gca()
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(8)
def plot_bars(y_data, nColorSplits=1):
width = 1
nDims = len(y_data)
nGroup = nDims // nColorSplits
ori_colors = distinct_colors(nColorSplits)
x_data = np.arange(nDims)
ax = gca()
for ix in xrange(nColorSplits):
xs = np.arange(nGroup) + (nGroup * ix)
color = ori_colors[ix]
x_dat = x_data[xs]
y_dat = y_data[xs]
ax.bar(x_dat, y_dat, width, color=color, edgecolor=np.array(color) * .8)
def phantom_legend_label(label, color, loc='upper right'):
'adds a legend label without displaying an actor'
pass
#phantom_actor = plt.Circle((0, 0), 1, fc=color, prop=FONTS.legend, loc=loc)
#plt.legend(phant_actor, label, framealpha=.2)
#plt.legend(*zip(*legend_tups), framealpha=.2)
#legend_tups = []
#legend_tups.append((phantom_actor, label))
def legend(loc='upper right'):
ax = gca()
ax.legend(prop=FONTS.legend, loc=loc)
def plot_histpdf(data, label=None, draw_support=False, nbins=10):
freq, _ = plot_hist(data, nbins=nbins)
plot_pdf(data, draw_support=draw_support, scale_to=freq.max(), label=label)
def plot_hist(data, bins=None, nbins=10, weights=None):
if isinstance(data, list):
data = np.array(data)
    dmin = data.min()
    dmax = data.max()
    if bins is None:
        bins = dmax - dmin
ax = gca()
freq, bins_, patches = ax.hist(data, bins=nbins, weights=weights, range=(dmin, dmax))
return freq, bins_
def variation_trunctate(data):
ax = gca()
data = np.array(data)
if len(data) == 0:
warnstr = '[df2] ! Warning: len(data) = 0. Cannot variation_truncate'
warnings.warn(warnstr)
return
trunc_max = data.mean() + data.std() * 2
trunc_min = np.floor(data.min())
ax.set_xlim(trunc_min, trunc_max)
#trunc_xticks = np.linspace(0, int(trunc_max),11)
#trunc_xticks = trunc_xticks[trunc_xticks >= trunc_min]
#trunc_xticks = np.append([int(trunc_min)], trunc_xticks)
#no_zero_yticks = ax.get_yticks()[ax.get_yticks() > 0]
#ax.set_xticks(trunc_xticks)
#ax.set_yticks(no_zero_yticks)
#_----------------- HELPERS ^^^ ---------
# ---- IMAGE CREATION FUNCTIONS ----
@tools.debug_exception
def draw_sift(desc, kp=None):
# TODO: There might be a divide by zero warning in here.
''' desc = np.random.rand(128)
desc = desc / np.sqrt((desc**2).sum())
desc = np.round(desc * 255) '''
# This is draw, because it is an overlay
ax = gca()
tau = 2 * np.pi
DSCALE = .25
XYSCALE = .5
XYSHIFT = -.75
ORI_SHIFT = 0 # -tau #1/8 * tau
# SIFT CONSTANTS
NORIENTS = 8
NX = 4
NY = 4
NBINS = NX * NY
    def circle_rad2xy(radians, mag):
        return np.cos(radians) * mag, np.sin(radians) * mag
    discrete_ori = (np.arange(0, NORIENTS) * (tau / NORIENTS) + ORI_SHIFT)
    # Build list of plot positions
    # Build an "arm" for each sift measurement
    arm_mag = desc / 255.0
    arm_ori = np.tile(discrete_ori, (NBINS, 1)).flatten()
    # The offset x,y's for each sift measurement
    arm_dxy = np.array(zip(*circle_rad2xy(arm_ori, arm_mag)))
    yxt_gen = itertools.product(xrange(NY), xrange(NX), xrange(NORIENTS))
    yx_gen = itertools.product(xrange(NY), xrange(NX))
    # Transform the drawing of the SIFT descriptor to its elliptical patch
axTrans = ax.transData
kpTrans = None
if kp is None:
kp = [0, 0, 1, 0, 1]
    kp = np.atleast_2d(np.array(kp))  # accept both (5,) and (1, 5) keypoint inputs
kpT = kp.T
x, y, a, c, d = kpT[:, 0]
kpTrans = Affine2D([( a, 0, x),
( c, d, y),
( 0, 0, 1)])
axTrans = ax.transData
# Draw 8 directional arms in each of the 4x4 grid cells
arrow_patches = []
arrow_patches2 = []
for y, x, t in yxt_gen:
index = y * NX * NORIENTS + x * NORIENTS + t
(dx, dy) = arm_dxy[index]
arw_x = x * XYSCALE + XYSHIFT
arw_y = y * XYSCALE + XYSHIFT
arw_dy = dy * DSCALE * 1.5 # scale for viz Hack
arw_dx = dx * DSCALE * 1.5
#posA = (arw_x, arw_y)
#posB = (arw_x+arw_dx, arw_y+arw_dy)
_args = [arw_x, arw_y, arw_dx, arw_dy]
_kwargs = dict(head_width=.0001, transform=kpTrans, length_includes_head=False)
arrow_patches += [FancyArrow(*_args, **_kwargs)]
arrow_patches2 += [FancyArrow(*_args, **_kwargs)]
# Draw circles around each of the 4x4 grid cells
circle_patches = []
for y, x in yx_gen:
circ_xy = (x * XYSCALE + XYSHIFT, y * XYSCALE + XYSHIFT)
circ_radius = DSCALE
circle_patches += [Circle(circ_xy, circ_radius, transform=kpTrans)]
# Efficiently draw many patches with PatchCollections
circ_collection = PatchCollection(circle_patches)
circ_collection.set_facecolor('none')
circ_collection.set_transform(axTrans)
circ_collection.set_edgecolor(BLACK)
circ_collection.set_alpha(.5)
# Body of arrows
arw_collection = PatchCollection(arrow_patches)
arw_collection.set_transform(axTrans)
arw_collection.set_linewidth(.5)
arw_collection.set_color(RED)
arw_collection.set_alpha(1)
# Border of arrows
arw_collection2 = matplotlib.collections.PatchCollection(arrow_patches2)
arw_collection2.set_transform(axTrans)
arw_collection2.set_linewidth(1)
arw_collection2.set_color(BLACK)
arw_collection2.set_alpha(1)
# Add artists to axes
ax.add_collection(circ_collection)
ax.add_collection(arw_collection2)
ax.add_collection(arw_collection)
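# Example usage (illustrative sketch; the descriptor and keypoint values are
# hypothetical): overlay a normalized random SIFT descriptor on a blank image,
# using a keypoint in (x, y, a, c, d) form.
def _example_draw_sift():
    desc = np.random.rand(128)
    desc = np.round(255 * desc / np.sqrt((desc ** 2).sum()))
    kp = [50, 50, 10, 0, 10]  # x, y and the lower-triangular shape [[a, 0], [c, d]]
    imshow(np.zeros((100, 100), dtype=np.uint8), fnum=next_fnum())
    draw_sift(desc, kp)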
def feat_scores_to_color(fs, cmap_='hot'):
assert len(fs.shape) == 1, 'score must be 1d'
cmap = plt.get_cmap(cmap_)
mins = fs.min()
rnge = fs.max() - mins
if rnge == 0:
return [cmap(.5) for fx in xrange(len(fs))]
score2_01 = lambda score: .1 + .9 * (float(score) - mins) / (rnge)
colors = [cmap(score2_01(score)) for score in fs]
return colors
def colorbar(scalars, colors):
'adds a color bar next to the axes'
orientation = ['vertical', 'horizontal'][0]
TICK_FONTSIZE = 8
# Put colors and scalars in correct order
sorted_scalars = sorted(scalars)
sorted_colors = [x for (y, x) in sorted(zip(scalars, colors))]
# Make a listed colormap and mappable object
listed_cmap = mpl.colors.ListedColormap(sorted_colors)
sm = plt.cm.ScalarMappable(cmap=listed_cmap)
sm.set_array(sorted_scalars)
# Use mapable object to create the colorbar
cb = plt.colorbar(sm, orientation=orientation)
# Add the colorbar to the correct label
axis = cb.ax.xaxis if orientation == 'horizontal' else cb.ax.yaxis
position = 'bottom' if orientation == 'horizontal' else 'right'
axis.set_ticks_position(position)
axis.set_ticks([0, .5, 1])
cb.ax.tick_params(labelsize=TICK_FONTSIZE)
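# Example usage (illustrative sketch): map feature scores onto the 'hot'
# colormap and attach a matching colorbar to the current axes.
def _example_score_colorbar():
    fs = np.array([.1, .5, .9, 2.0])
    colors = feat_scores_to_color(fs)
    plot(np.arange(len(fs)), fs, 'o')
    colorbar(fs, colors)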
def draw_lines2(kpts1, kpts2, fm=None, fs=None, kpts2_offset=(0, 0),
color_list=None, **kwargs):
if not DISTINCT_COLORS:
color_list = None
# input data
if not SHOW_LINES:
return
    if fm is None:  # assume kpts are in direct correspondence
        assert kpts1.shape == kpts2.shape
        fm = np.tile(np.arange(len(kpts1)), (2, 1)).T
    if len(fm) == 0:
return
ax = gca()
woff, hoff = kpts2_offset
# Draw line collection
kpts1_m = kpts1[fm[:, 0]].T
kpts2_m = kpts2[fm[:, 1]].T
xxyy_iter = iter(zip(kpts1_m[0],
kpts2_m[0] + woff,
kpts1_m[1],
kpts2_m[1] + hoff))
if color_list is None:
if fs is None: # Draw with solid color
color_list = [ LINE_COLOR for fx in xrange(len(fm))]
else: # Draw with colors proportional to score difference
color_list = feat_scores_to_color(fs)
segments = [((x1, y1), (x2, y2)) for (x1, x2, y1, y2) in xxyy_iter]
linewidth = [LINE_WIDTH for fx in xrange(len(fm))]
line_alpha = LINE_ALPHA
if LINE_ALPHA_OVERRIDE is not None:
line_alpha = LINE_ALPHA_OVERRIDE
line_group = LineCollection(segments, linewidth, color_list, alpha=line_alpha)
#plt.colorbar(line_group, ax=ax)
ax.add_collection(line_group)
#figure(100)
#plt.hexbin(x,y, cmap=plt.cm.YlOrRd_r)
def draw_kpts(kpts, *args, **kwargs):
draw_kpts2(kpts, *args, **kwargs)
def draw_kpts2(kpts, offset=(0, 0), ell=SHOW_ELLS, pts=False, pts_color=ORANGE,
pts_size=POINT_SIZE, ell_alpha=ELL_ALPHA,
ell_linewidth=ELL_LINEWIDTH, ell_color=ELL_COLOR,
color_list=None, rect=None, arrow=False, **kwargs):
if not DISTINCT_COLORS:
color_list = None
printDBG('drawkpts2: Drawing Keypoints! ell=%r pts=%r' % (ell, pts))
# get matplotlib info
ax = gca()
pltTrans = ax.transData
ell_actors = []
# data
kpts = np.array(kpts)
kptsT = kpts.T
x = kptsT[0, :] + offset[0]
y = kptsT[1, :] + offset[1]
printDBG('[df2] draw_kpts()----------')
printDBG('[df2] draw_kpts() ell=%r pts=%r' % (ell, pts))
printDBG('[df2] draw_kpts() drawing kpts.shape=%r' % (kpts.shape,))
if rect is None:
rect = ell
rect = False
if pts is True:
rect = False
if ell or rect:
printDBG('[df2] draw_kpts() drawing ell kptsT.shape=%r' % (kptsT.shape,))
# We have the transformation from unit circle to ellipse here. (inv(A))
a = kptsT[2]
b = np.zeros(len(a))
c = kptsT[3]
d = kptsT[4]
kpts_iter = izip(x, y, a, b, c, d)
aff_list = [Affine2D([( a_, b_, x_),
( c_, d_, y_),
( 0, 0, 1)])
for (x_, y_, a_, b_, c_, d_) in kpts_iter]
patch_list = []
ell_actors = [Circle( (0, 0), 1, transform=aff) for aff in aff_list]
if ell:
patch_list += ell_actors
if rect:
rect_actors = [Rectangle( (-1, -1), 2, 2, transform=aff) for aff in aff_list]
patch_list += rect_actors
if arrow:
_kwargs = dict(head_width=.01, length_includes_head=False)
arrow_actors1 = [FancyArrow(0, 0, 0, 1, transform=aff, **_kwargs) for aff in aff_list]
arrow_actors2 = [FancyArrow(0, 0, 1, 0, transform=aff, **_kwargs) for aff in aff_list]
patch_list += arrow_actors1
patch_list += arrow_actors2
ellipse_collection = matplotlib.collections.PatchCollection(patch_list)
ellipse_collection.set_facecolor('none')
ellipse_collection.set_transform(pltTrans)
if ELL_ALPHA_OVERRIDE is not None:
ell_alpha = ELL_ALPHA_OVERRIDE
ellipse_collection.set_alpha(ell_alpha)
ellipse_collection.set_linewidth(ell_linewidth)
if not color_list is None:
ell_color = color_list
if ell_color == 'distinct':
ell_color = distinct_colors(len(kpts))
ellipse_collection.set_edgecolor(ell_color)
ax.add_collection(ellipse_collection)
if pts:
printDBG('[df2] draw_kpts() drawing pts x.shape=%r y.shape=%r' % (x.shape, y.shape))
if color_list is None:
color_list = [pts_color for _ in xrange(len(x))]
ax.autoscale(enable=False)
ax.scatter(x, y, c=color_list, s=2 * pts_size, marker='o', edgecolor='none')
#ax.autoscale(enable=False)
#ax.plot(x, y, linestyle='None', marker='o', markerfacecolor=pts_color, markersize=pts_size, markeredgewidth=0)
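# Example usage (illustrative sketch; the keypoint values are hypothetical):
# keypoints are rows of (x, y, a, c, d), where [[a, 0], [c, d]] maps the unit
# circle onto the ellipse to draw.
def _example_draw_kpts2():
    kpts = np.array([[30, 40, 8, 0, 8],
                     [60, 20, 5, 2, 9]], dtype=np.float64)
    imshow(np.zeros((100, 100), dtype=np.uint8), fnum=next_fnum())
    draw_kpts2(kpts, ell=True, pts=True, color_list=distinct_colors(len(kpts)))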
# ---- CHIP DISPLAY COMMANDS ----
def imshow(img, fnum=None, title=None, figtitle=None, pnum=None,
interpolation='nearest', **kwargs):
'other interpolations = nearest, bicubic, bilinear'
#printDBG('[df2] ----- IMSHOW ------ ')
#printDBG('[***df2.imshow] fnum=%r pnum=%r title=%r *** ' % (fnum, pnum, title))
#printDBG('[***df2.imshow] img.shape = %r ' % (img.shape,))
#printDBG('[***df2.imshow] img.stats = %r ' % (helpers.printable_mystats(img),))
fig = figure(fnum=fnum, pnum=pnum, title=title, figtitle=figtitle, **kwargs)
ax = gca()
if not DARKEN is None:
imgdtype = img.dtype
img = np.array(img, dtype=float) * DARKEN
img = np.array(img, dtype=imgdtype)
plt_imshow_kwargs = {
'interpolation': interpolation,
#'cmap': plt.get_cmap('gray'),
'vmin': 0,
'vmax': 255,
}
try:
if len(img.shape) == 3 and img.shape[2] == 3:
# img is in a color format
imgBGR = img
if imgBGR.dtype == np.float64:
if imgBGR.max() <= 1:
imgBGR = np.array(imgBGR, dtype=np.float32)
else:
imgBGR = np.array(imgBGR, dtype=np.uint8)
imgRGB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2RGB)
ax.imshow(imgRGB, **plt_imshow_kwargs)
elif len(img.shape) == 2:
# img is in grayscale
imgGRAY = img
ax.imshow(imgGRAY, cmap=plt.get_cmap('gray'), **plt_imshow_kwargs)
else:
raise Exception('unknown image format')
except TypeError as te:
print('[df2] imshow ERROR %r' % te)
raise
except Exception as ex:
print('[df2] img.dtype = %r' % (img.dtype,))
print('[df2] type(img) = %r' % (type(img),))
print('[df2] img.shape = %r' % (img.shape,))
print('[df2] imshow ERROR %r' % ex)
raise
#plt.set_cmap('gray')
ax.set_xticks([])
ax.set_yticks([])
#ax.set_autoscale(False)
#try:
#if pnum == 111:
#fig.tight_layout()
#except Exception as ex:
#print('[df2] !! Exception durring fig.tight_layout: '+repr(ex))
#raise
return fig, ax
def get_num_channels(img):
ndims = len(img.shape)
if ndims == 2:
nChannels = 1
elif ndims == 3 and img.shape[2] == 3:
nChannels = 3
elif ndims == 3 and img.shape[2] == 1:
nChannels = 1
else:
raise Exception('Cannot determine number of channels')
return nChannels
def stack_images(img1, img2, vert=None):
nChannels = get_num_channels(img1)
nChannels2 = get_num_channels(img2)
assert nChannels == nChannels2
(h1, w1) = img1.shape[0: 2] # get chip dimensions
(h2, w2) = img2.shape[0: 2]
woff, hoff = 0, 0
vert_wh = max(w1, w2), h1 + h2
horiz_wh = w1 + w2, max(h1, h2)
if vert is None:
# Display the orientation with the better (closer to 1) aspect ratio
vert_ar = max(vert_wh) / min(vert_wh)
horiz_ar = max(horiz_wh) / min(horiz_wh)
vert = vert_ar < horiz_ar
if vert:
wB, hB = vert_wh
hoff = h1
else:
wB, hB = horiz_wh
woff = w1
    # concatenate images
if nChannels == 3:
imgB = np.zeros((hB, wB, 3), np.uint8)
imgB[0:h1, 0:w1, :] = img1
imgB[hoff:(hoff + h2), woff:(woff + w2), :] = img2
elif nChannels == 1:
imgB = np.zeros((hB, wB), np.uint8)
imgB[0:h1, 0:w1] = img1
imgB[hoff:(hoff + h2), woff:(woff + w2)] = img2
return imgB, woff, hoff
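# Example usage (illustrative sketch): stack two chips horizontally and recover
# the offset of the second chip inside the combined image.
def _example_stack_images():
    chip1 = np.zeros((40, 60, 3), np.uint8)
    chip2 = np.zeros((50, 30, 3), np.uint8)
    match_img, woff, hoff = stack_images(chip1, chip2, vert=False)
    print('[df2] stacked shape=%r offset=(%r, %r)' % (match_img.shape, woff, hoff))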
def show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=None, fs=None, title=None,
vert=None, fnum=None, pnum=None, **kwargs):
    '''Draws two chips and the feature matches between them. The keypoints
    kpts1 and kpts2 use the (x, y, a, c, d) format.
    '''
printDBG('[df2] draw_matches2() fnum=%r, pnum=%r' % (fnum, pnum))
# get matching keypoints + offset
(h1, w1) = rchip1.shape[0:2] # get chip (h, w) dimensions
(h2, w2) = rchip2.shape[0:2]
# Stack the compared chips
match_img, woff, hoff = stack_images(rchip1, rchip2, vert)
xywh1 = (0, 0, w1, h1)
xywh2 = (woff, hoff, w2, h2)
# Show the stacked chips
fig, ax = imshow(match_img, title=title, fnum=fnum, pnum=pnum)
    # Overlay feature match annotations
draw_fmatch(xywh1, xywh2, kpts1, kpts2, fm, fs, **kwargs)
return ax, xywh1, xywh2
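# Example usage (illustrative sketch; the chips, keypoints and scores are
# hypothetical): show two blank chips with two feature matches, where fm holds
# index pairs into kpts1/kpts2 and fs holds the match scores.
def _example_show_chipmatch2():
    rchip1 = np.zeros((100, 100, 3), np.uint8)
    rchip2 = np.zeros((120, 90, 3), np.uint8)
    kpts1 = np.array([[20, 20, 5, 0, 5], [60, 70, 6, 1, 6]], dtype=np.float64)
    kpts2 = np.array([[30, 25, 5, 0, 5], [70, 80, 6, 1, 6]], dtype=np.float64)
    fm = np.array([[0, 0], [1, 1]])
    fs = np.array([.9, .4])
    show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs,
                    title='demo match', fnum=next_fnum())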
# draw feature match
def draw_fmatch(xywh1, xywh2, kpts1, kpts2, fm, fs=None, lbl1=None, lbl2=None,
fnum=None, pnum=None, rect=False, colorbar_=True, **kwargs):
'''Draws the matching features. This is draw because it is an overlay
xywh1 - location of rchip1 in the axes
    xywh2 - location of rchip2 in the axes
'''
if fm is None:
assert kpts1.shape == kpts2.shape, 'shapes different or fm not none'
fm = np.tile(np.arange(0, len(kpts1)), (2, 1)).T
pts = kwargs.get('draw_pts', False)
ell = kwargs.get('draw_ell', True)
lines = kwargs.get('draw_lines', True)
ell_alpha = kwargs.get('ell_alpha', .4)
nMatch = len(fm)
#printDBG('[df2.draw_fnmatch] nMatch=%r' % nMatch)
x1, y1, w1, h1 = xywh1
x2, y2, w2, h2 = xywh2
offset2 = (x2, y2)
# Custom user label for chips 1 and 2
if lbl1 is not None:
absolute_lbl(x1 + w1, y1, lbl1)
if lbl2 is not None:
absolute_lbl(x2 + w2, y2, lbl2)
# Plot the number of matches
if kwargs.get('show_nMatches', False):
upperleft_text('#match=%d' % nMatch)
# Draw all keypoints in both chips as points
if kwargs.get('all_kpts', False):
all_args = dict(ell=False, pts=pts, pts_color=GREEN, pts_size=2,
ell_alpha=ell_alpha, rect=rect)
all_args.update(kwargs)
draw_kpts2(kpts1, **all_args)
draw_kpts2(kpts2, offset=offset2, **all_args)
# Draw Lines and Ellipses and Points oh my
if nMatch > 0:
colors = [kwargs['colors']] * nMatch if 'colors' in kwargs else distinct_colors(nMatch)
if fs is not None:
colors = feat_scores_to_color(fs, 'hot')
acols = add_alpha(colors)
# Helper functions
def _drawkpts(**_kwargs):
_kwargs.update(kwargs)
fxs1 = fm[:, 0]
fxs2 = fm[:, 1]
draw_kpts2(kpts1[fxs1], rect=rect, **_kwargs)
draw_kpts2(kpts2[fxs2], offset=offset2, rect=rect, **_kwargs)
def _drawlines(**_kwargs):
_kwargs.update(kwargs)
draw_lines2(kpts1, kpts2, fm, fs, kpts2_offset=offset2, **_kwargs)
# User helpers
if ell:
_drawkpts(pts=False, ell=True, color_list=colors)
if pts:
_drawkpts(pts_size=8, pts=True, ell=False, pts_color=BLACK)
_drawkpts(pts_size=6, pts=True, ell=False, color_list=acols)
if lines:
_drawlines(color_list=colors)
else:
draw_boxedX(xywh2)
if fs is not None and colorbar_ and 'colors' in vars() and colors is not None:
colorbar(fs, colors)
#legend()
return None
def draw_boxedX(xywh, color=RED, lw=2, alpha=.5, theta=0):
'draws a big red x. redx'
ax = gca()
x1, y1, w, h = xywh
x2, y2 = x1 + w, y1 + h
segments = [((x1, y1), (x2, y2)),
((x1, y2), (x2, y1))]
trans = Affine2D()
trans.rotate(theta)
trans = trans + ax.transData
width_list = [lw] * len(segments)
color_list = [color] * len(segments)
line_group = LineCollection(segments, width_list, color_list, alpha=alpha,
transOffset=trans)
ax.add_collection(line_group)
def disconnect_callback(fig, callback_type, **kwargs):
#print('[df2] disconnect %r callback' % callback_type)
axes = kwargs.get('axes', [])
for ax in axes:
ax._hs_viewtype = ''
cbid_type = callback_type + '_cbid'
cbfn_type = callback_type + '_func'
cbid = fig.__dict__.get(cbid_type, None)
cbfn = fig.__dict__.get(cbfn_type, None)
if cbid is not None:
fig.canvas.mpl_disconnect(cbid)
else:
cbfn = None
fig.__dict__[cbid_type] = None
return cbid, cbfn
def connect_callback(fig, callback_type, callback_fn):
#print('[df2] register %r callback' % callback_type)
if callback_fn is None:
return
cbid_type = callback_type + '_cbid'
cbfn_type = callback_type + '_func'
fig.__dict__[cbid_type] = fig.canvas.mpl_connect(callback_type, callback_fn)
fig.__dict__[cbfn_type] = callback_fn
|
apache-2.0
| -1,678,968,028,501,710,800
| 31.697605
| 119
| 0.588353
| false
| 3.052776
| false
| false
| false
|
silas/rock
|
rock/text.py
|
1
|
1235
|
from __future__ import unicode_literals
def _(text):
return text.strip('\n')
USAGE = _("""
Usage: rock [--help] [--env=ENV] [--path=PATH] [--runtime=RUNTIME] command
""")
HELP = _("""
--help show help message
--verbose show script while running
--dry-run show script without running
--version show version
project:
--env=ENV set env
--path=PATH set path
--runtime=RUNTIME set runtime
commands:
build run build
test run tests
run run in environment
clean clean project files
other commands:
config show project configuration
env show evaluable environment variables
init generates project skeleton
runtime show installed runtimes
""")
CONFIG_USAGE = _("""
Usage: rock config [--format=FORMAT]
""")
CONFIG_HELP = _("""
--help show help message
--format set output format (json, yaml)
""")
ENV_USAGE = _("""
Usage: rock env
""")
ENV_HELP = _("""
--help show help message
""")
RUNTIME_USAGE = _("""
Usage: rock runtime
""")
RUNTIME_HELP = _("""
--help show help message
""")
|
mit
| 5,519,456,527,590,050,000
| 20.293103
| 74
| 0.545749
| false
| 4.116667
| false
| false
| false
|
setsid/yacron
|
yacron/time.py
|
1
|
5052
|
"""
This file is part of yacron.
Copyright (C) 2016 Vadim Kuznetsov <vimusov@gmail.com>
yacron is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
yacron is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with yacron. If not, see <http://www.gnu.org/licenses/>.
"""
class CronTime(object):
"""
Parse and store scheduled time.
"""
def __init__(self, minutes, hours, weekdays):
"""
Parse and store the minutes, hours and weekdays values.
:param minutes: Minutes (str)
:param hours: Hours (str)
:param weekdays: Weekdays (str)
:raise ValueError if any of the values is invalid
"""
self._minutes = self._parse_value(0, minutes, 59)
self._hours = self._parse_value(0, hours, 23)
# slashes are unacceptable in weekdays value
self._weekdays = self._parse_value(1, weekdays, 7, slash_acceptable=False)
@property
def minutes(self):
return self._minutes
@property
def hours(self):
return self._hours
@property
def weekdays(self):
return self._weekdays
def _check_value_range(self, min_value, value, max_value):
"""
Check is value in range.
:param min_value: Minimal valid value
:param value: Value
:param max_value: Maximum valid value
:return True if the value is in range
:raise ValueError if the value is out of range
"""
if not (min_value <= value <= max_value):
raise ValueError("invalid value '{0:d}', must be in [{1:d}..{2:d}]".format(value, min_value, max_value))
return True
def _check_special_chars(self, value):
"""
Check special characters in the value:
1) value can not contains more than one '*' or '/' or '-' characters;
2) special characters can not be mixed (there can be the only one except ',');
:param value: Value.
:raise ValueError if any invalid sequence of special characters found in the value.
"""
all_count = value.count('*')
slash_count = value.count('/')
comma_count = value.count(',')
hyphen_count = value.count('-')
is_invalid = any((
all_count > 1,
slash_count > 1,
hyphen_count > 1,
all_count and (slash_count or comma_count or hyphen_count),
slash_count and (all_count or comma_count or hyphen_count),
comma_count and (all_count or slash_count or hyphen_count),
hyphen_count and (all_count or slash_count or comma_count),
))
if is_invalid:
raise ValueError("invalid format in value '{0:s}'".format(value))
def _parse_value(self, min_value, value, max_value, slash_acceptable=True):
"""
Parse and check a value.
:param min_value: Minimal valid value
:param value: Value
:param max_value: Maximum valid value
:param slash_acceptable: Slash is valid in the value
:return: List of values.
:raise ValueError if parsing failed
"""
self._check_special_chars(value)
if value == '*':
return list(range(min_value, max_value + 1))
if value.startswith('/'):
if not slash_acceptable:
raise ValueError("value '{0:s}' can not contains slash".format(value))
divisor = int(value[1:])
self._check_value_range(min_value, divisor, max_value)
return [n for n in range(min_value, max_value + 1) if n % divisor == 0]
if '-' in value:
start_value, stop_value = map(int, value.split('-'))
self._check_value_range(min_value, start_value, max_value)
self._check_value_range(min_value, stop_value, max_value)
if start_value >= stop_value:
raise ValueError("start value can not be greater or equal to stop value")
return list(range(start_value, stop_value + 1))
if ',' in value:
return [n for n in map(int, value.split(',')) if self._check_value_range(min_value, n, max_value)]
return [int(value)]
def check_time(self, cur_time):
"""
Compare parsed time and current time.
:param cur_time: Current time (datetime).
:return: True if current time matches with parser time and False otherwise
"""
return all((
cur_time.minute in self._minutes,
cur_time.hour in self._hours,
cur_time.isoweekday() in self._weekdays,
))
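if __name__ == '__main__':
    # Example usage (illustrative sketch): run every 15 minutes, between 09:00
    # and 17:59, on Monday, Wednesday and Friday; compare against the current time.
    from datetime import datetime
    schedule = CronTime('/15', '9-17', '1,3,5')
    print(schedule.minutes)
    print(schedule.check_time(datetime.now()))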
|
gpl-3.0
| 7,801,523,714,869,108,000
| 35.345324
| 116
| 0.598773
| false
| 4.103981
| false
| false
| false
|
UCSC-iGEM-2016/taris_controller
|
taris_controller/taris_sensor.py
|
1
|
9944
|
#!/usr/bin/python
from __future__ import print_function
import io # used to create file streams
import fcntl # used to access I2C parameters like addresses
import sys
import time # used for sleep delay and timestamps
class Taris_Sensor():
''' This object holds all required interface data for the Atlas Scientific \
EZO pH and RTD sensors. Built off of the base library, with new functions \
added for calibration and additional testing. '''
def __init__(self, address, bus):
# open two file streams, one for reading and one for writing
# the specific I2C channel is selected with bus
# it is usually 1, except for older revisions where it's 0
# wb and rb indicate binary read and write
self.file_read = io.open("/dev/i2c-"+str(bus), "rb", buffering=0)
self.file_write = io.open("/dev/i2c-"+str(bus), "wb", buffering=0)
# initializes I2C to either a user specified or default address
self.set_i2c_address(address)
self.cal_timeout = 1.6 # timeout for calibrations
self.read_timeout = 1.0 # timeout for reads
self.short_timeout = 0.3 # timeout for regular commands
# Set if testing board
self.DEBUG = True
def set_i2c_address(self, addr):
'''Set the I2C communications to the slave specified by the address. \
The commands for I2C dev using the ioctl functions are specified in \
the i2c-dev.h file from i2c-tools'''
I2C_SLAVE = 0x703
fcntl.ioctl(self.file_read, I2C_SLAVE, addr)
fcntl.ioctl(self.file_write, I2C_SLAVE, addr)
def write(self, cmd):
'''Writes a command to the sensor.'''
# appends the null character and sends the string over I2C
cmd += "\00"
self.file_write.write(cmd)
def read(self, num_of_bytes=31,startbit=1):
'''Reads data from the sensor and parses the incoming response.'''
# reads a specified number of bytes from I2C, then parses and displays the result
res = self.file_read.read(num_of_bytes) # read from the board
response = filter(lambda x: x != '\x00', res) # remove the null characters to get the response
if ord(response[0]) == 1: # if the response isn't an error
# change MSB to 0 for all received characters except the first and get a list of characters
char_list = map(lambda x: chr(ord(x) & ~0x80), list(response[startbit:]))
# NOTE: having to change the MSB to 0 is a glitch in the raspberry pi, and you shouldn't have to do this!
return ''.join(char_list) # convert the char list to a string and returns it
else:
return "Error " + str(ord(response[0]))
def query(self, string, start=1):
'''For commands that require a write, a wait, and a response. For instance, \
calibration requires writing an initial CAL command, waiting 300ms, \
then checking for a pass/fail indicator message.'''
# write a command to the board, wait the correct timeout, and read the response
self.write(string)
# the read and calibration commands require a longer timeout
if string.upper().startswith("R"):
time.sleep(self.read_timeout)
elif string.upper().startswith("CAL"):
time.sleep(self.cal_timeout)
else:
time.sleep(self.short_timeout)
return self.read(startbit=start)
def verify(self):
'''Verifies that the sensor is connected, also returns firmware version.'''
device_ID = self.query("I")
if device_ID.startswith("?I"):
print("Connected sensor: " + str(device_ID)[3:])
else:
raw_input("EZO not connected: " + device_ID)
def close(self):
'''Closes the sensor's filestream, not usually required.'''
self.file_read.close()
self.file_write.close()
def getData(self):
'''Gets data from sensor reading as a float.'''
data = self.query("R")
return float(data)
def cal_wait(self, cal_time):
'''UI for waiting for pH sensor to stabilize during calibration'''
x=1
if self.DEBUG == True:
cal_time = 4
while x<cal_time:
if x==1:
sys.stdout.write("Please wait for sensor to stabilize:")
else:
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(1)
x+=1
print('\n')
def pH_calibrateSensor(self):
'''Performs pH sensor calibration using included buffers.'''
# Clear previous calibration data
print("Starting pH sensor calibration...")
q = self.query("Cal,clear", 0)
if str(ord(q)) != '1':
print("Calibration failed with response " + str(q))
time.sleep(2)
return False
# Midpoint calibration. This will also reset previous data.
raw_input("Please rinse probe. Press [Enter] when pH 7 buffer is loaded.")
self.cal_wait(60)
mid_pH = "7.00"
q = self.query("CAL,MID," + mid_pH, 0)
if str(ord(q)) != '1':
print("Calibration failed with response " + str(q))
time.sleep(2)
return False
# Lowpoint calibration
raw_input("Please rinse probe. Press [Enter] when pH 4 buffer is loaded.")
self.cal_wait(60)
low_pH = "4.00"
q = self.query("CAL,LOW," + low_pH, 0)
if str(ord(q)) != '1':
print("Calibration failed with response " + str(q))
time.sleep(2)
return False
# Highpoint calibration
raw_input("Please rinse probe. Press [Enter] when pH 10 buffer is loaded.")
self.cal_wait(60)
high_pH = "10.00"
q = self.query("CAL,HIGH," + high_pH, 0)
if str(ord(q)) != '1':
print("Calibration failed with response " + str(q))
time.sleep(2)
return False
q = str(self.query("Cal,?"))
# Check that 3-point calibration is complete, otherwise return ERROR
if q != "?CAL,3":
print("Three point calibration incomplete!" + str(q))
cal_response = raw_input("Enter 'R' to retry or Enter to exit.")
if cal_response == "R" or cal_response == "r":
self.pH_calibrateSensor()
else:
return False
print("Three point pH calibration complete!")
time.sleep(1)
return True
def temp_calibrateSensor(self):
'''Calibrates the temperature sensor. Requires an external thermometer.'''
print("Clearing previous temperature calibration.")
q = str(ord(self.query("Cal,clear\0x0d", 0)))
if q == "1":
cal_temp = raw_input("Enter room temperature\n>>")
self.cal_wait(5)
q = str(ord(self.query("Cal,"+str(cal_temp) + "\0x0d", 0)))
if q == "1":
q = str(self.query("Cal,?"))
if q == "?CAL,1":
print("One point temperature calibration complete!")
return True
elif q == "?CAL,0":
print("One point temperature calibration incomplete!")
cal_response = raw_input("Enter R to retry or Enter to exit.")
if cal_response == "R" or cal_response == "r":
self.temp_calibrateSensor()
else:
return False
else:
print("Error setting new calibration temperature: " + str(q))
time.sleep(1)
return False
else:
print("Could not set new calibration temperature: " + str(q))
time.sleep(1)
return False
else:
print("Could not clear RTD sensor: " + str(q))
time.sleep(1)
return False
return False
def pH_compensateTemp(self,temp):
'''Compensates the pH sensor for temperature, is used in conjunction with \
a reading from the RTD sensor.'''
comp_status = self.query("T," + str(temp),0)
if str(ord(comp_status)) != '1':
print("Temperature compensation failed!: ")
time.sleep(2)
return False
else:
comp_status = str(self.query("T,?"))
print("Temperature compensation set for: " + comp_status[3:] + u'\xb0' + "C")
time.sleep(2)
return False
def lockProtocol(self,command):
'''Not currently working. Normally used for locking some of the \
internal parameters (e.g. baud rate for UART mode).'''
read_bytes = 9
print("1.\tDisconnect power to device and any signal wires.\n\
2.\tShort PRB to TX.\n\
3.\tTurn device on and wait for LED to change to blue.\n\
4.\tRemove short from PRB to TX, then restart device.\n\
5.\tConnect data lines to Raspberry Pi I2C pins.")
raw_input("Press Enter when this is complete.")
raw_input("Press Enter to prevent further changes to device configuration.")
command_message = "PLOCK," + str(command)
        self.write(command_message)
        time.sleep(0.3)
        lock_status = self.read(read_bytes)
if lock_status == "?PLOCK,1":
print("Sensor settings locked.")
return_code = 1
elif lock_status == "?PLOCK,0":
print("Sensor settings unlocked.")
return_code = 0
else:
print("False locking sensor settings.")
return False
return return_code
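if __name__ == '__main__':
    # Example usage (illustrative sketch): the I2C address 99 (the EZO pH
    # default) and bus 1 below are assumptions; adjust them to the actual wiring.
    ph_sensor = Taris_Sensor(99, 1)
    ph_sensor.verify()
    print("pH reading: " + str(ph_sensor.getData()))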
|
gpl-3.0
| 2,327,405,068,719,227,000
| 38.776
| 117
| 0.559433
| false
| 4.107394
| false
| false
| false
|
lezizi/A-Framework
|
python/local-source/source.py
|
1
|
2324
|
#!/usr/bin/env python
#
# Copyright (C) 2012 LeZiZi Studio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class SourceHandler():
'''
Provides basic source handling.
Property:
source: source object
'''
from base import Source
def __init__(self, source=None):
if source is None:
self.source = self.Source()
else:
self.source = source
    def append(self, action):
        '''
        Append an Action to current source.
        Argument:
            action: An Action.
        Return:
            Boolean. True for success and False when action exists.
        '''
        if self.source.list.count(action) == 0:
            self.source.list.append(action)
            return(True)
        else:
            return(False)
    def delete(self, act):
        '''
        Argument:
            act: An Action OR a string of action key.
        Return:
            Boolean. True for success.
        '''
        if self.source.list.count(act) != 0:
            del(self.source.list[self.source.list.index(act)])
            return(True)
        else:
            return(False)
    def join(self, source):
        '''
        Copy actions from another source into the current source.
        '''
        for each in source.list:
            if self.source.list.count(each) == 0:
                self.source.list.append(each)
def match(self,ingroups=[],outgroups=[],implementation=None,key=None):
### NOT YET IMP ##
pass
def test():
from base import Action
b = Action()
b.key = "1"
c = Action()
c.key = "1"
print(cmp(b,c))
a = SourceHandler()
print(a.append(b))
print(a.append(c))
print(a.source.list)
print(a.delete(b))
#for each in dir(a):
# print(getattr(a,each))
# test()
|
apache-2.0
| -1,190,390,218,465,255,400
| 25.023256
| 76
| 0.547762
| false
| 4.070053
| false
| false
| false
|
lyndsysimon/hgrid-git-example
|
app.py
|
1
|
1874
|
from flask import Flask, jsonify, render_template, request
import json
import os
import tempfile
app = Flask(__name__)
from git_subprocess import Repository
repo_path = '/tmp/test/'
# Set up a git repository for a storage backend
repo = Repository(repo_path or tempfile.mkdtemp())
repo.init()
# Homepage - just render the template
@app.route('/')
def index():
return render_template('index.html')
# DELETE verb
@app.route('/api/files/', methods=['DELETE', ])
def delete_files():
# since multiple items could be deleted at once, iterate the list.
for id in json.loads(request.form.get('ids', '[]')):
repo._rm_file(id)
repo.commit(
author='Internet User <anon@inter.net>',
message='Deleted file(s)',
)
return jsonify({'deleted': request.form.get('ids')})
# GET verb
@app.route('/api/files/', methods=['GET', ])
def get_files():
return jsonify({
'files': [
_file_dict(f)
for f in os.listdir(repo.path)
if os.path.isfile(os.path.join(repo.path, f))
]
})
# POST verb
@app.route('/api/files/', methods=['POST', ])
def add_file():
f = request.files.get('file')
# write the file out to its new location
new_path = os.path.join(repo.path, f.filename)
with open(new_path, 'w') as outfile:
outfile.write(f.read())
# add it to git and commit
repo.add_file(
file_path=f.filename,
commit_author='Internet User <anon@inter.net>',
commit_message='Commited file {}'.format(f.filename)
)
return json.dumps([_file_dict(new_path), ])
def _file_dict(f):
return {
'uid': f,
'name': f,
'size': os.path.getsize(os.path.join(repo.path, f)),
'type': 'file',
'parent_uid': 'null'
}
if __name__ == '__main__':
app.run(debug=True, port=5000)
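# Hypothetical client sketch (not part of the app): exercises the three routes
# above against a locally running server; assumes the `requests` package is
# installed. It is only defined here, never called automatically.
def _example_client(base='http://localhost:5000/api/files/'):
    import requests
    # create a small file, upload it, list the repo contents, then delete it
    with open('hello.txt', 'w') as fh:
        fh.write('hello world')
    with open('hello.txt') as fh:
        requests.post(base, files={'file': fh})
    print(requests.get(base).json())
    requests.delete(base, data={'ids': json.dumps(['hello.txt'])})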
|
bsd-2-clause
| 4,562,782,882,468,337,000
| 23.337662
| 70
| 0.593917
| false
| 3.432234
| false
| false
| false
|
lliss/tr-55
|
tr55/model.py
|
1
|
14151
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
"""
TR-55 Model Implementation
A mapping between variable/parameter names found in the TR-55 document
and variables used in this program is as follows:
* `precip` is referred to as P in the report
* `runoff` is Q
* `evaptrans` maps to ET, the evapotranspiration
* `inf` is the amount of water that infiltrates into the soil (in inches)
* `init_abs` is Ia, the initial abstraction, another form of infiltration
"""
import copy
from tr55.tablelookup import lookup_cn, lookup_bmp_infiltration, \
lookup_ki, is_bmp, is_built_type, make_precolumbian, get_pollutants
from tr55.water_quality import get_volume_of_runoff, get_pollutant_load
from tr55.operations import dict_plus
def runoff_pitt(precip, land_use):
"""
The Pitt Small Storm Hydrology method. The output is a runoff
value in inches.
"""
c1 = +3.638858398e-2
c2 = -1.243464039e-1
c3 = +1.295682223e-1
c4 = +9.375868043e-1
c5 = -2.235170859e-2
c6 = +0.170228067e+0
c7 = -3.971810782e-1
c8 = +3.887275538e-1
c9 = -2.289321859e-2
p4 = pow(precip, 4)
p3 = pow(precip, 3)
p2 = pow(precip, 2)
impervious = (c1 * p3) + (c2 * p2) + (c3 * precip) + c4
urb_grass = (c5 * p4) + (c6 * p3) + (c7 * p2) + (c8 * precip) + c9
runoff_vals = {
'open_water': impervious,
'developed_low': 0.20 * impervious + 0.80 * urb_grass,
'cluster_housing': 0.20 * impervious + 0.80 * urb_grass,
'developed_med': 0.65 * impervious + 0.35 * urb_grass,
'developed_high': impervious,
'developed_open': urb_grass
}
if land_use not in runoff_vals:
raise Exception('Land use %s not a built-type.' % land_use)
else:
return min(runoff_vals[land_use], precip)
def nrcs_cutoff(precip, curve_number):
"""
A function to find the cutoff between precipitation/curve number
pairs that have zero runoff by definition, and those that do not.
"""
if precip <= -1 * (2 * (curve_number - 100.0) / curve_number):
return True
else:
return False
def runoff_nrcs(precip, evaptrans, soil_type, land_use):
"""
The runoff equation from the TR-55 document. The output is a
runoff value in inches.
"""
if land_use == 'cluster_housing':
land_use = 'developed_low'
curve_number = lookup_cn(soil_type, land_use)
if nrcs_cutoff(precip, curve_number):
return 0.0
potential_retention = (1000.0 / curve_number) - 10
initial_abs = 0.2 * potential_retention
precip_minus_initial_abs = precip - initial_abs
numerator = pow(precip_minus_initial_abs, 2)
denominator = (precip_minus_initial_abs + potential_retention)
runoff = numerator / denominator
return min(runoff, precip - evaptrans)
def simulate_cell_day(precip, evaptrans, cell, cell_count):
"""
Simulate a bunch of cells of the same type during a one-day event.
`precip` is the amount of precipitation in inches.
`evaptrans` is evapotranspiration.
`cell` is a string which contains a soil type and land use
separated by a colon.
`cell_count` is the number of cells to simulate.
The return value is a dictionary of runoff, evapotranspiration, and
infiltration as volumes of water.
"""
def clamp(runoff, et, inf, precip):
"""
        This function ensures that runoff + et + inf <= precip.
        NOTE: infiltration is normally independent of the
        precipitation level, but this function introduces a slight
        dependency (that is, at very low levels of precipitation, this
        function can cause infiltration to be smaller than it
        ordinarily would be).
"""
total = runoff + et + inf
if (total > precip):
scale = precip / total
runoff *= scale
et *= scale
inf *= scale
return (runoff, et, inf)
precip = max(0.0, precip)
soil_type, land_use, bmp = cell.lower().split(':')
# If there is no precipitation, then there is no runoff or
# infiltration. There is evapotranspiration, however (it is
# understood that over a period of time, this can lead to the sum
# of the three values exceeding the total precipitation).
if precip == 0.0:
return {
'runoff-vol': 0.0,
# 'et-vol': cell_count * evaptrans,
'et-vol': 0.0,
'inf-vol': 0.0,
}
# Deal with the Best Management Practices (BMPs). For most BMPs,
# the infiltration is read from the table and the runoff is what
# is left over after infiltration and evapotranspiration. Rain
# gardens are treated differently.
if bmp and is_bmp(bmp) and bmp != 'rain_garden':
inf = lookup_bmp_infiltration(soil_type, bmp) # infiltration
runoff = max(0.0, precip - (evaptrans + inf)) # runoff
(runoff, evaptrans, inf) = clamp(runoff, evaptrans, inf, precip)
return {
'runoff-vol': cell_count * runoff,
'et-vol': cell_count * evaptrans,
'inf-vol': cell_count * inf
}
elif bmp and bmp == 'rain_garden':
# Here, return a mixture of 20% ideal rain garden and 80%
# high-intensity residential.
inf = lookup_bmp_infiltration(soil_type, bmp)
runoff = max(0.0, precip - (evaptrans + inf))
hi_res_cell = soil_type + ':developed_med:'
hi_res = simulate_cell_day(precip, evaptrans, hi_res_cell, 1)
hir_run = hi_res['runoff-vol']
hir_et = hi_res['et-vol']
hir_inf = hi_res['inf-vol']
final_runoff = (0.2 * runoff + 0.8 * hir_run)
final_et = (0.2 * evaptrans + 0.8 * hir_et)
final_inf = (0.2 * inf + 0.8 * hir_inf)
final = clamp(final_runoff, final_et, final_inf, precip)
(final_runoff, final_et, final_inf) = final
return {
'runoff-vol': cell_count * final_runoff,
'et-vol': cell_count * final_et,
'inf-vol': cell_count * final_inf
}
# At this point, if the `bmp` string has non-zero length, it is
# equal to either 'no_till' or 'cluster_housing'.
if bmp and bmp != 'no_till' and bmp != 'cluster_housing':
raise KeyError('Unexpected BMP: %s' % bmp)
land_use = bmp or land_use
# When the land use is a built-type and the level of precipitation
# is two inches or less, use the Pitt Small Storm Hydrology Model.
# When the land use is a built-type but the level of precipitation
# is higher, the runoff is the larger of that predicted by the
# Pitt model and NRCS model. Otherwise, return the NRCS amount.
if is_built_type(land_use) and precip <= 2.0:
runoff = runoff_pitt(precip, land_use)
elif is_built_type(land_use):
pitt_runoff = runoff_pitt(2.0, land_use)
nrcs_runoff = runoff_nrcs(precip, evaptrans, soil_type, land_use)
runoff = max(pitt_runoff, nrcs_runoff)
else:
runoff = runoff_nrcs(precip, evaptrans, soil_type, land_use)
inf = max(0.0, precip - (evaptrans + runoff))
(runoff, evaptrans, inf) = clamp(runoff, evaptrans, inf, precip)
return {
'runoff-vol': cell_count * runoff,
'et-vol': cell_count * evaptrans,
'inf-vol': cell_count * inf,
}
def create_unmodified_census(census):
"""
This creates a cell census, ignoring any modifications. The
output is suitable for use with `simulate_water_quality`.
"""
unmod = copy.deepcopy(census)
unmod.pop('modifications', None)
return unmod
def create_modified_census(census):
"""
This creates a cell census, with modifications, that is suitable
for use with `simulate_water_quality`.
For every type of cell that undergoes modification, the
modifications are indicated with a sub-distribution under that
cell type.
"""
mod = copy.deepcopy(census)
mod.pop('modifications', None)
for (cell, subcensus) in mod['distribution'].items():
n = subcensus['cell_count']
changes = {
'distribution': {
cell: {
'distribution': {
cell: {'cell_count': n}
}
}
}
}
mod = dict_plus(mod, changes)
for modification in (census.get('modifications') or []):
for (orig_cell, subcensus) in modification['distribution'].items():
n = subcensus['cell_count']
soil1, land1 = orig_cell.split(':')
soil2, land2, bmp = modification['change'].split(':')
changed_cell = '%s:%s:%s' % (soil2 or soil1, land2 or land1, bmp)
changes = {
'distribution': {
orig_cell: {
'distribution': {
orig_cell: {'cell_count': -n},
changed_cell: {'cell_count': n}
}
}
}
}
mod = dict_plus(mod, changes)
return mod
def simulate_water_quality(tree, cell_res, fn,
current_cell=None, precolumbian=False):
"""
Perform a water quality simulation by doing simulations on each of
the cell types (leaves), then adding them together by summing the
values of a node's subtrees and storing them at that node.
`tree` is the (sub)tree of cell distributions that is currently
under consideration.
`cell_res` is the size of each cell (used for turning inches of
water into volumes of water).
`fn` is a function that takes a cell type and a number of cells
and returns a dictionary containing runoff, et, and inf as
volumes.
`current_cell` is the cell type for the present node.
"""
# Internal node.
if 'cell_count' in tree and 'distribution' in tree:
n = tree['cell_count']
# simulate subtrees
if n != 0:
tally = {}
for cell, subtree in tree['distribution'].items():
simulate_water_quality(subtree, cell_res, fn,
cell, precolumbian)
subtree_ex_dist = subtree.copy()
subtree_ex_dist.pop('distribution', None)
tally = dict_plus(tally, subtree_ex_dist)
tree.update(tally) # update this node
# effectively a leaf
elif n == 0:
for pol in get_pollutants():
tree[pol] = 0.0
# Leaf node.
elif 'cell_count' in tree and 'distribution' not in tree:
# the number of cells covered by this leaf
n = tree['cell_count']
# canonicalize the current_cell string
split = current_cell.split(':')
if (len(split) == 2):
split.append('')
if precolumbian:
split[1] = make_precolumbian(split[1])
current_cell = '%s:%s:%s' % tuple(split)
# run the runoff model on this leaf
result = fn(current_cell, n) # runoff, et, inf
tree.update(result)
# perform water quality calculation
if n != 0:
soil_type, land_use, bmp = split
runoff_per_cell = result['runoff-vol'] / n
liters = get_volume_of_runoff(runoff_per_cell, n, cell_res)
for pol in get_pollutants():
tree[pol] = get_pollutant_load(land_use, pol, liters)
def postpass(tree):
"""
Remove volume units and replace them with inches.
"""
if 'cell_count' in tree:
if tree['cell_count'] > 0:
n = tree['cell_count']
tree['runoff'] = tree['runoff-vol'] / n
tree['et'] = tree['et-vol'] / n
tree['inf'] = tree['inf-vol'] / n
else:
tree['runoff'] = 0
tree['et'] = 0
tree['inf'] = 0
tree.pop('runoff-vol', None)
tree.pop('et-vol', None)
tree.pop('inf-vol', None)
if 'distribution' in tree:
for subtree in tree['distribution'].values():
postpass(subtree)
def simulate_modifications(census, fn, cell_res, precolumbian=False):
"""
Simulate effects of modifications.
`census` contains a distribution of cell-types in the area of interest.
`fn` is as described in `simulate_water_quality`.
`cell_res` is as described in `simulate_water_quality`.
"""
mod = create_modified_census(census)
simulate_water_quality(mod, cell_res, fn, precolumbian=precolumbian)
postpass(mod)
unmod = create_unmodified_census(census)
simulate_water_quality(unmod, cell_res, fn, precolumbian=precolumbian)
postpass(unmod)
return {
'unmodified': unmod,
'modified': mod
}
def simulate_day(census, precip, cell_res=10, precolumbian=False):
"""
Simulate a day, including water quality effects of modifications.
`census` contains a distribution of cell-types in the area of interest.
`cell_res` is as described in `simulate_water_quality`.
`precolumbian` indicates that artificial types should be turned
into forest.
"""
et_max = 0.207
if 'modifications' in census:
verify_census(census)
def fn(cell, cell_count):
# Compute et for cell type
split = cell.split(':')
if (len(split) == 2):
(land_use, bmp) = split
else:
(_, land_use, bmp) = split
et = et_max * lookup_ki(bmp or land_use)
# Simulate the cell for one day
return simulate_cell_day(precip, et, cell, cell_count)
return simulate_modifications(census, fn, cell_res, precolumbian)
def verify_census(census):
"""
Assures that there is no soil type/land cover pair
in a modification census that isn't in the AoI census.
"""
for modification in census['modifications']:
for land_cover in modification['distribution']:
if land_cover not in census['distribution']:
raise ValueError("Invalid modification census")
|
apache-2.0
| -1,927,152,914,711,812,900
| 33.098795
| 77
| 0.596636
| false
| 3.429714
| false
| false
| false
|
luwei0917/awsemmd_script
|
small_script/computeRg.py
|
1
|
2040
|
from Bio.PDB.PDBParser import PDBParser
import argparse
parser = argparse.ArgumentParser(description="Compute Rg of pdb")
parser.add_argument("pdb", help="pdb file")
args = parser.parse_args()
def computeRg(pdb_file, chain="A"):
# compute Radius of gyration
# pdb_file = f"/Users/weilu/Research/server/feb_2019/iterative_optimization_new_temp_range/all_simulations/{p}/{p}/crystal_structure.pdb"
chain_name = chain
parser = PDBParser()
structure = parser.get_structure('X', pdb_file)
chain = list(structure[0][chain_name])
all_res = list(structure.get_residues())
# n = len(all_res)
# n = len(chain)
regular_res_list = [res for res in all_res if res.get_id()[0] == ' ']
n = len(regular_res_list)
print("all chains")
cutoff = 15
for residue in regular_res_list:
if residue.get_id()[0] == ' ' and abs(residue["CA"].get_vector()[-1]) < cutoff:
print(residue.get_id()[1])
rg = 0.0
for i, residue_i in enumerate(regular_res_list):
for j, residue_j in enumerate(regular_res_list[i+1:]):
            try:
                r = residue_i["CA"] - residue_j["CA"]
            except KeyError:
                # skip residue pairs that are missing a CA atom
                print(residue_i, residue_j)
                continue
            rg += r**2
return (rg/(n**2))**0.5
rg = computeRg(args.pdb)
print(rg)
def cylindrical_rg_bias_term(oa, k_rg=4.184, rg0=0, atomGroup=-1, forceGroup=27):
    # CustomBondForce/CustomCVForce come from OpenMM; the import is assumed here
    # (older installs expose them as simtk.openmm instead of openmm).
    from openmm import CustomBondForce, CustomCVForce
    nres, ca = oa.nres, oa.ca
if atomGroup == -1:
group = list(range(nres))
else:
group = atomGroup # atomGroup = [0, 1, 10, 12] means include residue 1, 2, 11, 13.
n = len(group)
rg_square = CustomBondForce("1/normalization*(x^2+y^2)")
# rg = CustomBondForce("1")
rg_square.addGlobalParameter("normalization", n*n)
for i in group:
for j in group:
if j <= i:
continue
rg_square.addBond(ca[i], ca[j], [])
rg = CustomCVForce(f"{k_rg}*(rg_square^0.5-{rg0})^2")
rg.addCollectiveVariable("rg_square", rg_square)
rg.setForceGroup(forceGroup)
return rg
|
mit
| 9,124,268,330,187,088,000
| 35.428571
| 141
| 0.59951
| false
| 3.022222
| false
| false
| false
|
Airbitz/airbitz-ofx
|
qbo.py
|
1
|
7851
|
#####################################################################
# #
# File: qbo.py #
# Developer: Justin Leto #
# #
# qbo class provides an interface from main csv iterator method #
# to handle qbo formatting, validations, and writing to file. #
# #
# Usage: python csvtoqbo.py <options> <csvfiles> #
# #
#####################################################################
import sys, traceback
import os
from datetime import datetime
import logging
import qboconst
class qbo:
# Holds a list of valid transactions via the addTransaction() method
__transactions = list()
# The full QBO document build from constants and transactions
__document = None
# Flag indicating whether the QBO document is valid
__isValid = None
# constructor
def __init__(self):
# Reads in constant values from file, set to private (const) variables
self.__HEADER = qboconst.HEADER
self.__FOOTER = qboconst.FOOTER
self.__DATE_START = qboconst.DATE_START
self.__DATE_END = qboconst.DATE_END
self.__BANKTRANLIST_START = qboconst.BANKTRANLIST_START
self.__BANKTRANLIST_END = qboconst.BANKTRANLIST_END
self.__TRANSACTION_START = qboconst.TRANSACTION_START
self.__TRANSACTION_END = qboconst.TRANSACTION_END
# Set document to valid
self.__isValid = True
# PUBLIC GET METHODS for constant values - used in unit testing.
#
#
def getHEADER(self):
return self.__HEADER
def getFOOTER(self):
return self.__FOOTER
def getDATE_START(self):
return self.__DATE_START
def getDATE_END(self):
return self.__DATE_END
def getBANKTRANLIST_START(self):
return self.__BANKTRANLIST_START
def getBANKTRANLIST_END(self):
return self.__BANKTRANLIST_END
def getTRANSACTION_START(self):
return self.__TRANSACTION_START
def getTRANSACTION_END(self):
return self.__TRANSACTION_END
    # method to validate parameters used to submit transactions
def validateTransaction(self, status, date_posted, txn_type, to_from_flag, txn_amount, txn_exrate, name):
# if str.lower(status) != 'completed':
# #log status failure
# logging.info("Transaction status [" + status + "] invalid.")
# raise Exception("Transaction status [" + status + "] invalid.")
#
#if type(datetime.strptime(str(date_posted), '%m/%d/%Y')) is not datetime:
# logging.info("Transaction posted date [" + date_posted + "] invalid.")
# raise Exception("Transaction posted date [" + date_posted + "] invalid.")
# if str.lower(txn_type) not in ('payment','refund','withdrawal', 'withdraw funds', 'send', 'receive'):
# logging.info("Transaction type [" + str(txn_type) + "] not 'Payment', 'Refund', 'Withdraw Funds', or 'Withdrawal'.")
# raise Exception("Transaction type [" + str(txn_type) + "] not 'Payment', 'Refund', 'Withdraw Funds', or 'Withdrawal'.")
#
# if str.lower(to_from_flag) not in ('to', 'from'):
# logging.info("Transaction 'To/From' field [" + to_from_flag + "] invalid.")
# raise Exception("Transaction 'To/From' field [" + to_from_flag + "] invalid.")
#
# #logical test of txn_type and to_from_flag
# if ((str.lower(txn_type) == 'refund' and str.lower(to_from_flag) != 'to') or (str.lower(txn_type) == 'payment' and str.lower(to_from_flag) != 'from')):
# logging.info("Transaction type inconsistent with 'To/From' field.")
# raise Exception("Transaction type inconsistent with 'To/From' field.")
#
if len(name) == 0 or not name:
logging.info("Transaction name empty or null.")
raise Exception("Transaction name empty or null.")
return True
    # Add transaction takes in param values, applies the required QBO
    # transaction formatting, and pushes the result to the list
def addTransaction(self, denom, date_posted, txn_memo, txn_id, txn_amount, txn_curamt, txn_category, name):
# try:
# # Validating param values prior to committing transaction
# self.validateTransaction(status, date_posted, txn_type, txn_id, txn_amount, name)
# except:
# raise Exception
# Construct QBO formatted transaction
transaction = ""
day = ""
month = ""
date_array = date_posted.split('-')
day = date_array[2]
month = date_array[1]
year = date_array[0]
if len(day) == 1:
day = "0"+day
if len(month) ==1:
month = "0"+month
rec_date = datetime.strptime(year+"/"+month+"/"+day, '%Y/%m/%d')
rec_date = rec_date.strftime('%Y%m%d%H%M%S') + '.000'
dtposted = ' <DTPOSTED>' + rec_date
if float(txn_amount) > 0:
trtype = ' <TRNTYPE>CREDIT'
else:
trtype = ' <TRNTYPE>DEBIT'
#
# if str.lower(txn_type) == 'receive':
# trtype = '<TRNTYPE>CREDIT'
# elif str.lower(txn_type) == 'send':
# trtype = '<TRNTYPE>DEBIT'
# if str.lower(txn_type) in ('refund', 'withdrawal', 'withdraw funds'):
# tramt = '<TRNAMT>-' + str(txn_amount).replace('$','')
# else:
# tramt = '<TRNAMT>' + str(txn_amount).replace('$','')
tramtbits = float(txn_amount) * denom
tramt = ' <TRNAMT>' + str(tramtbits)
if name:
trname = ' <NAME>' + str(name) + "\n"
else:
trname = ''
exrate = float(txn_curamt) / (tramtbits)
curamt = "{0:0.2f}".format(abs(float(txn_curamt)))
fmtexrate = "{0:0.6f}".format(float(exrate))
rawmemo = 'Rate=' + fmtexrate + " USD=" + curamt + " category=\"" + str(txn_category) + "\" memo=\"" + str(txn_memo)
memo = ' <MEMO>' + rawmemo[:253] + "\"\n"
fitid = ' <FITID>' + str(txn_id)
exrate = ' <CURRATE>' + fmtexrate
transaction = ("" + self.__TRANSACTION_START + "\n"
"" + trtype + "\n"
"" + dtposted + "\n"
"" + tramt + "\n"
"" + fitid + "\n"
"" + trname +
"" + memo +
"" + " <CURRENCY>" + "\n"
"" + exrate + "\n"
"" + " <CURSYM>USD" + "\n"
"" + " </CURRENCY>" + "\n"
"" + self.__TRANSACTION_END + "\n")
# Commit transaction to the document by adding to private member list object
self.__transactions.append(transaction)
logging.info("Transaction [" + str(self.getCount()) + "] Accepted.")
return True
# get the current number of valid committed transactions
def getCount(self):
return len(self.__transactions)
# get the valid status of the document
def isValid(self):
# If number of valid transactions are 0 document is invalid
if self.getCount() == 0:
self.__isValid = False
return self.__isValid
# get the text of the document
def getDocument(self):
self.Build()
return self.__document
# Construct the document, add the transactions
# save str into private member variable __document
def Build(self):
if not self.isValid():
logging.info("Error: QBO document is not valid.")
raise Exception("Error: QBO document is not valid.")
self.__document = ("" + self.__HEADER + "\n"
"" + self.__BANKTRANLIST_START + "\n"
"" + self.__DATE_START + "\n"
"" + self.__DATE_END + "\n")
for txn in self.__transactions:
self.__document = self.__document + str(txn)
self.__document = self.__document + ("" + self.__BANKTRANLIST_END + "\n"
"" + self.__FOOTER + "")
# Write QBO document to file
def Write(self, filename):
try:
with open(filename, 'w') as f:
# getDocument method will build document
# test for validity and return string for write
f.write(self.getDocument())
return True
except:
#log io error return False
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print(''.join('!! ' + line for line in lines))
logging.info('qbo.Write() method: '.join('!! ' + line for line in lines))
return False
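# Minimal usage sketch (hypothetical values; not part of csvtoqbo.py): build a
# one-transaction document and write it to disk. `denom` is assumed to be the
# satoshi-per-bitcoin multiplier that the calling script normally supplies.
if __name__ == '__main__':
    q = qbo()
    q.addTransaction(100000000, '2015-03-14', 'coffee', 'TXN0001',
                     '-0.0125', '-3.50', 'food', 'Corner Cafe')
    q.Write('statement.qbo')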
|
mit
| 1,675,282,766,867,628,800
| 31.126582
| 155
| 0.603235
| false
| 3.155547
| false
| false
| false
|
berkerpeksag/pythondotorg
|
pydotorg/settings/base.py
|
1
|
5943
|
import os
import dj_database_url
### Basic config
BASE = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
DEBUG = TEMPLATE_DEBUG = True
SITE_ID = 1
SECRET_KEY = 'its-a-secret-to-everybody'
# Until Sentry works on Py3, do errors the old-fashioned way.
ADMINS = []
# General project information
# These are available in the template as SITE_INFO.<title>
SITE_VARIABLES = {
'site_name': 'Python.org',
'site_descript': 'The official home of the Python Programming Language',
}
### Databases
DATABASES = {
'default': dj_database_url.config(default='postgres:///python.org')
}
### Locale settings
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATE_FORMAT = 'Y-m-d'
### Files (media and static)
MEDIA_ROOT = os.path.join(BASE, 'media')
MEDIA_URL = '/m/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(BASE, 'static-root')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE, 'static'),
]
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
### Authentication
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
SOCIALACCOUNT_EMAIL_REQUIRED = True
SOCIALACCOUNT_EMAIL_VERIFICATION = True
SOCIALACCOUNT_QUERY_EMAIL = True
### Templates
TEMPLATE_DIRS = [
os.path.join(BASE, 'templates')
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
"django.contrib.messages.context_processors.messages",
"pydotorg.context_processors.site_info",
"pydotorg.context_processors.url_name",
]
### URLs, WSGI, middleware, etc.
ROOT_URLCONF = 'pydotorg.urls'
MIDDLEWARE_CLASSES = (
'pydotorg.middleware.AdminNoCaching',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'pages.middleware.PageFallbackMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
AUTH_USER_MODEL = 'users.User'
WSGI_APPLICATION = 'pydotorg.wsgi.application'
### Apps
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.redirects',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.comments',
'django.contrib.admin',
'django.contrib.admindocs',
'django_comments_xtd',
'jsonfield',
'pipeline',
'sitetree',
'timedelta',
'imagekit',
'haystack',
'honeypot',
'users',
'boxes',
'cms',
'companies',
'feedbacks',
'community',
'jobs',
'pages',
'sponsors',
'successstories',
'events',
'minutes',
'peps',
'blogs',
'downloads',
'codesamples',
'allauth',
'allauth.account',
'allauth.socialaccount',
#'allauth.socialaccount.providers.facebook',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.twitter',
# Tastypie needs the `users` app to be already loaded.
'tastypie',
]
# Fixtures
FIXTURE_DIRS = (
os.path.join(BASE, 'fixtures'),
)
### Testing
SKIP_NETWORK_TESTS = True
### Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
### Development
DEV_FIXTURE_URL = 'https://www.python.org/m/fixtures/dev-fixtures.json.gz'
### Comments
COMMENTS_APP = 'django_comments_xtd'
COMMENTS_XTD_MAX_THREAD_LEVEL = 0
COMMENTS_XTD_FORM_CLASS = "jobs.forms.JobCommentForm"
### Honeypot
HONEYPOT_FIELD_NAME = 'email_body_text'
HONEYPOT_VALUE = 'write your message'
### Blog Feed URL
PYTHON_BLOG_FEED_URL = "http://feeds.feedburner.com/PythonInsider"
PYTHON_BLOG_URL = "http://blog.python.org"
### Registration mailing lists
MAILING_LIST_PSF_MEMBERS = "psf-members-announce-request@python.org"
### PEP Repo Location
PEP_REPO_PATH = ''
### Fastly ###
FASTLY_API_KEY = False # Set to Fastly API key in production to allow pages to
# be purged on save
# Jobs
JOB_THRESHOLD_DAYS = 90
JOB_FROM_EMAIL = 'jobs@python.org'
### Pipeline
from .pipeline import (
PIPELINE_CSS, PIPELINE_JS,
PIPELINE_COMPILERS,
PIPELINE_SASS_BINARY, PIPELINE_SASS_ARGUMENTS,
PIPELINE_CSS_COMPRESSOR, PIPELINE_JS_COMPRESSOR,
)
|
apache-2.0
| -7,469,629,725,360,730,000
| 23.557851
| 79
| 0.676931
| false
| 3.346284
| false
| false
| false
|
stevegt/UltimakerUtils
|
leveling-rings-UM1.py
|
1
|
2681
|
#!/usr/bin/python
# Derived from the UM2 version by an anonymous contributor...
#
# http://umforum.ultimaker.com/index.php?/topic/5951-um2-calibration-utility-leveling-ringsgcode/?p=54694
#
# ...who wisely says: "I accept NO liability for any damage done by
# using either version or any derivatives. USE AT YOUR OWN RISK."
filament_diameter = 2.89
build_area_width = 205.0
build_area_depth = 205.0
rings = 10
wide = 0.4
thick = 0.2925 / 2
temperature = 230
bed_temperature = 60
base_dia = 180
pi=3.1415927
center_x = build_area_width/2.0
center_y = build_area_depth/2.0
filament_area = (filament_diameter / 2) ** 2 * pi
head = '''
M107 ;start with the fan off
G21 ;metric values
G90 ;absolute positioning
M82 ;set extruder to absolute mode
M107 ;start with the fan off
G28 X0 Y0 ;move X/Y to min endstops
G28 Z0 ;move Z to min endstops
G1 Z15.0 F9000 ;move the platform down 15mm
M140 S{bed_temperature:.2f} ;set bed temp (no wait)
M109 T0 S{temperature:.2f} ;set extruder temp (wait)
M190 S{bed_temperature:.2f} ;set bed temp (wait)
G92 E0 ;zero the extruded length
G1 F200 E3 ;extrude 3mm of feed stock
G92 E0 ;zero the extruded length again
G1 F9000 ;set speed to 9000
;Put printing message on LCD screen
M117 Printing...
;Layer count: 1
;LAYER:0
'''
loop = '''
G0 F9000 X{x:.2f} Y{y:.2f} Z{z:.2f}
G2 F1000 X{x:.2f} Y{y:.2f} I{r:.2f} E{total_mm3:.2f}'''
tail = '''
;End GCode
M104 S0 ;extruder heater off
M140 S0 ;heated bed heater off (if you have it)
G91 ;relative positioning
G1 E-1 F300 ;retract the filament a bit before lifting the nozzle, to release some of the pressure
G1 Z+0.5 E-5 X-20 Y-20 F9000 ;move Z up a bit and retract filament even more
G28 X0 Y0 ;move X/Y to min endstops, so the head is out of the way
M84 ;steppers off
G90 ;absolute positioning'''
total_mm3 = 0
body = ''
cross_section = thick * wide
z = thick
for i in range(rings):
dia = base_dia - ((wide * 2) * i)
circumference = pi * dia
r = dia/2.0;
x = center_x - r
y = center_y
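    # note: (circumference * cross_section) is the ring volume in mm^3; dividing
    # by the filament cross-section area (mm^2) gives the filament length in mm,
    # which is what the absolute E value in the G2 template expects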
mm3 = (circumference * cross_section) / filament_area
total_mm3 += mm3
body += loop.format(**vars())
print head.format(**vars())
print body
print tail.format(**vars())
|
gpl-2.0
| 5,621,385,078,935,052,000
| 30.174419
| 118
| 0.564715
| false
| 3.099422
| false
| false
| false
|
udapi/udapi-python
|
udapi/block/ud/complywithtext.py
|
1
|
11648
|
r"""Block ComplyWithText for adapting the nodes to comply with the text.
Implementation design details:
Usually, most of the inconsistencies between tree tokens and the raw text are simple to solve.
However, there may be also rare cases when it is not clear how to align the tokens
(nodes in the tree) with the raw text (stored in ``root.text``).
This block tries to solve the general case using several heuristics.
It starts with running a LCS-like algorithm (LCS = longest common subsequence)
``difflib.SequenceMatcher`` on the raw text and concatenation of tokens' forms,
i.e. on sequences of characters (as opposed to running LCS on sequences of tokens).
To prevent mis-alignment problems, we keep the spaces present in the raw text
and we insert spaces into the concatenated forms (``tree_chars``) according to ``SpaceAfter=No``.
An example of a mis-alignment problem:
text "énfase na necesidade" with 4 nodes "énfase en a necesidade"
should be solved by adding multiword token "na" over the nodes "en" and "a".
However, running LCS (or difflib) over the character sequences
"énfaseenanecesidade"
"énfasenanecesidade"
may result in énfase -> énfas.
Author: Martin Popel
"""
import difflib
import logging
import re
from udapi.core.block import Block
from udapi.core.mwt import MWT
class ComplyWithText(Block):
"""Adapt the nodes to comply with the text."""
def __init__(self, fix_text=True, prefer_mwt=True, allow_goeswith=True, max_mwt_length=4,
**kwargs):
"""Args:
fix_text: After all heuristics are applied, the token forms may still not match the text.
Should we edit the text to match the token forms (as a last resort)? Default=True.
prefer_mwt - What to do if multiple subsequent nodes correspond to a text written
without spaces and non-word characters (punctuation)?
E.g. if "3pm doesn't" is annotated with four nodes "3 pm does n't".
We can use either SpaceAfter=No, or create a multi-word token (MWT).
Note that if there is space or punctuation, SpaceAfter=No will be used always
(e.g. "3 p.m." annotated with three nodes "3 p. m.").
If the character sequence does not match exactly, MWT will be used always
(e.g. "3pm doesn't" annotated with four nodes "3 p.m. does not").
Thus this parameter influences only the "unclear" cases.
Default=True (i.e. prefer multi-word tokens over SpaceAfter=No).
allow_goeswith - If a node corresponds to multiple space-separated strings in text,
which are not allowed as tokens with space, we can either leave this diff
unresolved or create new nodes and join them with the `goeswith` deprel.
Default=True (i.e. add the goeswith nodes if applicable).
max_mwt_length - Maximum length of newly created multi-word tokens (in syntactic words).
Default=4.
"""
super().__init__(**kwargs)
self.fix_text = fix_text
self.prefer_mwt = prefer_mwt
self.allow_goeswith = allow_goeswith
self.max_mwt_length = max_mwt_length
@staticmethod
def allow_space(form):
"""Is space allowed within this token form?"""
return re.fullmatch('[0-9 ]+([,.][0-9]+)?', form)
@staticmethod
def store_orig_form(node, new_form):
"""Store the original form of this node into MISC, unless the change is common&expected."""
_ = new_form
if node.form not in ("''", "``"):
node.misc['OrigForm'] = node.form
def process_tree(self, root):
text = root.text
if text is None:
raise ValueError('Tree %s has no text, cannot use ud.ComplyWithText' % root)
# Normalize the stored text (double space -> single space)
# and skip sentences which are already ok.
text = ' '.join(text.split())
if text == root.compute_text():
return
tree_chars, char_nodes = _nodes_to_chars(root.token_descendants)
# Align. difflib may not give LCS, but usually it is good enough.
matcher = difflib.SequenceMatcher(None, tree_chars, text, autojunk=False)
diffs = list(matcher.get_opcodes())
_log_diffs(diffs, tree_chars, text, 'matcher')
diffs = self.unspace_diffs(diffs, tree_chars, text)
_log_diffs(diffs, tree_chars, text, 'unspace')
diffs = self.merge_diffs(diffs, char_nodes)
_log_diffs(diffs, tree_chars, text, 'merge')
# Solve diffs.
self.solve_diffs(diffs, tree_chars, char_nodes, text)
# Fill SpaceAfter=No.
tmp_text = text
for node in root.token_descendants:
if tmp_text.startswith(node.form):
tmp_text = tmp_text[len(node.form):]
if not tmp_text or tmp_text[0].isspace():
del node.misc['SpaceAfter']
tmp_text = tmp_text.lstrip()
else:
node.misc['SpaceAfter'] = 'No'
else:
logging.warning('Node %s does not match text "%s"', node, tmp_text[:20])
return
# Edit root.text if needed.
if self.fix_text:
computed_text = root.compute_text()
if text != computed_text:
root.add_comment('ToDoOrigText = ' + root.text)
root.text = computed_text
def unspace_diffs(self, orig_diffs, tree_chars, text):
diffs = []
for diff in orig_diffs:
edit, tree_lo, tree_hi, text_lo, text_hi = diff
if edit != 'insert':
if tree_chars[tree_lo] == ' ':
tree_lo += 1
if tree_chars[tree_hi - 1] == ' ':
tree_hi -= 1
old = tree_chars[tree_lo:tree_hi]
new = text[text_lo:text_hi]
if old == '' and new == '':
continue
elif old == new:
edit = 'equal'
elif old == '':
edit = 'insert'
diffs.append((edit, tree_lo, tree_hi, text_lo, text_hi))
return diffs
def merge_diffs(self, orig_diffs, char_nodes):
"""Make sure each diff starts on original token boundary.
If not, merge the diff with the previous diff.
E.g. (equal, "5", "5"), (replace, "-6", "–7")
is changed into (replace, "5-6", "5–7")
"""
diffs = []
for diff in orig_diffs:
edit, tree_lo, tree_hi, text_lo, text_hi = diff
if edit != 'insert' and char_nodes[tree_lo] is not None:
diffs.append(diff)
elif edit == 'equal':
while tree_lo < tree_hi and char_nodes[tree_lo] is None:
tree_lo += 1
text_lo += 1
diffs[-1] = ('replace', diffs[-1][1], tree_lo, diffs[-1][3], text_lo)
if tree_lo < tree_hi:
diffs.append(('equal', tree_lo, tree_hi, text_lo, text_hi))
else:
if not diffs:
diffs = [diff]
elif diffs[-1][0] != 'equal':
diffs[-1] = ('replace', diffs[-1][1], tree_hi, diffs[-1][3], text_hi)
else:
p_tree_hi = diffs[-1][2] - 1
p_text_hi = diffs[-1][4] - 1
while char_nodes[p_tree_hi] is None:
p_tree_hi -= 1
p_text_hi -= 1
assert p_tree_hi >= diffs[-1][1]
assert p_text_hi >= diffs[-1][3]
diffs[-1] = ('equal', diffs[-1][1], p_tree_hi, diffs[-1][3], p_text_hi)
diffs.append(('replace', p_tree_hi, tree_hi, p_text_hi, text_hi))
return diffs
def solve_diffs(self, diffs, tree_chars, char_nodes, text):
for diff in diffs:
edit, tree_lo, tree_hi, text_lo, text_hi = diff
# Focus only on edits of type 'replace', log insertions and deletions as failures.
if edit == 'equal':
continue
if edit in ('insert', 'delete'):
logging.warning('Unable to solve token-vs-text mismatch\n%s',
_diff2str(diff, tree_chars, text))
continue
            # Revert the splitting and solve the diff.
nodes = [n for n in char_nodes[tree_lo:tree_hi] if n is not None]
form = text[text_lo:text_hi]
self.solve_diff(nodes, form.strip())
def solve_diff(self, nodes, form):
"""Fix a given (minimal) tokens-vs-text inconsistency."""
nodes_str = ' '.join([n.form for n in nodes]) # just for debugging
node = nodes[0]
# First, solve the cases when the text contains a space.
if ' ' in form:
if len(nodes) == 1 and node.form == form.replace(' ', ''):
if self.allow_space(form):
self.store_orig_form(node, form)
node.form = form
elif self.allow_goeswith:
forms = form.split()
node.form = forms[0]
for split_form in reversed(forms[1:]):
new = node.create_child(form=split_form, deprel='goeswith', upos=node.upos)
new.shift_after_node(node)
else:
logging.warning('Unable to solve 1:m diff:\n%s -> %s', nodes_str, form)
else:
logging.warning('Unable to solve n:m diff:\n%s -> %s', nodes_str, form)
# Second, solve the cases when multiple nodes match one form (without any spaces).
elif len(nodes) > 1:
            # If the match is exact, we can choose between MWT and SpaceAfter solutions.
if not self.prefer_mwt and ''.join([n.form for n in nodes]) == form:
pass # SpaceAfter=No will be added later on.
# If one of the nodes is already a MWT, we cannot have nested MWTs.
# TODO: enlarge the MWT instead of failing.
elif any(isinstance(n, MWT) for n in nodes):
logging.warning('Unable to solve partial-MWT diff:\n%s -> %s', nodes_str, form)
# MWT with too many words are suspicious.
elif len(nodes) > self.max_mwt_length:
logging.warning('Not creating too long (%d>%d) MWT:\n%s -> %s',
len(nodes), self.max_mwt_length, nodes_str, form)
# Otherwise, create a new MWT.
else:
node.root.create_multiword_token(nodes, form)
# Third, solve the 1-1 cases.
else:
self.store_orig_form(node, form)
node.form = form
def _nodes_to_chars(nodes):
chars, char_nodes = [], []
for node in nodes:
form = node.form
if node.misc['SpaceAfter'] != 'No' and node != nodes[-1]:
form += ' '
chars.extend(form)
char_nodes.append(node)
char_nodes.extend([None] * (len(form) - 1))
return ''.join(chars), char_nodes
def _log_diffs(diffs, tree_chars, text, msg):
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.warning('=== After %s:', msg)
for diff in diffs:
logging.warning(_diff2str(diff, tree_chars, text))
def _diff2str(diff, tree, text):
old = '|' + ''.join(tree[diff[1]:diff[2]]) + '|'
new = '|' + ''.join(text[diff[3]:diff[4]]) + '|'
if diff[0] == 'equal':
return '{:7} {!s:>50}'.format(diff[0], old)
return '{:7} {!s:>50} --> {!s}'.format(diff[0], old, new)
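# A small illustration (not part of the block): the character-level alignment
# described in the module docstring, run on the "énfase na necesidade" example.
# The opcodes show where the concatenated token forms and the raw text diverge.
if __name__ == '__main__':
    _tree_chars = 'énfase en a necesidade'
    _raw_text = 'énfase na necesidade'
    _matcher = difflib.SequenceMatcher(None, _tree_chars, _raw_text, autojunk=False)
    for _op in _matcher.get_opcodes():
        print(_op)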
|
gpl-3.0
| 6,423,815,890,427,901,000
| 42.75188
| 99
| 0.559117
| false
| 3.789645
| false
| false
| false
|
storiesofsolidarity/story-database
|
stories/admin.py
|
1
|
1393
|
from django.contrib import admin
from models import Location, Story
from people.models import Author
class LocationAdmin(admin.ModelAdmin):
list_display = ('zipcode', 'city_fmt', 'county_fmt', 'state_fmt', 'story_count')
list_filter = ('state',)
search_fields = ('zipcode', 'city', 'county')
admin.site.register(Location, LocationAdmin)
class EmployerFilter(admin.SimpleListFilter):
title = 'author employer'
parameter_name = 'employer'
def lookups(self, request, model_admin):
employer_set = set()
for a in Author.objects.all():
if a.employer:
employer_set.add(a.employer.split(' ', 1)[0])
return [(str(c), str(c)) for c in employer_set if c]
def queryset(self, request, queryset):
if self.value() or self.value() == 'None':
return queryset.filter(author__employer__startswith=self.value())
else:
return queryset
class StoryAdmin(admin.ModelAdmin):
list_display = ('excerpt', 'author_display', 'employer', 'anonymous', 'created_at')
list_filter = (EmployerFilter, 'location__state', 'truncated')
date_hierarchy = 'created_at'
readonly_fields = ('truncated',)
raw_id_fields = ('author', 'location')
search_fields = ('location__city', 'author__user__first_name', 'author__user__last_name', 'content')
admin.site.register(Story, StoryAdmin)
|
agpl-3.0
| -544,278,433,607,546,560
| 33.825
| 104
| 0.648959
| false
| 3.491228
| false
| false
| false
|
peppelinux/inventario_verdebinario
|
museo/models.py
|
1
|
4183
|
from django.db import models
from photologue.models import ImageModel
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
class Produttore(ImageModel):
id_tabella = models.AutoField(primary_key=True)
nome = models.CharField(max_length=135, blank=True)
nome_abbreviato = models.CharField(max_length=135, blank=True)
#slug = models.SlugField(unique=True, help_text=('"slug": un identificatore automatico e univoco'))
descrizione = models.TextField(max_length=1024, blank=True)
data_nascita = models.DateField(null=True, blank=True)
data_chiusura = models.DateField(null=True, blank=True)
#immagine_logo = models.ImageField(upload_to="LoghiProduttori", blank=True)
url = models.CharField(max_length=256, blank=True)
def save(self, *args, **kwargs):
if self.nome_abbreviato == None or self.nome_abbreviato.split() == []:
self.nome_abbreviato = self.nome.upper()
super(self.__class__, self).save(*args, **kwargs) # Call the "real" save() method.
class Meta:
ordering = ['nome']
db_table = 'produttore'
verbose_name_plural = "Produttore"
# def get_absolute_url(self):
# return '%s' % (self.url)
def __str__(self):
return '%s' % (self.nome_abbreviato)
class SchedaTecnica(models.Model):
id_tabella = models.AutoField(primary_key=True)
modello = models.CharField(max_length=135, blank=True)
produttore = models.ForeignKey(Produttore, null=True, blank=True, on_delete=models.SET_NULL)
paese_di_origine = models.CharField(max_length=135, blank=True)
anno = models.CharField(max_length=135, blank=True)
tastiera = models.CharField(max_length=135, blank=True)
cpu = models.CharField(max_length=135, blank=True)
velocita = models.CharField(max_length=135, blank=True)
memoria_volatile = models.CharField(max_length=135, blank=True)
memoria_di_massa = models.CharField(max_length=135, blank=True)
modalita_grafica = models.CharField(max_length=135, blank=True)
audio = models.CharField(max_length=135, blank=True)
dispositivi_media = models.CharField(max_length=135, blank=True)
alimentazione = models.CharField(max_length=135, blank=True)
prezzo = models.CharField(max_length=135, blank=True)
descrizione = models.TextField(max_length=1024, blank=True)
data_inserimento = models.DateField(null=True, blank=False, auto_now_add=True)
class Meta:
db_table = 'scheda_tecnica'
verbose_name_plural = "Scheda Tecnica"
class FotoHardwareMuseo(ImageModel):
id_tabella = models.AutoField(primary_key=True)
#immagine = models.ImageField(upload_to="FotoHardwareMuseo/%d.%m.%Y", blank=True)
etichetta_verde = models.CharField(max_length=135, blank=True)
data_inserimento = models.DateField(null=True, blank=False, auto_now_add=True)
seriale = models.CharField(max_length=384, blank=True)
didascalia = models.TextField(max_length=328, blank=True)
scheda_tecnica = models.ForeignKey(SchedaTecnica, null=True, blank=True, on_delete=models.SET_NULL)
class Meta:
db_table = 'foto_hardware_museo'
verbose_name_plural = "Foto Hardware Museo"
def __str__(self):
return '%s %s' % (self.seriale, self.scheda_tecnica)
def get_absolute_url(self):
#return '/media/foto/FotoHardwareMuseo/' + self.data_inserimento.strftime('%d.%m.%Y') + '/' + self.image.name
return '/media/%s' % self.image.name
def admin_thumbnail(self):
func = getattr(self, 'get_admin_thumbnail_url', None)
if func is None:
return _('An "admin_thumbnail" photo size has not been defined.')
else:
if hasattr(self, 'get_absolute_url'):
return '<a class="foto_admin_thumbs" target="_blank" href="%s"><img src="%s"></a>' % \
(self.get_absolute_url(), func())
else:
return '<a class="foto_admin_thumbs" target="_blank" href="%s"><img src="%s"></a>' % \
(self.image.url, func())
admin_thumbnail.short_description = _('Thumbnail')
admin_thumbnail.allow_tags = True
|
gpl-3.0
| 3,234,902,780,985,170,000
| 44.967033
| 117
| 0.671528
| false
| 3.220169
| false
| false
| false
|
XtheOne/Inverter-Data-Logger
|
InverterLib.py
|
1
|
3301
|
import socket
import struct
import os
import binascii
import sys
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding('cp437')
def getNetworkIp():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.connect(('<broadcast>', 0))
return s.getsockname()[0]
def createV4RequestFrame(logger_sn):
"""Create request frame for inverter logger.
The request string is build from several parts. The first part is a
fixed 4 char string; the second part is the reversed hex notation of
the s/n twice; then again a fixed string of two chars; a checksum of
the double s/n with an offset; and finally a fixed ending char.
Args:
logger_sn (int): Serial number of the inverter
Returns:
str: Information request string for inverter
"""
#frame = (headCode) + (dataFieldLength) + (contrlCode) + (sn) + (sn) + (command) + (checksum) + (endCode)
frame_hdr = binascii.unhexlify('680241b1') #from SolarMan / new Omnik app
command = binascii.unhexlify('0100')
defchk = binascii.unhexlify('87')
endCode = binascii.unhexlify('16')
tar = bytearray.fromhex(hex(logger_sn)[8:10] + hex(logger_sn)[6:8] + hex(logger_sn)[4:6] + hex(logger_sn)[2:4])
frame = bytearray(frame_hdr + tar + tar + command + defchk + endCode)
checksum = 0
frame_bytes = bytearray(frame)
for i in range(1, len(frame_bytes) - 2, 1):
checksum += frame_bytes[i] & 255
frame_bytes[len(frame_bytes) - 2] = int((checksum & 255))
return bytearray(frame_bytes)
def expand_path(path):
"""
Expand relative path to absolute path.
Args:
path: file path
Returns: absolute path to file
"""
if os.path.isabs(path):
return path
else:
return os.path.dirname(os.path.abspath(__file__)) + "/" + path
def getLoggers():
# Create the datagram socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((getNetworkIp(), 48899))
# Set a timeout so the socket does not block indefinitely when trying to receive data.
sock.settimeout(3)
# Set the time-to-live for messages to 1 so they do not go past the local network segment.
ttl = struct.pack('b', 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
SendData = "WIFIKIT-214028-READ" # Lotto/TM = "AT+YZAPP=214028,READ"
gateways = ''
try:
# Send data to the broadcast address
sent = sock.sendto(SendData, ('<broadcast>', 48899))
# Look for responses from all recipients
while True:
try:
data, server = sock.recvfrom(1024)
except socket.timeout:
break
else:
if (data == SendData): continue #skip sent data
a = data.split(',')
wifi_ip, wifi_mac, wifi_sn = a[0],a[1],a[2]
if (len(gateways)>1):
gateways = gateways+','
gateways = gateways+wifi_ip+','+wifi_sn
finally:
sock.close()
return gateways
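# Minimal usage sketch (hypothetical serial number; not part of the library):
# build a request frame for a logger and show it as hex. Actually sending it
# to the logger over the network is left to the calling script.
if __name__ == '__main__':
    frame = createV4RequestFrame(602123456)
    print(binascii.hexlify(frame))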
|
gpl-3.0
| 8,714,258,900,020,281,000
| 33.385417
| 115
| 0.62678
| false
| 3.54565
| false
| false
| false
|
jiaojianbupt/tools
|
project_manager/alias.py
|
1
|
1746
|
# -*- coding: utf-8 -*-
"""
Created by jiaojian at 2018/6/29 16:30
"""
import os
import sys
import termios
from tools.utils.basic_printer import print_with_style, ConsoleColor
HOME = os.environ['HOME']
def get_input():
fd = sys.stdin.fileno()
old_tty_info = termios.tcgetattr(fd)
new_tty_info = old_tty_info[:]
new_tty_info[3] &= ~termios.ICANON
new_tty_info[3] &= ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, new_tty_info)
answer = os.read(fd, 1)
termios.tcsetattr(fd, termios.TCSANOW, old_tty_info)
return answer
def add_alias():
if sys.platform == 'darwin':
bash_profile_name = '.bash_profile'
else:
bash_profile_name = '.bashrc'
linux_bash_profile_path = os.path.join(HOME, bash_profile_name)
exec_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'main.py')
alias = 'alias updateall="python %s"' % exec_file_path
if os.path.exists(linux_bash_profile_path):
        with open(linux_bash_profile_path, 'r') as bashrc_file:
bash_profile = bashrc_file.read()
if bash_profile.find(alias) >= 0:
return
answer = ''
while not answer or answer not in {'y', 'n'}:
print_with_style('Add \'%s\' to your %s?(y/n)' % (alias, bash_profile_name), color=ConsoleColor.YELLOW)
answer = get_input()
if answer == 'n':
return
elif answer == 'y':
break
bash_profile = bash_profile + '\n' + alias
with open(linux_bash_profile_path, 'w') as bashrc_file:
bashrc_file.write(bash_profile)
print_with_style('Alias added.', color=ConsoleColor.YELLOW)
|
gpl-3.0
| -6,002,306,353,356,231
| 35.375
| 119
| 0.587056
| false
| 3.332061
| false
| false
| false
|
dapengchen123/code_v1
|
reid/datasets/market1501.py
|
1
|
3563
|
from __future__ import print_function, absolute_import
import os.path as osp
from ..utils.data import Dataset
from ..utils.osutils import mkdir_if_missing
from ..utils.serialization import write_json
class Market1501(Dataset):
url = 'https://drive.google.com/file/d/0B8-rUzbwVRk0c054eEozWG9COHM/view'
md5 = '65005ab7d12ec1c44de4eeafe813e68a'
def __init__(self, root, split_id=0, num_val=0.3, download=False):
super(Market1501, self).__init__(root, split_id=split_id)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. " +
"You can use download=True to download it.")
self.load(num_val)
def download(self):
if self._check_integrity():
print("Files already downloaded and verified")
return
import re
import hashlib
import shutil
from glob import glob
from zipfile import ZipFile
raw_dir = osp.join(self.root, 'raw')
mkdir_if_missing(raw_dir)
# Download the raw zip file
fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip')
if osp.isfile(fpath) and \
hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5:
print("Using downloaded file: " + fpath)
else:
raise RuntimeError("Please download the dataset manually from {} "
"to {}".format(self.url, fpath))
# Extract the file
exdir = osp.join(raw_dir, 'Market-1501-v15.09.15')
if not osp.isdir(exdir):
print("Extracting zip file")
with ZipFile(fpath) as z:
z.extractall(path=raw_dir)
# Format
images_dir = osp.join(self.root, 'images')
mkdir_if_missing(images_dir)
# 1501 identities (+1 for background) with 6 camera views each
identities = [[[] for _ in range(6)] for _ in range(1502)]
def register(subdir, pattern=re.compile(r'([-\d]+)_c(\d)')):
fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg')))
pids = set()
for fpath in fpaths:
fname = osp.basename(fpath)
pid, cam = map(int, pattern.search(fname).groups())
if pid == -1: continue # junk images are just ignored
assert 0 <= pid <= 1501 # pid == 0 means background
assert 1 <= cam <= 6
cam -= 1
pids.add(pid)
fname = ('{:08d}_{:02d}_{:04d}.jpg'
.format(pid, cam, len(identities[pid][cam])))
identities[pid][cam].append(fname)
shutil.copy(fpath, osp.join(images_dir, fname))
return pids
trainval_pids = register('bounding_box_train')
gallery_pids = register('bounding_box_test')
query_pids = register('query')
assert query_pids <= gallery_pids
assert trainval_pids.isdisjoint(gallery_pids)
# Save meta information into a json file
meta = {'name': 'Market1501', 'shot': 'multiple', 'num_cameras': 6,
'identities': identities}
write_json(meta, osp.join(self.root, 'meta.json'))
# Save the only training / test split
splits = [{
'trainval': sorted(list(trainval_pids)),
'query': sorted(list(query_pids)),
'gallery': sorted(list(gallery_pids))}]
write_json(splits, osp.join(self.root, 'splits.json'))
|
mit
| -2,535,048,846,858,501,600
| 36.505263
| 78
| 0.561605
| false
| 3.782378
| false
| false
| false
|
glenflet/ZtoRGBpy
|
ZtoRGBpy/_info.py
|
1
|
2082
|
# -*- coding: utf-8 -*-
# =================================================================================
# Copyright 2019 Glen Fletcher <mail@glenfletcher.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# All documentation this file as docstrings or comments are licensed under the
# Creative Commons Attribution-ShareAlike 4.0 International License; you may
# not use this documentation except in compliance with this License.
# You may obtain a copy of this License at
#
# https://creativecommons.org/licenses/by-sa/4.0
#
# =================================================================================
"""
ZtoRGB information definition module
Special private module used for automatic processing, and inclusion
.. moduleauthor:: Glen Fletcher <mail@glenfletcher.com>
"""
__authors__ = [
("Glen Fletcher", "mail@glenfletcher.com")]
__copyright__ = "2019 Glen Fletcher"
__license__ = """\
The source code for this package is licensed under the [Apache 2.0 License](http://www.apache.org/licenses/LICENSE-2.0),
while the documentation including docstrings and comments embedded in the source code are licensed under the
[Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0)
"""
__contact__ = "Glen Fletcher <mail@glenfletcher.com>"
__version__ = "2.0"
__title__ = "ZtoRGBpy"
__desc__ = """\
Complex number to perceptually uniform RGB subset mapping library"""
__all__ = [
'__authors__', '__copyright__', '__license__',
'__contact__', '__version__', '__title__',
'__desc__']
|
mit
| -2,552,345,746,239,531,500
| 40.64
| 120
| 0.662344
| false
| 3.869888
| false
| false
| false
|
ElecProg/decmath
|
decmath/trig.py
|
1
|
4598
|
from decimal import getcontext, Decimal
from decmath import _pi, _to_Decimal, sign
# Trigonometric functions
def acos(x):
"""Return the arc cosine (measured in radians) of x."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal("NaN")
elif abs(x) > 1:
raise ValueError("Domain error: acos accepts -1 <= x <= 1.")
elif x == -1:
return _pi()
elif x == 0:
return _pi() / 2
elif x == 1:
return Decimal(0)
getcontext().prec += 2
one_half = Decimal('0.5')
i, lasts, s, gamma, fact, num = Decimal(0), 0, _pi() / 2 - x, 1, 1, x
while s != lasts:
lasts = s
i += 1
fact *= i
num *= x * x
gamma *= i - one_half
coeff = gamma / ((2 * i + 1) * fact)
s -= coeff * num
getcontext().prec -= 2
return +s
def asin(x):
"""Return the arc sine (measured in radians) of x."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal("NaN")
elif abs(x) > 1:
raise ValueError("Domain error: asin accepts -1 <= x <= 1.")
elif x == -1:
return -_pi() / 2
elif x == 0:
return Decimal(0)
elif x == 1:
return _pi() / 2
getcontext().prec += 2
one_half = Decimal('0.5')
i, lasts, s, gamma, fact, num = Decimal(0), 0, x, 1, 1, x
while s != lasts:
lasts = s
i += 1
fact *= i
num *= x * x
gamma *= i - one_half
coeff = gamma / ((2 * i + 1) * fact)
s += coeff * num
getcontext().prec -= 2
return +s
def atan(x):
"""Return the arc tangent (measured in radians) of x."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal("NaN")
elif x == Decimal('-Inf'):
return -_pi() / 2
elif x == 0:
return Decimal(0)
elif x == Decimal('Inf'):
return _pi() / 2
if x < -1:
c = _pi() / -2
x = 1 / x
elif x > 1:
c = _pi() / 2
x = 1 / x
else:
c = 0
getcontext().prec += 2
x_squared = x**2
y = x_squared / (1 + x_squared)
y_over_x = y / x
i, lasts, s, coeff, num = Decimal(0), 0, y_over_x, 1, y_over_x
while s != lasts:
lasts = s
i += 2
coeff *= i / (i + 1)
num *= y
s += coeff * num
if c:
s = c - s
getcontext().prec -= 2
return +s
def atan2(y, x):
"""Return the arc tangent (measured in radians) of y/x.
Unlike atan(y/x), the signs of both x and y are considered."""
y = _to_Decimal(y)
x = _to_Decimal(x)
abs_y = abs(y)
abs_x = abs(x)
y_is_real = abs_y != Decimal('Inf')
if y.is_nan() or x.is_nan():
return Decimal("NaN")
if x:
if y_is_real:
a = y and atan(y / x) or Decimal(0)
if x < 0:
a += sign(y) * _pi()
return a
elif abs_y == abs_x:
x = sign(x)
y = sign(y)
return _pi() * (Decimal(2) * abs(x) - x) / (Decimal(4) * y)
if y:
return atan(sign(y) * Decimal('Inf'))
elif sign(x) < 0:
return sign(y) * _pi()
else:
return sign(y) * Decimal(0)
def cos(x):
"""Return the cosine of x as measured in radians."""
x = _to_Decimal(x) % (2 * _pi())
if x.is_nan():
return Decimal('NaN')
elif x == _pi() / 2 or x == 3 * _pi() / 2:
return Decimal(0)
getcontext().prec += 2
i, lasts, s, fact, num, sign = 0, 0, 1, 1, 1, 1
while s != lasts:
lasts = s
i += 2
fact *= i * (i - 1)
num *= x * x
sign *= -1
s += num / fact * sign
getcontext().prec -= 2
return +s
def hypot(x, y):
"""Return the Euclidean distance, sqrt(x*x + y*y)."""
return (_to_Decimal(x).__pow__(2) + _to_Decimal(y).__pow__(2)).sqrt()
def sin(x):
"""Return the sine of x as measured in radians."""
x = _to_Decimal(x) % (2 * _pi())
if x.is_nan():
return Decimal('NaN')
elif x == 0 or x == _pi():
return Decimal(0)
getcontext().prec += 2
i, lasts, s, fact, num, sign = 1, 0, x, 1, x, 1
while s != lasts:
lasts = s
i += 2
fact *= i * (i - 1)
num *= x * x
sign *= -1
s += num / fact * sign
getcontext().prec -= 2
return +s
def tan(x):
"""Return the tangent of x (measured in radians)."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal('NaN')
elif x == _pi() / 2:
return Decimal('Inf')
elif x == 3 * _pi() / 2:
return Decimal('-Inf')
return sin(x) / cos(x)
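# Illustrative usage (a sketch; precision follows the active decimal context,
# so the digits shown here are approximate):
#   >>> cos(Decimal(0))          # -> Decimal('1')
#   >>> atan2(1, 1)              # -> pi/4, roughly Decimal('0.7853981633974483...')
#   >>> hypot(3, 4)              # -> Decimal('5')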
|
mit
| -8,599,662,519,578,526,000
| 22.579487
| 73
| 0.45933
| false
| 3.102564
| false
| false
| false
|
martindurant/astrobits
|
time_series.py
|
1
|
12543
|
"""Take a list of files and known star coordinates, and
perform photometry on them all, either with apertures (phot)
or by PSF fitting (daophot, which requires additional
parameters and is appropriate to poor S/N or crowded fields).
Makes extensive use of iraf tasks; set all photometry parameters
before running:
datapars - for data characteristics
centerpars - finding the reference star on each image.
centerpars, photpars, fitskypars - for controlling aperture photometry
daopars - for controlling daophot
filelist: set of image files, in IRAF syntax (image.fits[1][*,*,2] etc);
can be more than one per cube.
coords: name a file containing all star coords for photometry, based on
an image unshifted relative to (0,0) in the shifts list. Use pure numbers
for the phot method, .mag or .als for the daophot method.
shifts: name a file containing shifts, a tuple of shift arrays, or image
header keywords (a tuple of two); use None for no shifts.
refstar: coords of star for deriving (x,y) offset, as in coords
timestamp: source of the timing information: a header keyword, delta-t
for uniform sampling, or a file with times (in whatever format you'll be
using later).
psf: whether to use daophot or aperture phot for analysis. If this is a
filename, that is the PSF profile to use for every image; if it is "True",
make a new PSF for every image. Pars below only for full PSF fitting
pststars: a .pst file from daophot, listing the IDs of stars for making
the PSF for each image. NB: DAOphot refuses to measure any star with SNR<2.
ids: which stars are interesting, by ID (in input coord list order)
coords: starting well-measured coords (pdump-ed from a .als, perhaps).
"""
import os
import numpy
from glob import glob
import pyfits
from pylab import find
from numpy import load,vstack,save,median
thisdir = os.getcwd()
os.chdir("/home/durant")
from pyraf import iraf
iraf.cd(thisdir)
iraf.digiphot()
iraf.daophot()
import pyraf
import pyfits
import numpy as n
def shift_file_coords(filename,xshift,yshift,output,sort=None):
"""Understands filetypes: 2-column ascii numbers, .mag, .als, .pst.
NB: shift means where each image is, relative to the original (not where
it should be moved to).
"""
if not(sort):
sort = 'num'
if filename.find('.mag')>0: sort = 'mag'
if filename.find('.als')>0: sort = 'als'
if filename.find('.pst')>0: sort = 'pst'
if not(sort=='num' or sort=='mag' or sort=='als' or sort=='pst'):
raise ValueError('Unknown input filetype: %s'%filename)
if sort=='num': # shift 2-column numeric ASCII table
x,y = load(filename,usecols=[0,1],unpack=True)
x += xshift
y += yshift
X = vstack((x,y))
save(output,X.transpose())
return
if sort=='mag': #shift a .mag photometry file
fred = open(filename)
freda= open(output,'w')
for line in fred:
if line.split()[-1]=='\\' and len(line.split())==9 and line[0]!='#':
x = float(line.split()[0]) + xshift
y = float(line.split()[1]) + yshift
line = "%-14.3f %-11.3f"%(x,y)+line[21:]
freda.write(line)
if sort=='als': #shift a .als DAOphot photometry file
fred = open(filename)
freda= open(output,'w')
for line in fred:
if line.split()[-1]=='\\' and len(line.split())==8 and line[0]!='#':
x = float(line.split()[1]) + xshift
y = float(line.split()[2]) + yshift
line = line[:9] + "%-10.3f %-10.3f"%(x,y) + line[29:]
freda.write(line)
if sort=='pst': #shift a PSF star list for DAOphot
fred = open(filename)
freda= open(output,'w')
for line in fred:
if line[0]!="#":
x = float(line.split()[1]) + xshift
y = float(line.split()[2]) + yshift
line = line[:9] + "%-10.3f %-10.3f"%(x,y) + line[29:]
freda.write(line)
fred.close()
freda.close()
def recentre(image,refcoordfile):
"""Returns improved shift by centroiding
on the reference star using phot. This can be VERY
sensitive to the parameters in centerpars."""
xin,yin = load(refcoordfile,unpack=True)
try:
iraf.phot(image,refcoordfile,'temp.mag',inter="no",calgorithm='centroid',
mode='h',verify='no',update='no',verbose='no')
xout,yout=iraf.pdump('temp.mag','xcen,ycen','yes',Stdout=1)[0].split()
except:
print "Recentring failed on", image
return 0.,0.
xout,yout = float(xout),float(yout)
return xout-xin,yout-yin
vary_par = 1.
vary_max = 10
vary_min = 6
vary_fwhm= 0
def setaperture(image,refstar):
"""Measure the FWHM of the reference star unsing simple DAOphot editor
and then set the photometry aperture to this number"""
x,y = load(refstar,unpack=True)
fred = open('tempaperfile','w')
fred.write("%f %f 100 a\nq"%(x,y))
fred.close()
try:
output=iraf.daoedit(image,icomm='tempaperfile',Stdout=1,Stderr=1)
except:
print "Aperture setting failed on",image
return
FWHM = float(output[3].split()[4])
iraf.photpars.apertures = min(max(FWHM*vary_par,vary_min),vary_max)
iraf.daopars.fitrad = min(max(FWHM*vary_par,vary_min),vary_max)
global vary_fwhm
vary_fwhm = FWHM
print "FWHM: ", FWHM, " aperture: ",iraf.photpars.apertures
def apphot(image,coords,refstar=None,centre=False,vary=False):
"""Apperture photometry with centering based on a reference star.
NB: centre refers to shifting the coordinates by centroiding on the
reference star; recentering on the final phot depends on
centerpars.calgorithm ."""
iraf.dele('temp.mag*')
if centre:
xsh,ysh = recentre(image,refstar)
print "Fine centring: ", xsh,ysh
    else: #no recentring by reference star (but could still have calgorithm!=none)
xsh,ysh = 0,0
if vary:
setaperture(image,refstar)
shift_file_coords(coords,xsh,ysh,'tempcoords')
iraf.phot(image,'tempcoords','temp.mag2',inter="no",
mode='h',verify='no',update='no',verbose='no')
out = iraf.pdump('temp.mag2','id,flux,msky,stdev','yes',Stdout=1)
return out
def psfphot(image,coords,pststars,refstar,centre=True,vary=False):
"""PSF photometry. Centering is through phot on refstar.
Assume coords is a .als file for now. Recentering is always done
for the reference star, never for the targets."""
iraf.dele('temp.mag*')
iraf.dele('temp.psf.fits')
iraf.dele('temp.als')
if centre:
xsh,ysh = recentre(image,refstar)
print "Fine Centring: ", xsh,ysh
else: xsh,ysh = 0,0
if vary:
setaperture(image,refstar)
shift_file_coords(coords,xsh,ysh,'tempcoords2',sort='als')
shift_file_coords(pststars,xsh,ysh,'temppst2',sort='pst')
iraf.phot(image,'tempcoords2','temp.mag2',inter="no",calgorithm='none',
mode='h',verify='no',update='no',verbose='no')
iraf.psf(image,'temp.mag2','temppst2','temp.psf','temp.mag.pst','temp.mag.psg',
inter='no',mode='h',verify='no',update='no',verbose='no')
iraf.allstar(image,'temp.mag2','temp.psf','temp.als','temp.mag.arj',"default",
mode='h',verify='no',update='no',verbose='no')
out = iraf.pdump('temp.als','id,mag,merr,msky','yes',Stdout=1)
return out
def simplepsfphot(image,coords,psf,refstar,centre=True,vary=False):
"""PSF photometry, with a given PSF file in psf used for every image"""
iraf.dele('temp.mag*')
iraf.dele('temp.als')
iraf.dele('temp.sub.fits')
if centre:
xsh,ysh = recentre(image,refstar)
print "Fine Centring: ", xsh,ysh
else: xsh,ysh = 0,0
if vary:
setaperture(image,refstar)
shift_file_coords(coords,xsh,ysh,'tempcoords2',sort='als')
iraf.phot(image,'tempcoords2','temp.mag2',inter="no",calgorithm='none',
mode='h',verify='no',update='no',verbose='no')
iraf.allstar(image,'temp.mag2',psf,'temp.als','temp.mag.arj','temp.sub.fits',
mode='h',verify='no',update='no',verbose='no')
out = iraf.pdump('temp.als','id,mag,merr,msky','yes',Stdout=1)
return out
def custom1(filename): # for NACO timing mode cubes - removes horizontal banding
#iraf.imarith(filename,'-','dark','temp')
iraf.imarith(filename,'/','flatK','temp')
im = pyfits.getdata('temp.fits')
med = median(im.transpose())
out = ((im).transpose()-med).transpose()
(pyfits.ImageHDU(out)).writeto("temp2.fits",clobber=True)
iraf.imdel('temp')
iraf.imcopy('temp2[1]','temp')
def get_id(starid,output='output'):
"""from the output of the photometry, grab the magnitudes and magerrs of starid"""
mag = load(output,usecols=[4+starid*4])
merr= load(output,usecols=[5+starid*4])
return mag,merr
def run(filelist,coords,refstar,shifts=None,centre=False,psf=False,pststars=None,
ids=None,dark=0,flat=1,timestamp="TIME",output='output',custom_process=None,
vary=False):
"""If psf==True, must include all extra par files.
If PSF is a filename (.psf.fits), this profileis used to fit every image.
Timestamp can be either a file of times (same length as filelist), a header
keyword, or an array of times.
The input list can include [] notation for multiple extensions or sections
of each file (incompatible with header-based time-stamps).
custom_process(file) is a function taking a filename (possible including [x]
syntax) and places a processed image in temp.fits."""
output = open(output,'w')
x = load(coords,usecols=[1])
numstars = len(x)
myfiles = open(filelist).readlines()
myfiles = [myfiles[i][:-1] for i in range(len(myfiles))]
if timestamp.__class__ == numpy.ndarray: #--sort out times--
times = 1 #times=1 means we know the times beforehand
elif len(glob(timestamp))>0:
timestamp = load(timestamp,usecols=[0])
times=1
else:
times=0 #times=0 mean find the time from each image
if type(shifts)==type(" "): #--sort out shifts--
xshifts,yshifts = load(shifts,unpack=True)#filename give, assuming 2 columns
xshifts,yshifts = -xshifts,-yshifts #these are in the opposite sense to coords from stack
elif n.iterable(shifts):
xshifts=n.array(shifts[0]) #for shifts given as arrays/lists
yshifts=n.array(shifts[1])
else:
print "No shifts" #assume all shifts are zero
xshifts = n.zeros(len(myfiles))
yshifts = n.zeros(len(myfiles))
for i,thisfile in enumerate(myfiles): #run!
print i,thisfile
if times:
time = timestamp[i] #known time
else:
time = pyfits.getval(thisfile,timestamp) #FITS keyword
try:
iraf.dele('temp.fits')
if custom_process: #arbitrary subroutine to process a file -> temp.fits
custom_process(thisfile)
else: #typical dark/bias subtract and flatfield
iraf.imarith(thisfile,'-',dark,'temp')
iraf.imarith('temp','/',flat,'temp')
shift_file_coords(coords,xshifts[i],yshifts[i],'tempcoords') #apply coarse shifts
shift_file_coords(refstar,xshifts[i],yshifts[i],'tempref',sort='num')
if psf:
if psf is True: #full PSF fit
shift_file_coords(pststars,xshifts[i],yshifts[i],'temppst')
out=psfphot('temp.fits','tempcoords','temppst','tempref',centre,vary)
else: #DAOphot with known PSF
out=simplepsfphot('temp.fits','tempcoords',psf,'tempref',centre,vary)
else: #aperture photometry
out=apphot('temp.fits','tempcoords','tempref',centre,vary=vary)
output.write("%s %s %s "%(thisfile,time,vary_fwhm))
myids = n.array([int(out[i].split()[0]) for i in range(len(out))])
for i in ids or range(numstars):
try: #search for each requested ID
foundid = find(myids==i)[0]
output.write(out[foundid]+" ")
except: #ID not found
output.write(" 0 0 0 0 ")
output.write("\n")
except KeyboardInterrupt: #exit on Ctrl-C
break
except pyraf.irafglobals.IrafError, err:
print "IRAF error ",err,thisfile
break
except ValueError, err:
print "Value error ",err,thisfile
raise
output.close()
#iraf.dele('temp*')
|
mit
| -5,665,480,579,238,870,000
| 42.251724
| 97
| 0.63358
| false
| 3.20957
| false
| false
| false
|
freelawproject/recap-server
|
settings.py
|
1
|
1377
|
"""Settings are derived by compiling any files ending in .py in the settings
directory, in alphabetical order.
This results in the following concept:
- default settings are in 10-public.py (this should contain most settings)
- custom settings are in 05-private.py (an example of this file is here for
you)
- any overrides to public settings can go in 20-private.py (you'll need to
create this)
"""
from __future__ import with_statement
import os
import glob
import sys
def _generate_secret_key(file_path):
import random
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
def random_char():
return chars[int(len(chars)*random.random())]
rand_str = ''.join(random_char() for i in range(64))
with open(file_path, 'w') as f:
f.write('SECRET_KEY=%s\n' % repr(rand_str))
ROOT_PATH = os.path.dirname(__file__)
# Try importing the SECRET_KEY from the file secret_key.py. If it doesn't exist,
# there is an import error, and the key is generated and written to the file.
try:
from secret_key import SECRET_KEY
except ImportError:
_generate_secret_key(os.path.join(ROOT_PATH, 'secret_key.py'))
from secret_key import SECRET_KEY
# Load the conf files.
conf_files = glob.glob(os.path.join(
os.path.dirname(__file__), 'settings', '*.py'))
conf_files.sort()
for f in conf_files:
execfile(os.path.abspath(f))
|
gpl-3.0
| 8,784,527,857,870,266,000
| 31.023256
| 80
| 0.697168
| false
| 3.451128
| false
| false
| false
|
mxamin/youtube-dl
|
youtube_dl/extractor/criterion.py
|
1
|
1284
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class CriterionIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?criterion\.com/films/(?P<id>[0-9]+)-.+'
_TEST = {
'url': 'http://www.criterion.com/films/184-le-samourai',
'md5': 'bc51beba55685509883a9a7830919ec3',
'info_dict': {
'id': '184',
'ext': 'mp4',
'title': 'Le Samouraï',
'description': 'md5:a2b4b116326558149bef81f76dcbb93f',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
final_url = self._search_regex(
r'so.addVariable\("videoURL", "(.+?)"\)\;', webpage, 'video url')
title = self._og_search_title(webpage)
description = self._html_search_meta('description', webpage)
thumbnail = self._search_regex(
r'so.addVariable\("thumbnailURL", "(.+?)"\)\;',
webpage, 'thumbnail url')
return {
'id': video_id,
'url': final_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
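# Illustrative invocation, reusing the URL from the _TEST block above:
#   youtube-dl "http://www.criterion.com/films/184-le-samourai"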
|
unlicense
| -7,290,849,255,959,012,000
| 30.292683
| 77
| 0.535464
| false
| 3.439678
| false
| false
| false
|
qedsoftware/commcare-hq
|
custom/opm/constants.py
|
1
|
1732
|
from corehq.apps.fixtures.models import FixtureDataItem
from corehq.util.quickcache import quickcache
DOMAIN = 'opm'
PREG_REG_XMLNS = "http://openrosa.org/formdesigner/D127C457-3E15-4F5E-88C3-98CD1722C625"
VHND_XMLNS = "http://openrosa.org/formdesigner/ff5de10d75afda15cddb3b00a0b1e21d33a50d59"
BIRTH_PREP_XMLNS = "http://openrosa.org/formdesigner/50378991-FEC3-408D-B4A5-A264F3B52184"
DELIVERY_XMLNS = "http://openrosa.org/formdesigner/492F8F0E-EE7D-4B28-B890-7CDA5F137194"
CHILD_FOLLOWUP_XMLNS = "http://openrosa.org/formdesigner/C90C2C1F-3B34-47F3-B3A3-061EAAC1A601"
CFU1_XMLNS = "http://openrosa.org/formdesigner/d642dd328514f2af92c093d414d63e5b2670b9c"
CFU2_XMLNS = "http://openrosa.org/formdesigner/9ef423bba8595a99976f0bc9532617841253a7fa"
CFU3_XMLNS = "http://openrosa.org/formdesigner/f15b9f8fb92e2552b1885897ece257609ed16649"
GROWTH_MONITORING_XMLNS = "http://openrosa.org/formdesigner/F1356F3F-C695-491F-9277-7F9B5522200C"
CLOSE_FORM = "http://openrosa.org/formdesigner/41A1B3E0-C1A4-41EA-AE90-71A328F0D8FD"
CHILDREN_FORMS = [CFU1_XMLNS, CFU2_XMLNS, CFU3_XMLNS, CHILD_FOLLOWUP_XMLNS]
OPM_XMLNSs = [PREG_REG_XMLNS, VHND_XMLNS, BIRTH_PREP_XMLNS, DELIVERY_XMLNS,
CHILD_FOLLOWUP_XMLNS, CFU1_XMLNS, CFU2_XMLNS, CFU3_XMLNS,
GROWTH_MONITORING_XMLNS, CLOSE_FORM]
# TODO Move these to a cached fixtures lookup
MONTH_AMT = 250
TWO_YEAR_AMT = 2000
THREE_YEAR_AMT = 3000
@quickcache([], timeout=30 * 60)
def get_fixture_data():
fixtures = FixtureDataItem.get_indexed_items(DOMAIN, 'condition_amounts', 'condition')
return dict((k, int(fixture['rs_amount'])) for k, fixture in fixtures.items())
class InvalidRow(Exception):
"""
Raise this in the row constructor to skip row
"""
|
bsd-3-clause
| 7,714,274,633,423,886,000
| 44.578947
| 96
| 0.769053
| false
| 2.255208
| false
| false
| false
|
tonioo/modoboa
|
modoboa/lib/u2u_decode.py
|
1
|
2282
|
# -*- coding: utf-8 -*-
"""
Unstructured rfc2047 header to unicode.
A stupid (and not accurate) answer to https://bugs.python.org/issue1079.
"""
from __future__ import unicode_literals
import re
from email.header import decode_header, make_header
from email.utils import parseaddr
from django.utils.encoding import smart_text
# check spaces between encoded_words (and strip them)
sre = re.compile(r"\?=[ \t]+=\?")
# re pat for MIME encoded_word (without trailing spaces)
mre = re.compile(r"=\?[^?]*?\?[bq]\?[^?\t]*?\?=", re.I)
# re to detect encoded ASCII characters
ascii_re = re.compile(r"=[\dA-F]{2,3}", re.I)
def clean_spaces(m):
"""Replace unencoded spaces in string.
:param str m: a match object
:return: the cleaned string
"""
return m.group(0).replace(" ", "=20")
def clean_non_printable_char(m):
"""Strip non printable characters."""
code = int(m.group(0)[1:], 16)
if code < 20:
return ""
return m.group(0)
def decode_mime(m):
"""Substitute matching encoded_word with unicode equiv."""
h = decode_header(clean_spaces(m))
try:
u = smart_text(make_header(h))
except (LookupError, UnicodeDecodeError):
return m.group(0)
return u
def clean_header(header):
"""Clean header function."""
header = "".join(header.splitlines())
header = sre.sub("?==?", header)
return ascii_re.sub(clean_non_printable_char, header)
def u2u_decode(s):
"""utility function for (final) decoding of mime header
note: resulting string is in one line (no \n within)
note2: spaces between enc_words are stripped (see RFC2047)
"""
return mre.sub(decode_mime, clean_header(s)).strip(" \r\t\n")
def decode_address(value):
"""Special function for address decoding.
    We need dedicated processing because RFC 1342 explicitly says an
    address MUST NOT contain an encoded-word:
These are the ONLY locations where an encoded-word may appear. In
particular, an encoded-word MUST NOT appear in any portion of an
"address". In addition, an encoded-word MUST NOT be used in a
Received header field.
"""
phrase, address = parseaddr(clean_header(value))
if phrase:
phrase = mre.sub(decode_mime, phrase)
return phrase, address
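# Illustrative usage (hypothetical header values; the results shown are the
# decoded text, the exact unicode repr depends on the Python 2 runtime):
#   u2u_decode("=?utf-8?q?Caf=C3=A9?= menu")          -> u"Café menu"
#   decode_address("=?utf-8?q?Andr=C3=A9?= <andre@example.com>")
#                                              -> (u"André", "andre@example.com")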
|
isc
| -962,520,203,660,710,000
| 26.493976
| 72
| 0.660824
| false
| 3.510769
| false
| false
| false
|
tudarmstadt-lt/topicrawler
|
lt.lm/src/main/py/mr_ngram_count.py
|
1
|
1297
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test:
cat data | map | sort | reduce
cat data | ./x.py -m | sort | ./x.py -r
hadoop jar /opt/cloudera/parcels/CDH/lib/hadoop-mapreduce/hadoop-streaming.jar \
-files x.py \
-mapper 'x.py -m' \
-reducer 'x.py -r' \
-input in \
-output out
@author: stevo
"""
from __future__ import print_function
from __future__ import division
import itertools as it
import sys
def readlines():
with sys.stdin as f:
for line in f:
if line.strip():
yield line
def mapper(lines):
for line in lines:
print('{}'.format(line.rstrip()))
def line2tuple(lines):
for line in lines:
splits = line.rstrip().split('\t')
yield splits
def reducer(lines, mincount=1):
for key, values in it.groupby(lines, lambda line : line.rstrip()):
num = reduce(lambda x, y: x + 1, values, 0)
if num >= mincount:
print('{}\t{}'.format(key, num))
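# Illustrative behaviour of the reducer above on already-sorted input
# (keys arrive grouped, as they do after the Hadoop shuffle/sort phase):
#   stdin : "a", "a", "b"   with mincount=1
#   stdout: "a<TAB>2" and "b<TAB>1"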
if len(sys.argv) < 2:
raise Exception('specify mapper (-m) or reducer (-r) function')
t = sys.argv[1]
mincount = int(sys.argv[2]) if len(sys.argv) > 2 else 1
if '-m' == t:
mapper(readlines());
elif '-r' == t:
reducer(readlines(), mincount);
else:
raise Exception('specify mapper (-m) or reducer (-r) function')
|
apache-2.0
| -1,830,261,497,265,860,000
| 22.6
| 80
| 0.597533
| false
| 3.117788
| false
| false
| false
|
Ziqi-Li/bknqgis
|
bokeh/bokeh/sphinxext/example_handler.py
|
1
|
2905
|
import sys
from ..application.handlers.code_runner import CodeRunner
from ..application.handlers.handler import Handler
from ..io import set_curdoc, curdoc
class ExampleHandler(Handler):
""" A stripped-down handler similar to CodeHandler but that does
some appropriate monkeypatching to
"""
_output_funcs = ['output_notebook', 'output_file', 'reset_output']
_io_funcs = ['show', 'save']
def __init__(self, source, filename):
super(ExampleHandler, self).__init__(self)
self._runner = CodeRunner(source, filename, [])
def modify_document(self, doc):
if self.failed:
return
module = self._runner.new_module()
sys.modules[module.__name__] = module
doc._modules.append(module)
old_doc = curdoc()
set_curdoc(doc)
        # keep the patched Document class separate from the saved curdoc so the
        # original document can be restored in the finally block below
        old_io, old_doc_cls = self._monkeypatch()
        try:
            self._runner.run(module, lambda: None)
        finally:
            self._unmonkeypatch(old_io, old_doc_cls)
            set_curdoc(old_doc)
def _monkeypatch(self):
def _pass(*args, **kw): pass
def _add_root(obj, *args, **kw):
from bokeh.io import curdoc
curdoc().add_root(obj)
def _curdoc(*args, **kw):
return curdoc()
# these functions are transitively imported from io into plotting,
# so we have to patch them all. Assumption is that no other patching
# has occurred, i.e. we can just save the funcs being patched once,
# from io, and use those as the originals to replace everywhere
import bokeh.io as io
import bokeh.plotting as p
mods = [io, p]
# TODO (bev) restore when bkcharts package is ready (but remove at 1.0 release)
# import bkcharts as c
# mods.append(c)
old_io = {}
for f in self._output_funcs + self._io_funcs:
old_io[f] = getattr(io, f)
for mod in mods:
for f in self._output_funcs:
setattr(mod, f, _pass)
for f in self._io_funcs:
setattr(mod, f, _add_root)
import bokeh.document as d
old_doc = d.Document
d.Document = _curdoc
return old_io, old_doc
def _unmonkeypatch(self, old_io, old_doc):
import bokeh.io as io
import bokeh.plotting as p
mods = [io, p]
# TODO (bev) restore when bkcharts package is ready (but remove at 1.0 release)
# import bkcharts as c
# mods.append(c)
for mod in mods:
for f in old_io:
setattr(mod, f, old_io[f])
import bokeh.document as d
d.Document = old_doc
@property
def failed(self):
return self._runner.failed
@property
def error(self):
return self._runner.error
@property
def error_detail(self):
return self._runner.error_detail
|
gpl-2.0
| -5,235,527,630,608,026,000
| 27.203883
| 87
| 0.578313
| false
| 3.878505
| false
| false
| false
|
BurningNetel/ctf-manager
|
CTFmanager/tests/views/event/test_event.py
|
1
|
6138
|
import json
from django.core.urlresolvers import reverse
from CTFmanager.tests.views.base import ViewTestCase
class EventPageAJAXJoinEventTest(ViewTestCase):
""" Tests that a user can join an event
A user should be able to join upcoming events.
And get a response without the page reloading
"""
def get_valid_event_join_post(self):
event = self.create_event()
response = self.client.post(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
return _json, event
def test_POST_returns_expected_json_on_valid_post(self):
_json, event = self.get_valid_event_join_post()
self.assertEqual(200, _json['status_code'])
def test_POST_gives_correct_user_count(self):
_json, event = self.get_valid_event_join_post()
self.assertEqual(1, _json['members'])
def test_logout_POST_gives_401_and_negative(self):
self.client.logout()
_json, event = self.get_valid_event_join_post()
self.assertEqual(-1, _json['members'])
self.assertEqual(401, _json['status_code'])
def test_duplicate_POST_gives_304_and_negative(self):
_json, event = self.get_valid_event_join_post()
response = self.client.post(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(-1, _json['members'])
self.assertEqual(304, _json['status_code'])
def test_valid_DELETE_gives_valid_json(self):
event = self.create_event_join_user()
response = self.client.delete(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(200, _json['status_code'])
self.assertEqual(0, _json['members'])
def test_duplicate_DELETE_gives_304_and_negative(self):
event = self.create_event_join_user()
self.client.delete(reverse('event_join', args=[event.name]))
response = self.client.delete(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(304, _json['status_code'])
self.assertEqual(-1, _json['members'])
def test_logout_then_DELTE_gives_401_and_negative(self):
event = self.create_event_join_user()
self.client.logout()
response = self.client.delete(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(401, _json['status_code'])
self.assertEqual(-1, _json['members'])
def create_event_join_user(self):
event = self.create_event()
event.join(self.user)
return event
class EventPageTest(ViewTestCase):
def test_events_page_requires_authentication(self):
self.client.logout()
response = self.client.get(reverse('events'))
self.assertRedirects(response, reverse('login') + '?next=' + reverse('events'))
def test_events_page_renders_events_template(self):
response = self.client.get(reverse('events'))
self.assertTemplateUsed(response, 'event/events.html')
def test_events_page_contains_new_event_button(self):
response = self.client.get(reverse('events'))
expected = 'id="btn_add_event" href="/events/new/">Add Event</a>'
self.assertContains(response, expected)
def test_events_page_displays_only_upcoming_events(self):
event_future = self.create_event("hatCTF", True)
event_past = self.create_event("RuCTF_2015", False)
response = self.client.get(reverse('events'))
_event = response.context['events']
self.assertEqual(_event[0], event_future)
self.assertEqual(len(_event), 1)
self.assertNotEqual(_event[0], event_past)
def test_events_page_has_correct_headers(self):
response = self.client.get(reverse('events'))
expected = 'Upcoming Events'
expected2 = 'Archive'
self.assertContains(response, expected)
self.assertContains(response, expected2)
def test_empty_events_set_shows_correct_message(self):
response = self.client.get(reverse('events'))
expected = 'No upcoming events!'
self.assertContains(response, expected)
def test_events_page_display_archive(self):
event_past = self.create_event('past_event', False)
response = self.client.get(reverse('events'))
archive = response.context['archive']
self.assertContains(response, '<table id="table_archive"')
self.assertContains(response, event_past.name)
self.assertEqual(archive[0], event_past)
def test_events_page_displays_error_message_when_nothing_in_archive(self):
response = self.client.get(reverse('events'))
archive = response.context['archive']
self.assertEqual(len(archive), 0)
self.assertContains(response, 'No past events!')
def test_event_page_displays_event_members_count(self):
event = self.create_event()
response = self.client.get(reverse('events'))
self.assertContains(response, '0 Participating')
event.members.add(self.user)
event.save()
response = self.client.get(reverse('events'))
self.assertContains(response, '1 Participating')
def test_event_page_displays_correct_button_text(self):
event = self.create_event()
response = self.client.get(reverse('events'))
self.assertContains(response, 'Join</button>')
event.join(self.user)
response = self.client.get(reverse('events'))
self.assertContains(response, 'Leave</button>')
def test_event_page_shows_username_in_popup(self):
event = self.create_event()
response = self.client.get(reverse('events'))
self.assertContains(response, self.user.username, 1)
self.assertContains(response, 'Nobody has joined yet!')
event.join(self.user)
response = self.client.get(reverse('events'))
self.assertContains(response, self.user.username, 2)
self.assertNotContains(response, 'Nobody has joined yet!')
|
gpl-3.0
| -6,477,876,122,721,076,000
| 38.352564
| 87
| 0.654774
| false
| 3.807692
| true
| false
| false
|
jeffmurphy/cif-router
|
poc/cif-router.py
|
1
|
21349
|
#!/usr/bin/python
#
#
# cif-router proof of concept
#
# cif-router [-p pubport] [-r routerport] [-m myname] [-h]
# -p default: 5556
# -r default: 5555
# -m default: cif-router
#
# cif-router is a zmq device with the following sockets:
# XPUB
# for republishing messages
# XSUB
# for subscribing to message feeds
# ROUTER
# for routing REQ/REP messages between clients
# also for accepting REQs from clients
# locally accepted types:
# REGISTER, UNREGISTER, LIST-CLIENTS
# locally generated replies:
# UNAUTHORIZED, OK, FAILED
#
# communication between router and clients is via CIF.msg passing
# the 'ControlStruct' portion of CIF.msg is used for communication
#
# a typical use case:
#
# cif-smrt's REQ connects to ROUTER and sends a REGISTER message with dst=cif-router
# cif-router's ROUTER responds with SUCCESS (if valid) or UNAUTHORIZED (if not valid)
# the apikey will be validated during this step
# cif-router's XSUB connects to cif-smrt's XPUB
# cif-smrt begins publishing CIF messages
# cif-router re-publishes the CIF messages to clients connected to cif-router's XPUB
# clients may be: cif-correlator, cif-db
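#
# sketch of the client side of the REGISTER exchange described above
# (hypothetical snippet, only to illustrate the message flow):
#
#   req = zmq.Context().socket(zmq.REQ)
#   req.connect("tcp://cif-router-host:5555")
#   m = control_pb2.ControlType()
#   m.version = m.version                      # required field
#   m.type = control_pb2.ControlType.COMMAND
#   m.command = control_pb2.ControlType.REGISTER
#   m.src, m.dst, m.apikey = "cif-smrt", "cif-router", "<apikey>"
#   req.send(m.SerializeToString())
#   reply = control_pb2.ControlType()
#   reply.ParseFromString(req.recv())          # SUCCESS, FAILED or UNAUTHORIZED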
import sys
import zmq
import time
import datetime
import threading
import getopt
import json
import pprint
import struct
sys.path.append('/usr/local/lib/cif-protocol/pb-python/gen-py')
import msg_pb2
import feed_pb2
import RFC5070_IODEF_v1_pb2
import MAEC_v2_pb2
import control_pb2
import cifsupport
sys.path.append('../../libcif/lib')
from CIF.RouterStats import *
from CIF.CtrlCommands.Clients import *
from CIF.CtrlCommands.Ping import *
from CIFRouter.MiniClient import *
from CIF.CtrlCommands.ThreadTracker import ThreadTracker
myname = "cif-router"
def dosubscribe(client, m):
client = m.src
if client in publishers :
print "dosubscribe: we've seen this client before. re-using old connection."
return control_pb2.ControlType.SUCCESS
elif clients.isregistered(client) == True:
if clients.apikey(client) == m.apikey:
print "dosubscribe: New publisher to connect to " + client
publishers[client] = time.time()
addr = m.iPublishRequest.ipaddress
port = m.iPublishRequest.port
print "dosubscribe: connect our xsub -> xpub on " + addr + ":" + str(port)
xsub.connect("tcp://" + addr + ":" + str(port))
return control_pb2.ControlType.SUCCESS
print "dosubscribe: iPublish from a registered client with a bad apikey: " + client + " " + m.apikey
print "dosubscribe: iPublish from a client who isnt registered: \"" + client + "\""
return control_pb2.ControlType.FAILED
def list_clients(client, apikey):
if clients.isregistered(client) == True and clients.apikey(client) == apikey:
return clients.asmessage()
return None
def make_register_reply(msgfrom, _apikey):
msg = control_pb2.ControlType()
msg.version = msg.version # required
msg.type = control_pb2.ControlType.REPLY
msg.command = control_pb2.ControlType.REGISTER
msg.dst = msgfrom
msg.src = "cif-router"
print "mrr " + _apikey
msg.apikey = _apikey
return msg
def make_unregister_reply(msgfrom, _apikey):
msg = control_pb2.ControlType()
msg.version = msg.version # required
msg.type = control_pb2.ControlType.REPLY
msg.command = control_pb2.ControlType.UNREGISTER
msg.dst = msgfrom
msg.src = "cif-router"
msg.apikey = _apikey
return msg
def make_msg_seq(msg):
_md5 = hashlib.md5()
_md5.update(msg.SerializeToString())
return _md5.digest()
def handle_miniclient_reply(socket, routerport, publisherport):
pending_registers = miniclient.pending_apikey_lookups()
print "pending_apikey_lookups: ", pending_registers
for apikey in pending_registers:
if apikey in register_wait_map:
reply_to = register_wait_map[apikey]
apikey_results = miniclient.get_pending_apikey(apikey)
print " send reply to: ", reply_to
msg = make_register_reply(reply_to['msgfrom'], apikey)
msg.status = control_pb2.ControlType.FAILED
if apikey_results != None:
if apikey_results.revoked == False:
if apikey_results.expires == 0 or apikey_results.expires >= time.time():
msg.registerResponse.REQport = routerport
msg.registerResponse.PUBport = publisherport
msg.status = control_pb2.ControlType.SUCCESS
clients.register(reply_to['msgfrom'], reply_to['from_zmqid'], apikey)
print " Register succeeded."
else:
print " Register failed: key expired"
else:
print " Register failed: key revoked"
else:
print " Register failed: unknown key"
msg.seq = reply_to['msgseq']
socket.send_multipart([reply_to['from_zmqid'], '', msg.SerializeToString()])
del register_wait_map[apikey]
elif apikey in unregister_wait_map:
reply_to = unregister_wait_map[apikey]
apikey_results = miniclient.get_pending_apikey(apikey)
print " send reply to: ", reply_to
msg = make_unregister_reply(reply_to['msgfrom'], apikey)
msg.status = control_pb2.ControlType.FAILED
if apikey_results != None:
if apikey_results.revoked == False:
if apikey_results.expires == 0 or apikey_results.expires >= time.time():
msg.status = control_pb2.ControlType.SUCCESS
clients.unregister(reply_to['msgfrom'])
print " Unregister succeeded."
else:
print " Unregister failed: key expired"
else:
print " Unregister failed: key revoked"
else:
print " Unregister failed: unknown key"
msg.seq = reply_to['msgseq']
socket.send_multipart([reply_to['from_zmqid'], '', msg.SerializeToString()])
del unregister_wait_map[apikey]
miniclient.remove_pending_apikey(apikey)
def myrelay(pubport):
relaycount = 0
print "[myrelay] Create XPUB socket on " + str(pubport)
xpub = context.socket(zmq.PUB)
xpub.bind("tcp://*:" + str(pubport))
while True:
try:
relaycount = relaycount + 1
m = xsub.recv()
_m = msg_pb2.MessageType()
_m.ParseFromString(m)
if _m.type == msg_pb2.MessageType.QUERY:
mystats.setrelayed(1, 'QUERY')
elif _m.type == msg_pb2.MessageType.REPLY:
mystats.setrelayed(1, 'REPLY')
elif _m.type == msg_pb2.MessageType.SUBMISSION:
mystats.setrelayed(1, 'SUBMISSION')
for bmt in _m.submissionRequest:
mystats.setrelayed(1, bmt.baseObjectType)
print "[myrelay] total:%d got:%d bytes" % (relaycount, len(m))
#print "[myrelay] got msg on our xsub socket: " , m
xpub.send(m)
except Exception as e:
print "[myrelay] invalid message received: ", e
def usage():
print "cif-router [-r routerport] [-p pubport] [-m myid] [-a myapikey] [-dn dbname] [-dk dbkey] [-h]"
print " routerport = 5555, pubport = 5556, myid = cif-router"
print " dbkey = a8fd97c3-9f8b-477b-b45b-ba06719a0088"
print " dbname = cif-db"
try:
opts, args = getopt.getopt(sys.argv[1:], 'p:r:m:h')
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
global mystats
global clients
global thread_tracker
context = zmq.Context()
clients = Clients()
mystats = RouterStats()
publishers = {}
routerport = 5555
publisherport = 5556
myid = "cif-router"
dbkey = 'a8fd97c3-9f8b-477b-b45b-ba06719a0088'
dbname = 'cif-db'
global apikey
apikey = 'a1fd11c1-1f1b-477b-b45b-ba06719a0088'
miniclient = None
miniclient_id = myid + "-miniclient"
register_wait_map = {}
unregister_wait_map = {}
for o, a in opts:
if o == "-r":
routerport = a
elif o == "-p":
publisherport = a
elif o == "-m":
myid = a
elif o == "-dk":
dbkey = a
elif o == "-dn":
dbname = a
elif o == "-a":
apikey = a
elif o == "-h":
usage()
sys.exit(2)
print "Create ROUTER socket on " + str(routerport)
global socket
socket = context.socket(zmq.ROUTER)
socket.bind("tcp://*:" + str(routerport))
socket.setsockopt(zmq.IDENTITY, myname)
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
print "Create XSUB socket"
xsub = context.socket(zmq.SUB)
xsub.setsockopt(zmq.SUBSCRIBE, '')
print "Connect XSUB<->XPUB"
thread = threading.Thread(target=myrelay, args=(publisherport,))
thread.start()
while not thread.isAlive():
print "waiting for pubsub relay thread to become alive"
time.sleep(1)
thread_tracker = ThreadTracker(False)
thread_tracker.add(id=thread.ident, user='Router', host='localhost', state='Running', info="PUBSUB Relay")
print "Entering event loop"
try:
open_for_business = False
while True:
sockets_with_data_ready = dict(poller.poll(1000))
#print "[up " + str(int(mystats.getuptime())) + "s]: Wakeup: "
if miniclient != None:
if miniclient.pending() == True:
print "\tMiniclient has replies we need to handle."
handle_miniclient_reply(socket, routerport, publisherport)
if sockets_with_data_ready and sockets_with_data_ready.get(socket) == zmq.POLLIN:
print "[up " + str(int(mystats.getuptime())) + "s]: Got an inbound message"
rawmsg = socket.recv_multipart()
#print " Got ", rawmsg
msg = control_pb2.ControlType()
try:
msg.ParseFromString(rawmsg[2])
except Exception as e:
print "Received message isn't a protobuf: ", e
mystats.setbad()
else:
from_zmqid = rawmsg[0] # save the ZMQ identity of who sent us this message
#print "Got msg: "#, msg.seq
try:
cifsupport.versionCheck(msg)
except Exception as e:
print "\tReceived message has incompatible version: ", e
mystats.setbadversion(1, msg.version)
else:
if cifsupport.isControl(msg):
msgfrom = msg.src
msgto = msg.dst
msgcommand = msg.command
msgcommandtext = control_pb2._CONTROLTYPE_COMMANDTYPE.values_by_number[msg.command].name
msgid = msg.seq
if msgfrom != '' and msg.apikey != '':
if msgto == myname and msg.type == control_pb2.ControlType.REPLY:
print "\tREPLY for me: ", msgcommand
if msgcommand == control_pb2.ControlType.APIKEY_GET:
print "\tReceived a REPLY for an APIKEY_GET"
elif msgto == myname and msg.type == control_pb2.ControlType.COMMAND:
print "\tCOMMAND for me: ", msgcommandtext
mystats.setcontrols(1, msgcommandtext)
"""
For REGISTER:
We allow only the db to register with us while we are not
open_for_business. Once the DB registers, we are open_for_business
since we can then start validating apikeys. Until that time, we can
only validate the dbkey that is specified on the command line when
you launch this program.
"""
if msgcommand == control_pb2.ControlType.REGISTER:
print "\tREGISTER from: " + msgfrom
msg.status = control_pb2.ControlType.FAILED
msg.type = control_pb2.ControlType.REPLY
msg.seq = msgid
if msgfrom == miniclient_id and msg.apikey == apikey:
clients.register(msgfrom, from_zmqid, msg.apikey)
msg.status = control_pb2.ControlType.SUCCESS
msg.registerResponse.REQport = routerport
msg.registerResponse.PUBport = publisherport
print "\tMiniClient has registered."
socket.send_multipart([from_zmqid, '', msg.SerializeToString()])
elif msgfrom == dbname and msg.apikey == dbkey:
clients.register(msgfrom, from_zmqid, msg.apikey)
msg.status = control_pb2.ControlType.SUCCESS
msg.registerResponse.REQport = routerport
msg.registerResponse.PUBport = publisherport
open_for_business = True
print "\tDB has connected successfully. Sending reply to DB."
print "\tStarting embedded client"
miniclient = MiniClient(apikey, "127.0.0.1", "127.0.0.1:" + str(routerport), 5557, miniclient_id, thread_tracker, True)
socket.send_multipart([from_zmqid, '', msg.SerializeToString()])
elif open_for_business == True:
"""
Since we need to wait for the DB to response, we note this pending request, ask the miniclient
to handle the lookup. We will poll the MC to see if the lookup has finished. Reply to client
will be sent from handle_miniclient_reply()
"""
miniclient.lookup_apikey(msg.apikey)
register_wait_map[msg.apikey] = {'msgfrom': msgfrom, 'from_zmqid': from_zmqid, 'msgseq': msg.seq}
else:
print "\tNot open_for_business yet. Go away."
elif msgcommand == control_pb2.ControlType.UNREGISTER:
"""
If the database unregisters, then we are not open_for_business any more.
"""
print "\tUNREGISTER from: " + msgfrom
if open_for_business == True:
if msgfrom == dbname and msg.apikey == dbkey:
print "\t\tDB unregistered. Closing for business."
open_for_business = False
clients.unregister(msgfrom)
msg.status = control_pb2.ControlType.SUCCESS
msg.seq = msgid
socket.send_multipart([ from_zmqid, '', msg.SerializeToString()])
else:
"""
Since we need to wait for the DB to response, we note this pending request, ask the miniclient
to handle the lookup. We will poll the MC to see if the lookup has finished. Reply to the client
will be sent from handle_miniclient_reply()
"""
miniclient.lookup_apikey(msg.apikey)
unregister_wait_map[msg.apikey] = {'msgfrom': msgfrom, 'from_zmqid': from_zmqid, 'msgseq': msg.seq}
elif msgcommand == control_pb2.ControlType.LISTCLIENTS:
print "\tLIST-CLIENTS for: " + msgfrom
if open_for_business == True:
rv = list_clients(msg.src, msg.apikey)
msg.seq = msgid
msg.status = msg.status | control_pb2.ControlType.FAILED
if rv != None:
msg.status = msg.status | control_pb2.ControlType.SUCCESS
msg.listClientsResponse.client.extend(rv.client)
msg.listClientsResponse.connectTimestamp.extend(rv.connectTimestamp)
socket.send_multipart( [ from_zmqid, '', msg.SerializeToString() ] )
elif msg.command == control_pb2.ControlType.STATS:
print "\tSTATS for: " + msgfrom
if open_for_business == True:
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
msg.status = control_pb2.ControlType.SUCCESS
msg.statsResponse.statsType = control_pb2.StatsResponse.ROUTER
msg.statsResponse.stats = mystats.asjson()
socket.send_multipart( [ from_zmqid, '', msg.SerializeToString() ] )
elif msg.command == control_pb2.ControlType.THREADS_LIST:
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
msg.status = control_pb2.ControlType.SUCCESS
thread_tracker.asmessage(msg.listThreadsResponse)
socket.send_multipart( [ from_zmqid, '', msg.SerializeToString() ] )
if msg.command == control_pb2.ControlType.PING:
c = Ping.makereply(msg)
socket.send_multipart( [ from_zmqid, '', c.SerializeToString() ] )
elif msgcommand == control_pb2.ControlType.IPUBLISH:
print "\tIPUBLISH from: " + msgfrom
if open_for_business == True:
rv = dosubscribe(from_zmqid, msg)
msg.status = rv
socket.send_multipart( [from_zmqid, '', msg.SerializeToString()] )
else:
print "\tCOMMAND for someone else: cmd=", msgcommandtext, "src=", msgfrom, " dst=", msgto
msgto_zmqid = clients.getzmqidentity(msgto)
if msgto_zmqid != None:
socket.send_multipart([msgto_zmqid, '', msg.SerializeToString()])
else:
print "\tUnknown message destination: ", msgto
else:
print "\tmsgfrom and/or msg.apikey is empty"
except KeyboardInterrupt:
print "Shut down."
if thread.isAlive():
try:
thread._Thread__stop()
except:
print(str(thread.getName()) + ' could not be terminated')
sys.exit(0)
|
bsd-3-clause
| -4,783,758,994,462,898,000
| 44.230932
| 161
| 0.492154
| false
| 4.620996
| false
| false
| false
|
fdouetteau/PyBabe
|
pybabe/pivot.py
|
1
|
2935
|
try:
from collections import OrderedDict
except:
## 2.6 Fallback
from ordereddict import OrderedDict
from base import StreamHeader, StreamFooter, BabeBase
class OrderedDefaultdict(OrderedDict):
def __init__(self, *args, **kwargs):
newdefault = None
newargs = ()
if args:
newdefault = args[0]
if not (newdefault is None or callable(newdefault)):
raise TypeError('first argument must be callable or None')
newargs = args[1:]
self.default_factory = newdefault
super(self.__class__, self).__init__(*newargs, **kwargs)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self): # optional, for pickle support
args = self.default_factory if self.default_factory else tuple()
return type(self), args, None, None, self.items()
class OrderedSet(set):
def __init__(self):
self.list = []
def add(self, elt):
if elt in self:
return
else:
super(OrderedSet, self).add(elt)
self.list.append(elt)
def __iter__(self):
return self.list.__iter__()
def pivot(stream, pivot, group):
"Create a pivot around field, grouping on identical value for 'group'"
groups = OrderedDefaultdict(dict)
pivot_values = OrderedSet()
header = None
group_n = map(StreamHeader.keynormalize, group)
for row in stream:
if isinstance(row, StreamHeader):
header = row
elif isinstance(row, StreamFooter):
# HEADER IS : GROUP + (OTHER FIELDS * EACH VALUE
other_fields = [f for f in header.fields if not f in group and not f == pivot]
other_fields_k = map(StreamHeader.keynormalize, other_fields)
fields = group + [f + "-" + str(v)
for v in pivot_values.list for f in other_fields]
newheader = header.replace(fields=fields)
yield newheader
for _, row_dict in groups.iteritems():
## Create a line per group
mrow = row_dict.itervalues().next()
group_cols = [getattr(mrow, col) for col in group_n]
for v in pivot_values:
if v in row_dict:
mrow = row_dict[v]
group_cols.extend([getattr(mrow, col) for col in other_fields_k])
else:
group_cols.extend([None for col in other_fields])
yield group_cols
yield row
else:
kgroup = ""
for f in group_n:
kgroup = kgroup + str(getattr(row, f))
groups[kgroup][getattr(row, pivot)] = row
pivot_values.add(getattr(row, pivot))
BabeBase.register("pivot", pivot)
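# Illustrative run over a hypothetical stream:
#   input fields : ['city', 'metric', 'value'], pivot='metric', group=['city']
#   input rows   : ('NY', 'pop', 8), ('NY', 'area', 300), ('SF', 'pop', 1)
#   output fields: ['city', 'value-pop', 'value-area']
#   output rows  : ['NY', 8, 300] and ['SF', 1, None]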
|
bsd-3-clause
| -1,801,747,529,367,375,600
| 33.529412
| 90
| 0.560136
| false
| 4.186876
| false
| false
| false
|
rbn42/stiler
|
config.py
|
1
|
1027
|
WinBorder = 2
LeftPadding = 15
BottomPadding = 15
TopPadding = BottomPadding
RightPadding = BottomPadding
NavigateAcrossWorkspaces = True  # available in Unity7
TempFile = "/dev/shm/.stiler_db"
LockFile = "/dev/shm/.stiler.lock"
# This is the configuration that works for unity7. If you are using a
# different Desktop Environment, close all windows and execute "wmctrl
# -lG" to find out all the applications need to exclude.
EXCLUDE_APPLICATIONS = ['<unknown>', 'x-nautilus-desktop', 'unity-launcher',
'unity-panel', 'Hud', 'unity-dash', 'Desktop',
'Docky',
'screenkey', 'XdndCollectionWindowImp']
# An alternative method to exclude applications.
EXCLUDE_WM_CLASS = ['wesnoth-1.12']
UNRESIZABLE_APPLICATIONS = ['Screenkey']
RESIZE_STEP = 50
MOVE_STEP = 50
MIN_WINDOW_WIDTH = 50
MIN_WINDOW_HEIGHT = 50
#NOFRAME_WMCLASS = ['Wine']
# In i3-wm's window tree, only one child of a node is allowed to split.
#MAX_KD_TREE_BRANCH = 1
MAX_KD_TREE_BRANCH = 2
|
mit
| 8,967,949,853,643,365,000
| 31.09375
| 76
| 0.685492
| false
| 3.22956
| false
| false
| false
|
ojii/sandlib
|
lib/lib_pypy/_ctypes/primitive.py
|
1
|
11496
|
import _ffi
import _rawffi
import weakref
import sys
SIMPLE_TYPE_CHARS = "cbBhHiIlLdfguzZqQPXOv?"
from _ctypes.basics import _CData, _CDataMeta, cdata_from_address,\
CArgObject
from _ctypes.builtin import ConvMode
from _ctypes.array import Array
from _ctypes.pointer import _Pointer, as_ffi_pointer
#from _ctypes.function import CFuncPtr # this import is moved at the bottom
# because else it's circular
class NULL(object):
pass
NULL = NULL()
TP_TO_DEFAULT = {
'c': 0,
'u': 0,
'b': 0,
'B': 0,
'h': 0,
'H': 0,
'i': 0,
'I': 0,
'l': 0,
'L': 0,
'q': 0,
'Q': 0,
'f': 0.0,
'd': 0.0,
'g': 0.0,
'P': None,
# not part of struct
'O': NULL,
'z': None,
'Z': None,
'?': False,
}
if sys.platform == 'win32':
TP_TO_DEFAULT['X'] = NULL
TP_TO_DEFAULT['v'] = 0
DEFAULT_VALUE = object()
class GlobalPyobjContainer(object):
def __init__(self):
self.objs = []
def add(self, obj):
num = len(self.objs)
self.objs.append(weakref.ref(obj))
return num
def get(self, num):
return self.objs[num]()
pyobj_container = GlobalPyobjContainer()
def generic_xxx_p_from_param(cls, value):
if value is None:
return cls(None)
if isinstance(value, basestring):
return cls(value)
if isinstance(value, _SimpleCData) and \
type(value)._type_ in 'zZP':
return value
return None # eventually raise
def from_param_char_p(cls, value):
"used by c_char_p and c_wchar_p subclasses"
res = generic_xxx_p_from_param(cls, value)
if res is not None:
return res
if isinstance(value, (Array, _Pointer)):
from ctypes import c_char, c_byte, c_wchar
if type(value)._type_ in [c_char, c_byte, c_wchar]:
return value
def from_param_void_p(cls, value):
"used by c_void_p subclasses"
res = generic_xxx_p_from_param(cls, value)
if res is not None:
return res
if isinstance(value, Array):
return value
if isinstance(value, (_Pointer, CFuncPtr)):
return cls.from_address(value._buffer.buffer)
if isinstance(value, (int, long)):
return cls(value)
FROM_PARAM_BY_TYPE = {
'z': from_param_char_p,
'Z': from_param_char_p,
'P': from_param_void_p,
}
class SimpleType(_CDataMeta):
def __new__(self, name, bases, dct):
try:
tp = dct['_type_']
except KeyError:
for base in bases:
if hasattr(base, '_type_'):
tp = base._type_
break
else:
raise AttributeError("cannot find _type_ attribute")
if (not isinstance(tp, str) or
not len(tp) == 1 or
tp not in SIMPLE_TYPE_CHARS):
raise ValueError('%s is not a type character' % (tp))
default = TP_TO_DEFAULT[tp]
ffiarray = _rawffi.Array(tp)
result = type.__new__(self, name, bases, dct)
result._ffiargshape = tp
result._ffishape = tp
result._fficompositesize = None
result._ffiarray = ffiarray
if tp == 'z':
# c_char_p
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
else:
return _rawffi.charp2string(addr)
def _setvalue(self, value):
if isinstance(value, basestring):
if isinstance(value, unicode):
value = value.encode(ConvMode.encoding,
ConvMode.errors)
#self._objects = value
array = _rawffi.Array('c')(len(value)+1, value)
self._objects = CArgObject(value, array)
value = array.buffer
elif value is None:
value = 0
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
result._ffiargtype = _ffi.types.Pointer(_ffi.types.char)
elif tp == 'Z':
# c_wchar_p
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
else:
return _rawffi.wcharp2unicode(addr)
def _setvalue(self, value):
if isinstance(value, basestring):
if isinstance(value, str):
value = value.decode(ConvMode.encoding,
ConvMode.errors)
#self._objects = value
array = _rawffi.Array('u')(len(value)+1, value)
self._objects = CArgObject(value, array)
value = array.buffer
elif value is None:
value = 0
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
result._ffiargtype = _ffi.types.Pointer(_ffi.types.unichar)
elif tp == 'P':
# c_void_p
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
return addr
def _setvalue(self, value):
if isinstance(value, str):
array = _rawffi.Array('c')(len(value)+1, value)
self._objects = CArgObject(value, array)
value = array.buffer
elif value is None:
value = 0
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
elif tp == 'u':
def _setvalue(self, val):
if isinstance(val, str):
val = val.decode(ConvMode.encoding, ConvMode.errors)
# possible if we use 'ignore'
if val:
self._buffer[0] = val
def _getvalue(self):
return self._buffer[0]
result.value = property(_getvalue, _setvalue)
elif tp == 'c':
def _setvalue(self, val):
if isinstance(val, unicode):
val = val.encode(ConvMode.encoding, ConvMode.errors)
if val:
self._buffer[0] = val
def _getvalue(self):
return self._buffer[0]
result.value = property(_getvalue, _setvalue)
elif tp == 'O':
def _setvalue(self, val):
num = pyobj_container.add(val)
self._buffer[0] = num
def _getvalue(self):
return pyobj_container.get(self._buffer[0])
result.value = property(_getvalue, _setvalue)
elif tp == 'X':
from ctypes import WinDLL
# Use WinDLL("oleaut32") instead of windll.oleaut32
# because the latter is a shared (cached) object; and
# other code may set their own restypes. We need out own
# restype here.
oleaut32 = WinDLL("oleaut32")
SysAllocStringLen = oleaut32.SysAllocStringLen
SysStringLen = oleaut32.SysStringLen
SysFreeString = oleaut32.SysFreeString
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
else:
size = SysStringLen(addr)
return _rawffi.wcharp2rawunicode(addr, size)
def _setvalue(self, value):
if isinstance(value, basestring):
if isinstance(value, str):
value = value.decode(ConvMode.encoding,
ConvMode.errors)
array = _rawffi.Array('u')(len(value)+1, value)
value = SysAllocStringLen(array.buffer, len(value))
elif value is None:
value = 0
if self._buffer[0]:
SysFreeString(self._buffer[0])
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
elif tp == '?': # regular bool
def _getvalue(self):
return bool(self._buffer[0])
def _setvalue(self, value):
self._buffer[0] = bool(value)
result.value = property(_getvalue, _setvalue)
elif tp == 'v': # VARIANT_BOOL type
def _getvalue(self):
return bool(self._buffer[0])
def _setvalue(self, value):
if value:
self._buffer[0] = -1 # VARIANT_TRUE
else:
self._buffer[0] = 0 # VARIANT_FALSE
result.value = property(_getvalue, _setvalue)
# make pointer-types compatible with the _ffi fast path
if result._is_pointer_like():
def _as_ffi_pointer_(self, ffitype):
return as_ffi_pointer(self, ffitype)
result._as_ffi_pointer_ = _as_ffi_pointer_
return result
from_address = cdata_from_address
def from_param(self, value):
if isinstance(value, self):
return value
from_param_f = FROM_PARAM_BY_TYPE.get(self._type_)
if from_param_f:
res = from_param_f(self, value)
if res is not None:
return res
else:
try:
return self(value)
except (TypeError, ValueError):
pass
return super(SimpleType, self).from_param(value)
def _CData_output(self, resbuffer, base=None, index=-1):
output = super(SimpleType, self)._CData_output(resbuffer, base, index)
if self.__bases__[0] is _SimpleCData:
return output.value
return output
def _sizeofinstances(self):
return _rawffi.sizeof(self._type_)
def _alignmentofinstances(self):
return _rawffi.alignment(self._type_)
def _is_pointer_like(self):
return self._type_ in "sPzUZXO"
class _SimpleCData(_CData):
__metaclass__ = SimpleType
_type_ = 'i'
def __init__(self, value=DEFAULT_VALUE):
if not hasattr(self, '_buffer'):
self._buffer = self._ffiarray(1, autofree=True)
if value is not DEFAULT_VALUE:
self.value = value
def _ensure_objects(self):
if self._type_ not in 'zZP':
assert self._objects is None
return self._objects
def _getvalue(self):
return self._buffer[0]
def _setvalue(self, value):
self._buffer[0] = value
value = property(_getvalue, _setvalue)
del _getvalue, _setvalue
def __ctypes_from_outparam__(self):
meta = type(type(self))
if issubclass(meta, SimpleType) and meta != SimpleType:
return self
return self.value
def __repr__(self):
if type(self).__bases__[0] is _SimpleCData:
return "%s(%r)" % (type(self).__name__, self.value)
else:
return "<%s object at 0x%x>" % (type(self).__name__,
id(self))
def __nonzero__(self):
return self._buffer[0] not in (0, '\x00')
from _ctypes.function import CFuncPtr
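# Illustrative declarations handled by the SimpleType metaclass above
# ('c_int' / 'c_char_p' are just example names, as in the stdlib ctypes):
#   class c_int(_SimpleCData):
#       _type_ = 'i'        # default value 0, backed by the _rawffi 'i' shape
#   class c_char_p(_SimpleCData):
#       _type_ = 'z'        # .value returns a Python string or None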
|
bsd-3-clause
| 4,007,503,311,104,080,000
| 31.752137
| 78
| 0.501044
| false
| 4.08674
| false
| false
| false
|
mongolab/mongoctl
|
mongoctl/tests/sharded_test.py
|
1
|
2582
|
# The MIT License
# Copyright (c) 2012 ObjectLabs Corporation
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import unittest
import time
from mongoctl.tests.test_base import MongoctlTestBase, append_user_arg
########################################################################################################################
# Servers
SHARD_TEST_SERVERS = [
"ConfigServer1",
"ConfigServer2",
"ConfigServer3",
"Mongos1",
"Mongos2",
"ShardServer1",
"ShardServer2",
"ShardServer3",
"ShardServer4",
"ShardServer5",
"ShardServer6",
"ShardArbiter"
]
########################################################################################################################
### Sharded Servers
class ShardedTest(MongoctlTestBase):
########################################################################################################################
def test_sharded(self):
# Start all sharded servers
for s_id in SHARD_TEST_SERVERS:
self.assert_start_server(s_id, start_options=["--rs-add"])
print "Sleeping for 10 seconds..."
        # sleep for 10 seconds
time.sleep(10)
conf_cmd = ["configure-shard-cluster", "ShardedCluster"]
append_user_arg(conf_cmd)
# Configure the sharded cluster
self.mongoctl_assert_cmd(conf_cmd)
###########################################################################
def get_my_test_servers(self):
return SHARD_TEST_SERVERS
# booty
if __name__ == '__main__':
unittest.main()
|
mit
| 1,538,437,245,596,689,700
| 33.891892
| 124
| 0.585593
| false
| 4.635548
| true
| false
| false
|
jamasi/Xtal-xplore-R
|
gui/doublespinslider.py
|
1
|
3682
|
# -*- coding: utf-8 -*-
"""DoubleSpinSlider - a custom widget combining a slider with a spinbox
Copyright (C) 2014 Jan M. Simons <marten@xtal.rwth-aachen.de>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division, print_function, absolute_import
from decimal import Decimal
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSlot
class DoubleSpinSlider(QtGui.QWidget):
"""This is a QWidget containing a QSlider and a QDoubleSpinBox"""
def __init__(self, parent=None, width=50, height=100, dpi=100):
#super(DoubleSpinSlider, self).__init__(parent)
QtGui.QWidget.__init__(self, parent)
self._vLayout = QtGui.QVBoxLayout()
self._label = QtGui.QLabel(parent)
self._label.setAlignment(QtCore.Qt.AlignCenter)
self._vLayout.addWidget(self._label)
self._dSBox = QtGui.QDoubleSpinBox(parent)
self._dSBox.setWrapping(True)
self._dSBox.setDecimals(4)
self._dSBox.setMaximum(1.00000000)
self._dSBox.setSingleStep(0.1000000000)
self._vLayout.addWidget(self._dSBox)
self._hLayout = QtGui.QHBoxLayout()
self._vSlider = QtGui.QSlider(parent)
self._vSlider.setMinimum(0)
self._vSlider.setMaximum(10000)
self._vSlider.setPageStep(1000)
self._vSlider.setOrientation(QtCore.Qt.Vertical)
self._vSlider.setTickPosition(QtGui.QSlider.TicksBothSides)
self._vSlider.setTickInterval(0)
self._hLayout.addWidget(self._vSlider)
self._vLayout.addLayout(self._hLayout)
self.setLayout(self._vLayout)
self.setParent(parent)
# map functions
self.setText = self._label.setText
self.text = self._label.text
self.setValue = self._dSBox.setValue
self.value = self._dSBox.value
self._vSlider.valueChanged.connect(self.ChangeSpinBox)
self._dSBox.valueChanged.connect(self.ChangeSlider)
def _multiplier(self):
return 10.000000 ** self._dSBox.decimals()
@pyqtSlot(int)
def ChangeSpinBox(self, slidervalue):
#print("sv: {}".format(slidervalue))
newvalue = round(slidervalue / (self._multiplier()),4)
#print("nv: {}".format(newvalue))
if newvalue != self._dSBox.value():
self._dSBox.setValue(newvalue)
@pyqtSlot('double')
def ChangeSlider(self, spinboxvalue):
newvalue = spinboxvalue * self._multiplier()
#print("sb: {sb} mult: {mult} prod: {prod}".format(
# sb=spinboxvalue,
# mult=int(10.00000000 ** self._dSBox.decimals()),
# prod=newvalue))
self._vSlider.setValue(newvalue)
@pyqtSlot('double')
def setMaximum(self, maximum):
self._dSBox.setMaximum(maximum)
self._vSlider.setMaximum(maximum * self._multiplier())
@pyqtSlot('double')
def setMinimum(self, minimum):
self._dSBox.setMinimum(minimum)
self._vSlider.setMinimum(minimum * self._multiplier())
|
agpl-3.0
| 7,329,879,116,559,789,000
| 38.591398
| 77
| 0.655894
| false
| 3.835417
| false
| false
| false
|
adw0rd/lettuce-py3
|
lettuce/__init__.py
|
1
|
6767
|
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__version__ = version = '0.2.22'
release = 'kryptonite'
import os
import sys
import traceback
import warnings
try:
from imp import reload
except ImportError:
# python 2.5 fallback
pass
import random
from lettuce.core import Feature, TotalResult
from lettuce.terrain import after
from lettuce.terrain import before
from lettuce.terrain import world
from lettuce.decorators import step, steps
from lettuce.registry import call_hook
from lettuce.registry import STEP_REGISTRY
from lettuce.registry import CALLBACK_REGISTRY
from lettuce.exceptions import StepLoadingError
from lettuce.plugins import (
xunit_output,
subunit_output,
autopdb,
smtp_mail_queue,
)
from lettuce import fs
from lettuce import exceptions
try:
from colorama import init as ms_windows_workaround
ms_windows_workaround()
except ImportError:
pass
__all__ = [
'after',
'before',
'step',
'steps',
'world',
'STEP_REGISTRY',
'CALLBACK_REGISTRY',
'call_hook',
]
try:
terrain = fs.FileSystem._import("terrain")
reload(terrain)
except Exception as e:
if not "No module named 'terrain'" in str(e):
string = 'Lettuce has tried to load the conventional environment ' \
'module "terrain"\nbut it has errors, check its contents and ' \
'try to run lettuce again.\n\nOriginal traceback below:\n\n'
sys.stderr.write(string)
sys.stderr.write(exceptions.traceback.format_exc())
raise SystemExit(1)
class Runner(object):
""" Main lettuce's test runner
Takes a base path as parameter (string), so that it can look for
features and step definitions on there.
"""
def __init__(self, base_path, scenarios=None,
verbosity=0, no_color=False, random=False,
enable_xunit=False, xunit_filename=None,
enable_subunit=False, subunit_filename=None,
tags=None, failfast=False, auto_pdb=False,
smtp_queue=None, root_dir=None, **kwargs):
""" lettuce.Runner will try to find a terrain.py file and
import it from within `base_path`
"""
self.tags = tags
self.single_feature = None
if os.path.isfile(base_path) and os.path.exists(base_path):
self.single_feature = base_path
base_path = os.path.dirname(base_path)
sys.path.insert(0, base_path)
self.loader = fs.FeatureLoader(base_path, root_dir)
self.verbosity = verbosity
self.scenarios = scenarios and list(map(int, scenarios.split(","))) or None
self.failfast = failfast
if auto_pdb:
autopdb.enable(self)
sys.path.remove(base_path)
if verbosity == 0:
from lettuce.plugins import non_verbose as output
elif verbosity == 1:
from lettuce.plugins import dots as output
elif verbosity == 2:
from lettuce.plugins import scenario_names as output
else:
if verbosity == 4:
from lettuce.plugins import colored_shell_output as output
msg = ('Deprecated in lettuce 2.2.21. Use verbosity 3 without '
'--no-color flag instead of verbosity 4')
warnings.warn(msg, DeprecationWarning)
elif verbosity == 3:
if no_color:
from lettuce.plugins import shell_output as output
else:
from lettuce.plugins import colored_shell_output as output
self.random = random
if enable_xunit:
xunit_output.enable(filename=xunit_filename)
if smtp_queue:
smtp_mail_queue.enable()
if enable_subunit:
subunit_output.enable(filename=subunit_filename)
reload(output)
self.output = output
def run(self):
""" Find and load step definitions, and them find and load
features under `base_path` specified on constructor
"""
results = []
if self.single_feature:
features_files = [self.single_feature]
else:
features_files = self.loader.find_feature_files()
if self.random:
random.shuffle(features_files)
if not features_files:
self.output.print_no_features_found(self.loader.base_dir)
return
# only load steps if we've located some features.
# this prevents stupid bugs when loading django modules
# that we don't even want to test.
try:
self.loader.find_and_load_step_definitions()
except StepLoadingError as e:
print("Error loading step definitions:\n", e)
return
call_hook('before', 'all')
failed = False
try:
for filename in features_files:
feature = Feature.from_file(filename)
results.append(
feature.run(self.scenarios,
tags=self.tags,
random=self.random,
failfast=self.failfast))
except exceptions.LettuceSyntaxError as e:
sys.stderr.write(e.msg)
failed = True
except exceptions.NoDefinitionFound as e:
sys.stderr.write(e.msg)
failed = True
except:
if not self.failfast:
e = sys.exc_info()[1]
print("Died with %s" % str(e))
traceback.print_exc()
else:
print()
print ("Lettuce aborted running any more tests "
"because was called with the `--failfast` option")
failed = True
finally:
total = TotalResult(results)
total.output_format()
call_hook('after', 'all', total)
if failed:
raise SystemExit(2)
return total
|
gpl-3.0
| -6,675,595,172,369,562,000
| 30.469767
| 83
| 0.604936
| false
| 4.143295
| false
| false
| false
|
sam-roth/Keypad
|
keypad/plugins/shell/bourne_model.py
|
1
|
4068
|
import subprocess
import shlex
from keypad.api import (Plugin,
register_plugin,
Filetype,
Cursor)
from keypad.abstract.code import IndentRetainingCodeModel, AbstractCompletionResults
from keypad.core.syntaxlib import SyntaxHighlighter, lazy
from keypad.core.processmgr.client import AsyncServerProxy
from keypad.core.fuzzy import FuzzyMatcher
from keypad.core.executors import future_wrap
from keypad.core.attributed_string import AttributedString
@lazy
def lexer():
from . import bourne_lexer
return bourne_lexer.Shell
class GetManPage:
def __init__(self, cmd):
self.cmd = cmd
def __call__(self, ns):
with subprocess.Popen(['man', self.cmd], stdout=subprocess.PIPE) as proc:
out, _ = proc.communicate()
import re
return [re.subn('.\x08', '', out.decode())[0]]
class ShellCompletionResults(AbstractCompletionResults):
def __init__(self, token_start, results, prox):
'''
token_start - the (line, col) position at which the token being completed starts
'''
super().__init__(token_start)
self.results = [(AttributedString(x.decode()),) for x in results]
self._prox = prox
def doc_async(self, index):
'''
Return a Future for the documentation for a given completion result as a list of
AttributedString.
'''
return self._prox.submit(GetManPage(self.text(index)))
@property
def rows(self):
'''
Return a list of tuples of AttributedString containing the contents of
each column for each row in the completion results.
'''
return self._filtered.rows
def text(self, index):
'''
Return the text that should be inserted for the given completion.
'''
return self._filtered.rows[index][0].text
def filter(self, text=''):
'''
Filter the completion results using the given text.
'''
self._filtered = FuzzyMatcher(text).filter(self.results, key=lambda x: x[0].text)
self._filtered.sort(lambda item: len(item[0].text))
def dispose(self):
pass
class GetPathItems:
def __init__(self, prefix):
self.prefix = prefix
def __call__(self, ns):
with subprocess.Popen(['bash',
'-c',
'compgen -c ' + shlex.quote(self.prefix)],
stdout=subprocess.PIPE) as proc:
out, _ = proc.communicate()
return [l.strip() for l in out.splitlines()]
class BourneCodeModel(IndentRetainingCodeModel):
completion_triggers = []
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self._prox = AsyncServerProxy()
self._prox.start()
def dispose(self):
self._prox.shutdown()
super().dispose()
def highlight(self):
'''
Rehighlight the buffer.
'''
highlighter = SyntaxHighlighter(
'keypad.plugins.shell.syntax',
lexer(),
dict(lexcat=None)
)
highlighter.highlight_buffer(self.buffer)
def completions_async(self, pos):
'''
Return a future to the completions available at the given position in the document.
Raise NotImplementedError if not implemented.
'''
c = Cursor(self.buffer).move(pos)
text_to_pos = c.line.text[:c.x]
for x, ch in reversed(list(enumerate(text_to_pos))):
if ch.isspace():
x += 1
break
else:
x = 0
print('text_to_pos', text_to_pos[x:], pos)
return self._prox.submit(GetPathItems(text_to_pos[x:]),
transform=lambda r: ShellCompletionResults((pos[0], x), r,
self._prox))
|
gpl-3.0
| -4,100,649,501,340,423,000
| 26.863014
| 91
| 0.555556
| false
| 4.360129
| false
| false
| false
|
dmvieira/P.O.D.
|
func.py
|
1
|
5799
|
from mergesort import *
def comeca(sequencia,entrada,entrada2,entrada3):
div=open(entrada3,'w')
t=open(entrada,'r')
saida=open(entrada2,'w')
x=t.readlines()
if (x[-1][-1])<>'\n':
comp=x[-1][-1]
comp=comp+'\n'
x.insert(-1,comp)
comp=x[-1]
comp=comp+'\n'
del(x[-1])
x.insert(-1,comp)
del(x[-1])
l=[]
b=0
t.close()
if sequencia=='r':
for j in range(0,len(x)):
k=len(x[j])
if x[j][0]=='>':
if b==1:
l.append(c)
l.append(x[j][:k-1])
c=""
b=1
else:
y=""
for i in range(0,k-1):
if x[j][i] == 'a' or x[j][i] == 'A' or x[j][i] == 'c' or x[j][i] == 'C' or x[j][i] == 'g' or x[j][i] == 'G' or x[j][i] == 'u' or x[j][i] == 'U' or x[j][i] == 'r' or x[j][i] == 'R' or x[j][i] == 'y' or x[j][i] == 'Y' or x[j][i] == 'k' or x[j][i] == 'K' or x[j][i] == 'm' or x[j][i] == 'M' or x[j][i] == 's' or x[j][i] == 'S' or x[j][i] == 'w' or x[j][i] == 'W' or x[j][i] == 'b' or x[j][i] == 'B' or x[j][i] == 'd' or x[j][i] == 'D' or x[j][i] == 'h' or x[j][i] == 'H' or x[j][i] == 'v' or x[j][i] == 'V' or x[j][i] == 'n' or x[j][i] == 'N':
y=y+x[j][i]
c=c+y
l.append(c)
elif sequencia=='p':
for j in range(0,len(x)):
k=len(x[j])
if x[j][0]=='>':
if b==1:
l.append(c)
l.append(x[j][:k-1])
c=""
b=1
else:
y=""
for i in range(0,k-1):
if x[j][i] == 'a' or x[j][i] == 'A' or x[j][i] == 'c' or x[j][i] == 'C' or x[j][i] == 'g' or x[j][i] == 'G' or x[j][i] == 'v' or x[j][i] == 'V' or x[j][i] == 'L' or x[j][i] == 'l' or x[j][i] == 'I' or x[j][i] == 'i' or x[j][i] == 'S' or x[j][i] == 's' or x[j][i] == 'T' or x[j][i] == 't' or x[j][i] == 'Y' or x[j][i] == 'y' or x[j][i] == 'M' or x[j][i] == 'm' or x[j][i] == 'd' or x[j][i] == 'D' or x[j][i] == 'n' or x[j][i] == 'N' or x[j][i] == 'E' or x[j][i] == 'e' or x[j][i] == 'Q' or x[j][i] == 'q' or x[j][i] == 'R' or x[j][i] == 'r' or x[j][i] == 'K' or x[j][i] == 'k' or x[j][i] == 'H' or x[j][i] == 'h' or x[j][i] == 'F' or x[j][i] == 'f' or x[j][i] == 'W' or x[j][i] == 'w' or x[j][i] == 'P' or x[j][i] == 'p' or x[j][i] == 'b' or x[j][i] == 'B' or x[j][i] == 'z' or x[j][i] == 'Z' or x[j][i] == 'x' or x[j][i] == 'X' or x[j][i] == 'u' or x[j][i] == 'U':
y=y+x[j][i]
c=c+y
l.append(c)
else:
for j in range(0,len(x)):
k=len(x[j])
if x[j][0]=='>':
if b==1:
l.append(c)
l.append(x[j][:k-1])
c=""
b=1
else:
y=""
for i in range(0,k-1):
if x[j][i] == 'a' or x[j][i] == 'A' or x[j][i] == 'c' or x[j][i] == 'C' or x[j][i] == 'g' or x[j][i] == 'G' or x[j][i] == 't' or x[j][i] == 'T' or x[j][i] == 'r' or x[j][i] == 'R' or x[j][i] == 'y' or x[j][i] == 'Y' or x[j][i] == 'k' or x[j][i] == 'K' or x[j][i] == 'm' or x[j][i] == 'M' or x[j][i] == 's' or x[j][i] == 'S' or x[j][i] == 'w' or x[j][i] == 'W' or x[j][i] == 'b' or x[j][i] == 'B' or x[j][i] == 'd' or x[j][i] == 'D' or x[j][i] == 'h' or x[j][i] == 'H' or x[j][i] == 'v' or x[j][i] == 'V' or x[j][i] == 'n' or x[j][i] == 'N':
y=y+x[j][i]
c=c+y
l.append(c)
dec,dic={},{}
for j in range(0,len(l),2):
alta=(l[j+1]).upper()
del(l[j+1])
l.insert(j+1,alta)
if (dic.has_key((l[j+1][::-1])))==True:
del(l[j+1])
l.insert((j+1),alta[::-1])
d={l[j]:l[j+1]}
dec.update(d)
d={l[j+1]:l[j]}
dic.update(d)
vou=dic.keys()
v=dec.values()
diversidade=[]
dic={}
for j in range(0,len(l),2):
alta=(l[j+1])
divo=(len(alta))/65
if divo > 0:
alta2=''
for h in range(1,divo+1):
alta2=alta2+alta[(65*(h-1)):(65*h)]+'\n'
alta=alta2+alta[65*divo:]
del(l[j+1])
l.insert(j+1,alta)
d= {alta:l[j]}
dic.update(d)
key=dic.keys()
value=dic.values()
for j in range(len(key)):
saida.write(value[j]+'\n'+key[j]+'\n')
diversidade.append((v.count(vou[j])))
saida.close()
ordena(diversidade, value, key, div)
div.close()
|
gpl-3.0
| -1,694,803,398,801,581,800
| 52.196262
| 904
| 0.272116
| false
| 2.777299
| false
| false
| false
|
cdriehuys/chmvh-website
|
chmvh_website/contact/forms.py
|
1
|
2333
|
import logging
from smtplib import SMTPException
from captcha.fields import ReCaptchaField
from django import forms
from django.conf import settings
from django.core import mail
from django.template import loader
logger = logging.getLogger("chmvh_website.{0}".format(__name__))
class ContactForm(forms.Form):
captcha = ReCaptchaField()
name = forms.CharField()
email = forms.EmailField()
message = forms.CharField(widget=forms.Textarea(attrs={"rows": 5}))
street_address = forms.CharField(required=False)
city = forms.CharField(required=False)
zipcode = forms.CharField(required=False)
template = loader.get_template("contact/email/message.txt")
def clean_city(self):
"""
If no city was provided, use a default string.
"""
if not self.cleaned_data["city"]:
return "<No City Given>"
return self.cleaned_data["city"]
def send_email(self):
assert self.is_valid(), self.errors
subject = "[CHMVH Website] Message from {}".format(
self.cleaned_data["name"]
)
address_line_2_parts = [self.cleaned_data["city"], "North Carolina"]
if self.cleaned_data["zipcode"]:
address_line_2_parts.append(self.cleaned_data["zipcode"])
address_line_1 = self.cleaned_data["street_address"]
address_line_2 = ", ".join(address_line_2_parts)
address = ""
if address_line_1:
address = "\n".join([address_line_1, address_line_2])
context = {
"name": self.cleaned_data["name"],
"email": self.cleaned_data["email"],
"message": self.cleaned_data["message"],
"address": address,
}
logger.debug("Preparing to send email")
try:
emails_sent = mail.send_mail(
subject,
self.template.render(context),
settings.DEFAULT_FROM_EMAIL,
["info@chapelhillvet.com"],
)
logger.info(
"Succesfully sent email from {0}".format(
self.cleaned_data["email"]
)
)
except SMTPException as e:
emails_sent = 0
logger.exception("Failed to send email.", exc_info=e)
return emails_sent == 1
|
mit
| 3,360,558,135,283,314,700
| 28.1625
| 76
| 0.582512
| false
| 4.078671
| false
| false
| false
|
gdsfactory/gdsfactory
|
pp/components/coupler.py
|
1
|
2755
|
import pp
from pp.component import Component
from pp.components.coupler_straight import coupler_straight
from pp.components.coupler_symmetric import coupler_symmetric
from pp.cross_section import get_waveguide_settings
from pp.snap import assert_on_1nm_grid
from pp.types import ComponentFactory
@pp.cell_with_validator
def coupler(
gap: float = 0.236,
length: float = 20.0,
coupler_symmetric_factory: ComponentFactory = coupler_symmetric,
coupler_straight_factory: ComponentFactory = coupler_straight,
dy: float = 5.0,
dx: float = 10.0,
waveguide: str = "strip",
**kwargs
) -> Component:
r"""Symmetric coupler.
Args:
gap: between straights
length: of coupling region
coupler_symmetric_factory
coupler_straight_factory
dy: port to port vertical spacing
dx: length of bend in x direction
waveguide: from tech.waveguide
kwargs: overwrites waveguide_settings
.. code::
dx dx
|------| |------|
W1 ________ _______E1
\ / |
\ length / |
======================= gap | dy
/ \ |
________/ \_______ |
W0 E0
coupler_straight_factory coupler_symmetric_factory
"""
assert_on_1nm_grid(length)
assert_on_1nm_grid(gap)
c = Component()
waveguide_settings = get_waveguide_settings(waveguide, **kwargs)
sbend = coupler_symmetric_factory(gap=gap, dy=dy, dx=dx, **waveguide_settings)
sr = c << sbend
sl = c << sbend
cs = c << coupler_straight_factory(length=length, gap=gap, **waveguide_settings)
sl.connect("W1", destination=cs.ports["W0"])
sr.connect("W0", destination=cs.ports["E0"])
c.add_port("W1", port=sl.ports["E0"])
c.add_port("W0", port=sl.ports["E1"])
c.add_port("E0", port=sr.ports["E0"])
c.add_port("E1", port=sr.ports["E1"])
c.absorb(sl)
c.absorb(sr)
c.absorb(cs)
c.length = sbend.length
c.min_bend_radius = sbend.min_bend_radius
return c
if __name__ == "__main__":
# c = pp.Component()
# cp1 = c << coupler(gap=0.2)
# cp2 = c << coupler(gap=0.5)
# cp1.ymin = 0
# cp2.ymin = 0
# c = coupler(gap=0.2, waveguide="nitride")
# c = coupler(width=0.9, length=1, dy=2, gap=0.2)
# print(c.settings_changed)
c = coupler(gap=0.2, waveguide="nitride")
# c = coupler(gap=0.2, waveguide="strip_heater")
c.show()
|
mit
| 505,920,847,848,893,060
| 29.955056
| 84
| 0.52559
| false
| 3.44806
| false
| false
| false
|
Murali-group/GraphSpace
|
applications/uniprot/models.py
|
1
|
1246
|
from __future__ import unicode_literals
from sqlalchemy import ForeignKeyConstraint, text
from applications.users.models import *
from django.conf import settings
from graphspace.mixins import *
Base = settings.BASE
# ================== Table Definitions =================== #
class UniprotAlias(IDMixin, TimeStampMixin, Base):
__tablename__ = 'uniprot_alias'
accession_number = Column(String, nullable=False)
alias_source = Column(String, nullable=False)
alias_name = Column(String, nullable=False)
constraints = (
UniqueConstraint('accession_number', 'alias_source', 'alias_name', name='_uniprot_alias_uc_accession_number_alias_source_alias_name'),
)
indices = (
Index('uniprot_alias_idx_accession_number', text("accession_number gin_trgm_ops"), postgresql_using="gin"),
Index('uniprot_alias_idx_alias_name', text("alias_name gin_trgm_ops"), postgresql_using="gin"),
)
@declared_attr
def __table_args__(cls):
args = cls.constraints + cls.indices
return args
def serialize(cls, **kwargs):
return {
# 'id': cls.id,
'id': cls.accession_number,
'alias_source': cls.alias_source,
'alias_name': cls.alias_name,
'created_at': cls.created_at.isoformat(),
'updated_at': cls.updated_at.isoformat()
}
|
gpl-2.0
| 1,488,446,659,459,923,500
| 27.976744
| 136
| 0.695024
| false
| 3.178571
| false
| false
| false
|
rohithredd94/Computer-Vision-using-OpenCV
|
Particle-Filter-Tracking/PF_Tracker.py
|
1
|
4110
|
import cv2
import numpy as np
from similarity import *
from hist import *
class PF_Tracker:
def __init__(self, model, search_space, num_particles=100, state_dims=2,
control_std=10, sim_std=20, alpha=0.0):
self.model = model
self.search_space = search_space[::-1]
self.num_particles = num_particles
self.state_dims = state_dims
self.control_std = control_std
self.sim_std = sim_std
self.alpha = alpha
#Initialize particles using a uniform distribution
self.particles = np.array([np.random.uniform(0, self.search_space[i],self.num_particles) for i in range(self.state_dims)]).T
self.weights = np.ones(len(self.particles)) / len(self.particles)
self.idxs = np.arange(num_particles)
self.estimate_state()
def update(self, frame):
self.displace()
self.observe(frame)
self.resample()
self.estimate_state()
if self.alpha > 0:
self.update_model(frame)
def displace(self):
#Displace particles using a normal distribution centered around 0
self.particles += np.random.normal(0, self.control_std,
self.particles.shape)
def observe(self, img):
#Get patches corresponding to each particle
mh, mw = self.model.shape[:2]
minx = (self.particles[:,0] - mw/2).astype(np.int)
miny = (self.particles[:,1] - mh/2).astype(np.int)
candidates = [img[miny[i]:miny[i]+mh, minx[i]:minx[i]+mw]
for i in range(self.num_particles)]
#Compute importance weight - similarity of each patch to the model
self.weights = np.array([similarity(cand, self.model, self.sim_std) for cand in candidates])
self.weights /= np.sum(self.weights)
def resample(self):
sw, sh = self.search_space[:2]
mh, mw = self.model.shape[:2]
j = np.random.choice(self.idxs, self.num_particles, True,
p=self.weights.T) #Sample new particle indices using the distribution of the weights
control = np.random.normal(0, self.control_std, self.particles.shape) #Get a random control input from a normal distribution
self.particles = np.array(self.particles[j])
self.particles[:,0] = np.clip(self.particles[:,0], 0, sw - 1)
self.particles[:,1] = np.clip(self.particles[:,1], 0, sh - 1)
def estimate_state(self):
state_idx = np.random.choice(self.idxs, 1, p=self.weights)
self.state = self.particles[state_idx][0]
def update_model(self, frame):
#Get current model based on belief
mh, mw = self.model.shape[:2]
minx = int(self.state[0] - mw/2)
miny = int(self.state[1] - mh/2)
best_model = frame[miny:miny+mh, minx:minx+mw]
#Apply appearance model update if new model shape is unchanged
if best_model.shape == self.model.shape:
self.model = self.alpha * best_model + (1-self.alpha) * self.model
self.model = self.model.astype(np.uint8)
def visualize_filter(self, img):
self.draw_particles(img)
self.draw_window(img)
self.draw_std(img)
def draw_particles(self, img):
for p in self.particles:
cv2.circle(img, tuple(p.astype(int)), 2, (180,255,0), -1)
def draw_window(self, img):
best_idx = cv2.minMaxLoc(self.weights)[3][1]
best_state = self.particles[best_idx]
pt1 = (best_state - np.array(self.model.shape[::-1])/2).astype(np.int)
pt2 = pt1 + np.array(self.model.shape[::-1])
cv2.rectangle(img, tuple(pt1), tuple(pt2), (0,255,0), 2)
    def draw_std(self, img):
        # per-particle distance from the state estimate, weighted by each particle's weight
        dist = np.linalg.norm(self.particles - self.state, axis=1)
        weighted_sum = np.sum(dist * self.weights)
        cv2.circle(img, tuple(self.state.astype(np.int)),
                   int(weighted_sum), (255,255,255), 1)
|
mit
| -8,084,760,379,983,337,000
| 37.92233
| 132
| 0.584672
| false
| 3.506826
| false
| false
| false
|
dc3-plaso/plaso
|
tests/storage/fake_storage.py
|
1
|
6205
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the fake storage."""
import unittest
from plaso.containers import errors
from plaso.containers import event_sources
from plaso.containers import reports
from plaso.containers import sessions
from plaso.containers import tasks
from plaso.lib import definitions
from plaso.storage import fake_storage
from plaso.storage import zip_file
from tests import test_lib as shared_test_lib
from tests.storage import test_lib
class FakeStorageWriterTest(test_lib.StorageTestCase):
"""Tests for the fake storage writer object."""
def testAddAnalysisReport(self):
"""Tests the AddAnalysisReport function."""
session = sessions.Session()
analysis_report = reports.AnalysisReport(
plugin_name=u'test', text=u'test report')
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddAnalysisReport(analysis_report)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddAnalysisReport(analysis_report)
def testAddError(self):
"""Tests the AddError function."""
session = sessions.Session()
extraction_error = errors.ExtractionError(
message=u'Test extraction error')
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddError(extraction_error)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddError(extraction_error)
def testAddEvent(self):
"""Tests the AddEvent function."""
session = sessions.Session()
test_events = self._CreateTestEvents()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
event = None
for event in test_events:
storage_writer.AddEvent(event)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEvent(event)
def testAddEventSource(self):
"""Tests the AddEventSource function."""
session = sessions.Session()
event_source = event_sources.EventSource()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddEventSource(event_source)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEventSource(event_source)
def testAddEventTag(self):
"""Tests the AddEventTag function."""
session = sessions.Session()
test_events = self._CreateTestEvents()
event_tags = self._CreateTestEventTags()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
for event in test_events:
storage_writer.AddEvent(event)
event_tag = None
for event_tag in event_tags:
storage_writer.AddEventTag(event_tag)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEventTag(event_tag)
def testOpenClose(self):
"""Tests the Open and Close functions."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.Close()
storage_writer.Open()
storage_writer.Close()
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK)
storage_writer.Open()
storage_writer.Close()
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.Open()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.Close()
# TODO: add test for GetEvents.
# TODO: add test for GetFirstWrittenEventSource and
# GetNextWrittenEventSource.
@shared_test_lib.skipUnlessHasTestFile([u'psort_test.json.plaso'])
@shared_test_lib.skipUnlessHasTestFile([u'pinfo_test.json.plaso'])
def testMergeFromStorage(self):
"""Tests the MergeFromStorage function."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
test_file = self._GetTestFilePath([u'psort_test.json.plaso'])
storage_reader = zip_file.ZIPStorageFileReader(test_file)
storage_writer.MergeFromStorage(storage_reader)
test_file = self._GetTestFilePath([u'pinfo_test.json.plaso'])
storage_reader = zip_file.ZIPStorageFileReader(test_file)
storage_writer.MergeFromStorage(storage_reader)
storage_writer.Close()
# TODO: add test for GetNextEventSource.
def testWriteSessionStartAndCompletion(self):
"""Tests the WriteSessionStart and WriteSessionCompletion functions."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.WriteSessionStart()
storage_writer.WriteSessionCompletion()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.WriteSessionStart()
with self.assertRaises(IOError):
storage_writer.WriteSessionCompletion()
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK)
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.WriteSessionStart()
with self.assertRaises(IOError):
storage_writer.WriteSessionCompletion()
storage_writer.Close()
def testWriteTaskStartAndCompletion(self):
"""Tests the WriteTaskStart and WriteTaskCompletion functions."""
session = sessions.Session()
task = tasks.Task(session_identifier=session.identifier)
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK, task=task)
storage_writer.Open()
storage_writer.WriteTaskStart()
storage_writer.WriteTaskCompletion()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.WriteTaskStart()
with self.assertRaises(IOError):
storage_writer.WriteTaskCompletion()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.WriteTaskStart()
with self.assertRaises(IOError):
storage_writer.WriteTaskCompletion()
storage_writer.Close()
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -6,102,286,598,320,775,000
| 27.204545
| 75
| 0.72361
| false
| 4.026606
| true
| false
| false
|
ella/django-ratings
|
django_ratings/aggregation.py
|
1
|
1768
|
"""
This module aggregates records from the Rating and Agg tables into the Agg and TotalRate tables.
"""
import logging
from datetime import datetime, timedelta
from django_ratings.models import Rating, Agg, TotalRate
logger = logging.getLogger('django_ratings')
# aggregate ratings older than 2 years by year
DELTA_TIME_YEAR = 2*365*24*60*60
# ratings older than 2 months by month
DELTA_TIME_MONTH = 2*30*24*60*60
# rest of the ratings (last 2 months) aggregate daily
DELTA_TIME_DAY = -24*60*60
TIMES_ALL = {DELTA_TIME_YEAR : 'year', DELTA_TIME_MONTH : 'month', DELTA_TIME_DAY : 'day'}
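# A short usage sketch (added for illustration, not part of the original module): the three
# steps below are normally driven by calling transfer_data(), e.g. from a periodic job:
#
#     from django_ratings.aggregation import transfer_data
#     transfer_data()  # Rating -> Agg, Agg -> Agg (coarser buckets), Agg -> TotalRate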
def transfer_agg_to_totalrate():
"""
Transfer aggregation data from table Agg to table TotalRate
"""
logger.info("transfer_agg_to_totalrate BEGIN")
if TotalRate.objects.count() != 0:
TotalRate.objects.all().delete()
Agg.objects.agg_to_totalrate()
logger.info("transfer_agg_to_totalrate END")
def transfer_agg_to_agg():
"""
    Re-aggregate data already in table Agg back into table Agg using coarser time buckets
"""
logger.info("transfer_agg_to_agg BEGIN")
timenow = datetime.now()
for t in TIMES_ALL:
TIME_DELTA = t
time_agg = timenow - timedelta(seconds=TIME_DELTA)
Agg.objects.move_agg_to_agg(time_agg, TIMES_ALL[t])
Agg.objects.agg_assume()
logger.info("transfer_agg_to_agg END")
def transfer_data():
"""
transfer data from table Rating to table Agg
"""
logger.info("transfer_data BEGIN")
timenow = datetime.now()
for t in sorted(TIMES_ALL.keys(), reverse=True):
TIME_DELTA = t
time_agg = timenow - timedelta(seconds=TIME_DELTA)
Rating.objects.move_rate_to_agg(time_agg, TIMES_ALL[t])
transfer_agg_to_agg()
transfer_agg_to_totalrate()
logger.info("transfer_data END")
|
bsd-3-clause
| 4,587,567,461,580,501,500
| 28.966102
| 90
| 0.675339
| false
| 3.292365
| false
| false
| false
|
natcoin/natcoin
|
contrib/bitrpc/bitrpc.py
|
1
|
7836
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Natcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Natcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
mit
| 1,866,030,577,551,255,300
| 23.185185
| 79
| 0.66169
| false
| 3.052591
| false
| false
| false
|
javipalanca/ojoalplato
|
ojoalplato/users/models.py
|
1
|
1358
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
USER_STATUS_CHOICES = (
(0, "active"),
)
@python_2_unicode_compatible
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = models.CharField(_("Name of User"), blank=True, max_length=255)
login = models.CharField(max_length=60, default="")
url = models.URLField(max_length=100, blank=True)
activation_key = models.CharField(max_length=60, default="0")
status = models.IntegerField(default=0, choices=USER_STATUS_CHOICES)
def __str__(self):
return self.username
def get_absolute_url(self):
return reverse('users:detail', kwargs={'username': self.username})
class UserMeta(models.Model):
"""
Meta information about a user.
"""
id = models.IntegerField(primary_key=True)
user = models.ForeignKey(User, related_name="meta", blank=True, null=True)
key = models.CharField(max_length=255)
value = models.TextField()
def __unicode__(self):
return u"%s: %s" % (self.key, self.value)
|
mit
| 3,922,233,845,667,998,000
| 30.581395
| 78
| 0.694404
| false
| 3.720548
| false
| false
| false
|
dwitvliet/CATMAID
|
django/applications/catmaid/control/link.py
|
1
|
6452
|
import json
from django.http import HttpResponse
from django.core.exceptions import ObjectDoesNotExist
from catmaid.models import UserRole, Project, Relation, Treenode, Connector, \
TreenodeConnector, ClassInstance
from catmaid.control.authentication import requires_user_role, can_edit_or_fail
@requires_user_role(UserRole.Annotate)
def create_link(request, project_id=None):
""" Create a link, currently only a presynaptic_to or postsynaptic_to relationship
between a treenode and a connector.
"""
from_id = int(request.POST.get('from_id', 0))
to_id = int(request.POST.get('to_id', 0))
link_type = request.POST.get('link_type', 'none')
try:
project = Project.objects.get(id=project_id)
relation = Relation.objects.get(project=project, relation_name=link_type)
from_treenode = Treenode.objects.get(id=from_id)
to_connector = Connector.objects.get(id=to_id, project=project)
links = TreenodeConnector.objects.filter(
connector=to_id,
treenode=from_id,
relation=relation.id)
except ObjectDoesNotExist as e:
return HttpResponse(json.dumps({'error': e.message}))
if links.count() > 0:
return HttpResponse(json.dumps({'error': "A relation '%s' between these two elements already exists!" % link_type}))
related_skeleton_count = ClassInstance.objects.filter(project=project, id=from_treenode.skeleton.id).count()
if related_skeleton_count > 1:
# Can never happen. What motivated this check for an error of this kind? Would imply that a treenode belongs to more than one skeleton, which was possible when skeletons owned treendoes via element_of relations rather than by the skeleton_id column.
return HttpResponse(json.dumps({'error': 'Multiple rows for treenode with ID #%s found' % from_id}))
elif related_skeleton_count == 0:
return HttpResponse(json.dumps({'error': 'Failed to retrieve skeleton id of treenode #%s' % from_id}))
if link_type == 'presynaptic_to':
# Enforce only one presynaptic link
presyn_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation=relation)
if (presyn_links.count() != 0):
return HttpResponse(json.dumps({'error': 'Connector %s does not have zero presynaptic connections.' % to_id}))
# The object returned in case of success
result = {}
if link_type == 'postsynaptic_to':
        # Warn if there is already a link from the source skeleton to the
        # target skeleton. This can happen and is not necessarily wrong, but
        # it is worth double-checking, because it is likely a mistake.
post_links_to_skeleton = TreenodeConnector.objects.filter(project=project,
connector=to_connector, relation=relation, skeleton_id=from_treenode.skeleton_id).count()
if post_links_to_skeleton == 1:
result['warning'] = 'There is already one post-synaptic ' \
'connection to the target skeleton'
elif post_links_to_skeleton > 1:
result['warning'] = 'There are already %s post-synaptic ' \
'connections to the target skeleton' % post_links_to_skeleton
# Enforce only synaptic links
gapjunction_links = TreenodeConnector.objects.filter(project=project, connector=to_connector,
relation__relation_name='gapjunction_with')
if (gapjunction_links.count() != 0):
return HttpResponse(json.dumps({'error': 'Connector %s cannot have both a gap junction and a postsynaptic node.' % to_id}))
if link_type == 'gapjunction_with':
# Enforce only two gap junction links
gapjunction_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation=relation)
synapse_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation__relation_name__endswith='synaptic_to')
if (gapjunction_links.count() > 1):
return HttpResponse(json.dumps({'error': 'Connector %s can only have two gap junction connections.' % to_id}))
if (synapse_links.count() != 0):
return HttpResponse(json.dumps({'error': 'Connector %s is part of a synapse, and gap junction can not be added.' % to_id}))
# Enforce same relations across all linked connectors; only new postsynaptic links are valid
if any([to_connector.children.exists(), to_connector.parent]) and link_type != 'postsynaptic_to':
return HttpResponse(json.dumps({'error': 'Cannot add %s connection to a linked connector.' % link_type}))
TreenodeConnector(
user=request.user,
project=project,
relation=relation,
treenode=from_treenode, # treenode_id = from_id
skeleton=from_treenode.skeleton, # treenode.skeleton_id where treenode.id = from_id
connector=to_connector # connector_id = to_id
).save()
result['message'] = 'success'
return HttpResponse(json.dumps(result), content_type='application/json')
@requires_user_role(UserRole.Annotate)
def delete_link(request, project_id=None):
connector_id = int(request.POST.get('connector_id', 0))
treenode_id = int(request.POST.get('treenode_id', 0))
links = TreenodeConnector.objects.filter(
connector=connector_id,
treenode=treenode_id)
if links.count() == 0:
return HttpResponse(json.dumps({'error': 'Failed to delete connector #%s from geometry domain.' % connector_id}))
# Enforce same relations across all linked connectors; only removal of postsynaptic links are valid
try:
to_connector = Connector.objects.get(id=connector_id, project=project_id)
link_type = links[0].relation.relation_name
except ObjectDoesNotExist as e:
return HttpResponse(json.dumps({'error': e.message}))
if any([to_connector.children.exists(), to_connector.parent]) and link_type != 'postsynaptic_to':
return HttpResponse(json.dumps({'error': 'Cannot remove %s connection to a linked connector.' % link_type}))
# Could be done by filtering above when obtaining the links,
# but then one cannot distinguish between the link not existing
# and the user_id not matching or not being superuser.
can_edit_or_fail(request.user, links[0].id, 'treenode_connector')
links[0].delete()
return HttpResponse(json.dumps({'result': 'Removed treenode to connector link'}))
|
gpl-3.0
| -8,270,022,877,487,769,000
| 51.032258
| 257
| 0.688314
| false
| 3.83591
| false
| false
| false
|
manankalra/Twitter-Sentiment-Analysis
|
main/sentiment/tweepy_demo/tweep.py
|
1
|
1099
|
#!/usr/bin/env python
"""
tweepy (Twitter API) demo
"""
__author__ = "Manan Kalra"
__email__ = "manankalr29@gmail.com"
from tweepy import Stream, OAuthHandler
from tweepy.streaming import StreamListener
import time
# Add your own
consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""
class listener(StreamListener):
def on_data(self, raw_data):
try:
# print(raw_data)
tweet = raw_data.split(",\"text\":")[1].split(",\"source\"")[0]
print(tweet)
save_time = str(time.time()) + "::" + tweet
save_file = open('tweetDB.csv', 'a')
save_file.write(save_time)
save_file.write("\n")
save_file.close()
return True
except BaseException:
print("Failed")
def on_error(self, status_code):
print(status_code)
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
twitterStream = Stream(auth, listener())
twitterStream.filter(track=["<anything: noun/verb/adverb/...>"])
|
mit
| 3,081,403,557,936,290,300
| 23.422222
| 75
| 0.605096
| false
| 3.423676
| false
| false
| false
|
Bladefidz/wfuzz
|
plugins/iterations.py
|
1
|
2703
|
from externals.moduleman.plugin import moduleman_plugin
import itertools
class piterator_void:
text="void"
def count(self):
return self.__count
def __init__(self, *i):
self._dic = i
self.__count = max(map(lambda x:x.count(), i))
self.it = self._dic[0]
def next(self):
return (self.it.next(),)
def restart(self):
for dic in self._dic:
dic.restart()
self.it = self._dic[0]
def __iter__(self):
self.restart()
return self
@moduleman_plugin("restart", "count", "next", "__iter__")
class zip:
    name = "zip"
    description = "Returns an iterator that aggregates elements from each of the iterables."
    category = ["default"]
    priority = 99
    def __init__(self, *i):
        self._dic = i
        self.it = itertools.izip(*self._dic)
        self.__count = min(map(lambda x:x.count(), i)) # Only possible match counted.
    def count(self):
        return self.__count
    def restart(self):
        for dic in self._dic:
            dic.restart()
        self.it = itertools.izip(*self._dic)
    def next(self):
        return self.it.next()
    def __iter__(self):
        self.restart()
        return self
@moduleman_plugin("restart", "count", "next", "__iter__")
class product:
name = "product"
description = "Returns an iterator cartesian product of input iterables."
category = ["default"]
priority = 99
def __init__(self, *i):
self._dic = i
self.it = itertools.product(*self._dic)
self.__count = reduce(lambda x,y:x*y.count(), i[1:], i[0].count())
def restart(self):
for dic in self._dic:
dic.restart()
self.it = itertools.product(*self._dic)
def count(self):
return self.__count
def next(self):
return self.it.next()
def __iter__(self):
self.restart()
return self
@moduleman_plugin("restart", "count", "next", "__iter__")
class chain:
name = "chain"
description = "Returns an iterator returns elements from the first iterable until it is exhausted, then proceeds to the next iterable, until all of the iterables are exhausted."
category = ["default"]
priority = 99
def count(self):
return self.__count
def __init__(self, *i):
self.__count = sum(map(lambda x:x.count(), i))
self._dic = i
self.it = itertools.chain(*i)
def restart(self):
for dic in self._dic:
dic.restart()
self.it = itertools.chain(*self._dic)
def next(self):
return (self.it.next(),)
def __iter__(self):
self.restart()
return self
|
gpl-2.0
| 2,271,427,541,823,712,800
| 24.261682
| 181
| 0.564928
| false
| 3.785714
| false
| false
| false
|
berkmancenter/mediacloud
|
apps/common/src/python/mediawords/db/locks.py
|
1
|
3477
|
"""Constants and routines for handling advisory postgres locks."""
import mediawords.db
from mediawords.util.log import create_logger
from mediawords.util.perl import decode_object_from_bytes_if_needed
log = create_logger(__name__)
"""
This package just has constants that can be passed to the first value of the postgres pg_advisory_*lock functions.
If you are using an advisory lock, you should use the two key version and use a constant from this package to
avoid conflicts.
"""
# locks to make sure we are not mining or snapshotting a topic in more than one process at a time
LOCK_TYPES = {
'test-a': 10,
'test-b': 11,
'MediaWords::Job::TM::MineTopic': 12,
'MediaWords::Job::TM::SnapshotTopic': 13,
'MediaWords::TM::Media::media_normalized_urls': 14,
'MediaWords::Crawler::Engine::run_fetcher': 15,
# Testing lock types
'TestPerlWorkerLock': 900,
'TestPythonWorkerLock': 901,
}
class McDBLocksException(Exception):
"""Default exception for package."""
pass
def get_session_lock(db: mediawords.db.DatabaseHandler, lock_type: str, lock_id: int, wait: bool = False) -> bool:
"""Get a postgres advisory lock with the lock_type and lock_id as the two keys.
Arguments:
db - db handle
lock_type - must be in LOCK_TYPES dict above
lock_id - id for the particular lock within the type
wait - if true, block while waiting for the lock, else return false if the lock is not available
Returns:
True if the lock is available
"""
lock_type = str(decode_object_from_bytes_if_needed(lock_type))
if isinstance(lock_id, bytes):
lock_id = decode_object_from_bytes_if_needed(lock_id)
lock_id = int(lock_id)
if isinstance(wait, bytes):
wait = decode_object_from_bytes_if_needed(wait)
wait = bool(wait)
log.debug("trying for lock: %s, %d" % (lock_type, lock_id))
if lock_type not in LOCK_TYPES:
raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type)
lock_type_id = LOCK_TYPES[lock_type]
if wait:
db.query("select pg_advisory_lock(%(a)s, %(b)s)", {'a': lock_type_id, 'b': lock_id})
return True
else:
r = db.query("select pg_try_advisory_lock(%(a)s, %(b)s) as locked", {'a': lock_type_id, 'b': lock_id}).hash()
return r['locked']
def release_session_lock(db: mediawords.db.DatabaseHandler, lock_type: str, lock_id: int) -> None:
"""Release the postgres advisory lock if it is held."""
lock_type = str(decode_object_from_bytes_if_needed(lock_type))
if isinstance(lock_id, bytes):
lock_id = decode_object_from_bytes_if_needed(lock_id)
lock_id = int(lock_id)
if lock_type not in LOCK_TYPES:
raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type)
lock_type_id = LOCK_TYPES[lock_type]
db.query("select pg_advisory_unlock(%(a)s, %(b)s)", {'a': lock_type_id, 'b': lock_id})
def list_session_locks(db: mediawords.db.DatabaseHandler, lock_type: str) -> list:
"""Return a list of all locked ids for the given lock_type."""
lock_type = str(decode_object_from_bytes_if_needed(lock_type))
if lock_type not in LOCK_TYPES:
raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type)
lock_type_id = LOCK_TYPES[lock_type]
# noinspection SqlResolve
return db.query(
"select objid from pg_locks where locktype = 'advisory' and classid = %(a)s",
{'a': lock_type_id}).flat()
|
agpl-3.0
| 1,828,699,678,270,009,600
| 33.425743
| 117
| 0.667242
| false
| 3.261726
| false
| false
| false
|
QTek/QRadio
|
tramatego/src/tramatego/transforms/ipv4_to_score.py
|
1
|
1161
|
#!/usr/bin/env python
from canari.maltego.utils import debug, progress
from canari.framework import configure #, superuser
from canari.maltego.entities import IPv4Address, Phrase
from common.launchers import get_qradio_data
__author__ = 'Zappus'
__copyright__ = 'Copyright 2016, TramaTego Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'Zappus'
__email__ = 'zappus@protonmail.com'
__status__ = 'Development'
__all__ = [
'dotransform',
#'onterminate' # comment out this line if you don't need this function.
]
#@superuser
@configure(
label='IPv4 to Score',
description='Converts IPv4 into Score using QRadio.',
uuids=[ 'TramaTego.v1.IPv4ToScore' ],
inputs=[ ( 'TramaTego', IPv4Address ) ],
debug=True
)
def dotransform(request, response, config):
command = "--ipv4_to_score " + request.value
qradio_output = get_qradio_data(command, 3)
for entry in qradio_output:
response += Phrase(entry)
return response
def onterminate():
"""
TODO: Write your cleanup logic below or delete the onterminate function and remove it from the __all__ variable
"""
pass
|
apache-2.0
| 7,825,119,063,382,835,000
| 24.822222
| 115
| 0.676141
| false
| 3.345821
| false
| false
| false
|
coinkite/connectrum
|
connectrum/findall.py
|
1
|
4527
|
#!/usr/bin/env python3
#
#
import bottom, random, time, asyncio
from .svr_info import ServerInfo
import logging
logger = logging.getLogger('connectrum')
class IrcListener(bottom.Client):
def __init__(self, irc_nickname=None, irc_password=None, ssl=True):
self.my_nick = irc_nickname or 'XC%d' % random.randint(1E11, 1E12)
self.password = irc_password or None
self.results = {} # by hostname
self.servers = set()
self.all_done = asyncio.Event()
super(IrcListener, self).__init__(host='irc.freenode.net', port=6697 if ssl else 6667, ssl=ssl)
# setup event handling
self.on('CLIENT_CONNECT', self.connected)
self.on('PING', self.keepalive)
self.on('JOIN', self.joined)
self.on('RPL_NAMREPLY', self.got_users)
self.on('RPL_WHOREPLY', self.got_who_reply)
self.on("client_disconnect", self.reconnect)
self.on('RPL_ENDOFNAMES', self.got_end_of_names)
async def collect_data(self):
        # start the process
self.loop.create_task(self.connect())
# wait until done
await self.all_done.wait()
# return the results
return self.results
def connected(self, **kwargs):
logger.debug("Connected")
self.send('NICK', nick=self.my_nick)
self.send('USER', user=self.my_nick, realname='Connectrum Client')
        # long delay here as it does a failing Ident probe (10 seconds min)
self.send('JOIN', channel='#electrum')
#self.send('WHO', mask='E_*')
def keepalive(self, message, **kwargs):
self.send('PONG', message=message)
async def joined(self, nick=None, **kwargs):
# happens when we or someone else joins the channel
# seem to take 10 seconds or longer for me to join
logger.debug('Joined: %r' % kwargs)
if nick != self.my_nick:
await self.add_server(nick)
async def got_who_reply(self, nick=None, real_name=None, **kws):
'''
Server replied to one of our WHO requests, with details.
'''
#logger.debug('who reply: %r' % kws)
nick = nick[2:] if nick[0:2] == 'E_' else nick
host, ports = real_name.split(' ', 1)
self.servers.remove(nick)
logger.debug("Found: '%s' at %s with port list: %s",nick, host, ports)
self.results[host.lower()] = ServerInfo(nick, host, ports)
if not self.servers:
self.all_done.set()
async def got_users(self, users=[], **kws):
# After successful join to channel, we are given a list of
# users on the channel. Happens a few times for busy channels.
logger.debug('Got %d (more) users in channel', len(users))
for nick in users:
await self.add_server(nick)
async def add_server(self, nick):
# ignore everyone but electrum servers
if nick.startswith('E_'):
self.servers.add(nick[2:])
async def who_worker(self):
# Fetch details on each Electrum server nick we see
logger.debug('who task starts')
copy = self.servers.copy()
for nn in copy:
logger.debug('do WHO for: ' + nn)
self.send('WHO', mask='E_'+nn)
logger.debug('who task done')
def got_end_of_names(self, *a, **k):
logger.debug('Got all the user names')
assert self.servers, "No one on channel!"
# ask for details on all of those users
self.loop.create_task(self.who_worker())
async def reconnect(self, **kwargs):
# Trigger an event that may cascade to a client_connect.
# Don't continue until a client_connect occurs, which may be never.
logger.warn("Disconnected (will reconnect)")
# Note that we're not in a coroutine, so we don't have access
# to await and asyncio.sleep
time.sleep(3)
# After this line we won't necessarily be connected.
# We've simply scheduled the connect to happen in the future
self.loop.create_task(self.connect())
logger.debug("Reconnect scheduled.")
if __name__ == '__main__':
import logging
logging.getLogger('bottom').setLevel(logging.DEBUG)
logging.getLogger('connectrum').setLevel(logging.DEBUG)
logging.getLogger('asyncio').setLevel(logging.DEBUG)
bot = IrcListener(ssl=False)
bot.loop.set_debug(True)
fut = bot.collect_data()
#bot.loop.create_task(bot.connect())
rv = bot.loop.run_until_complete(fut)
print(rv)
|
mit
| 8,429,156,360,593,355,000
| 31.106383
| 103
| 0.610559
| false
| 3.69551
| false
| false
| false
|
zstyblik/infernal-twin
|
sql_insert.py
|
1
|
3025
|
import MySQLdb
import db_connect_creds
from datetime import datetime
username, password = db_connect_creds.read_creds()
cxn = MySQLdb.connect('localhost', user=username, passwd=password)
date = datetime.now()
cxn.query('CREATE DATABASE IF NOT EXISTS InfernalWireless')
cxn.commit()
cxn.close()
cxn = MySQLdb.connect(db='InfernalWireless')
cur = cxn.cursor()
current_project_id = 0
#~ cxn = MySQLdb.connect('localhost','root',"")
#~
#~ date = datetime.now()
#~
#~
#~ cxn.query('CREATE DATABASE IF NOT EXISTS InfernalWireless')
#~
#~ cxn.commit()
#~ cxn.close()
#~
#~ cxn = MySQLdb.connect(db='InfernalWireless')
#~
#~ cur = cxn.cursor()
#~
#~ current_project_id = 0
def create_project_table():
    ############## THIS IS GOING TO CREATE A TABLE FOR PROJECT
#~ cur.execute("CREATE TABLE mytable (id AUTO_INCREMENT")
PROJECT_TITLE = '''CREATE TABLE IF NOT EXISTS Projects (
ProjectId MEDIUMINT NOT NULL AUTO_INCREMENT, ProjectName TEXT, PRIMARY KEY (ProjectId), AuditorName TEXT, TargetName TEXT, date TEXT)'''
cur.execute(PROJECT_TITLE)
create_project_table()
def project_details(projectname, Authors_name, TargetName, date):
    # Use the module-level project id so create_report() can pick it up later.
    global current_project_id
    PROJECT_DETAILS = 'INSERT INTO Projects (ProjectName, AuditorName, TargetName, date) VALUES ("%s","%s","%s","%s")'%(projectname, Authors_name, TargetName, date)
cur.execute(PROJECT_DETAILS)
current_project_id_tmp = cur.lastrowid
current_project_id = current_project_id_tmp
print "report is generated"
return current_project_id_tmp
def create_report_table():
    ############## THIS IS GOING TO CREATE A TABLE FOR PROJECT
report_table = '''CREATE TABLE IF NOT EXISTS Reports (findingID MEDIUMINT NOT NULL AUTO_INCREMENT, finding_name TEXT, phase TEXT, PRIMARY KEY (findingID), risk_level TEXT, risk_category TEXT, Findings_detail TEXT, Notes TEXT, Project_fk_Id MEDIUMINT, FOREIGN KEY (Project_fk_Id) REFERENCES Projects (ProjectId))'''
cur.execute(report_table)
create_report_table()
def create_report(finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id):
########## THIS IS GOING TO INSERT DATA INTO FINDINGS TABLE
pID = current_project_id
REPORT_DETAILS = 'INSERT INTO Reports (finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id) VALUES ("%s","%s","%s","%s","%s","%s","%s")'%( finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id)
cur.execute(REPORT_DETAILS)
print pID
def print_hello(test_data):
print test_data
################ DB POPULATE DATABASE ###########
#~ prID = project_details('test','est','23s','12/12/12')
#~
#~ create_report('Title of the finding','Choose a phase','Choose a category','Choose risk level','Enter the findings details','Notes on the findings',int(prID))
################################################################### DUMMY DATABASE QUERIES ##############
#~ print type(prID)
cur.close()
cxn.commit()
cxn.close()
print "DB has been updated"
|
gpl-3.0
| -3,604,638,107,569,597,000
| 25.077586
| 315
| 0.676694
| false
| 3.238758
| false
| false
| false
|
ksteinfe/decodes
|
src/decodes/core/dc_mesh.py
|
1
|
6004
|
from decodes.core import *
from . import dc_base, dc_vec, dc_point, dc_has_pts #here we may only import modules that have been loaded before this one. see core/__init__.py for proper order
if VERBOSE_FS: print("mesh.py loaded")
import copy, collections
class Mesh(HasPts):
"""
a very simple mesh class
"""
subclass_attr = [] # this list of props is unset any time this HasPts object changes
def __init__(self, vertices=None, faces=None, basis=None):
""" Mesh Constructor.
:param vertices: The vertices of the mesh.
:type vertices: [Point]
:param faces: List of ordered faces.
:type faces: [int]
:param basis: The (optional) basis of the mesh.
:type basis: Basis
:result: Mesh object.
:rtype: Mesh
::
pts=[
Point(0,0,0),
Point(0,1,0),
Point(1,1,0),
Point(1,0,0),
Point(0,0,1),
Point(0,1,1),
Point(1,1,1),
Point(1,0,1),
]
quad_faces=[[0,1,2,3],[4,5,6,7],[0,4,5,1],[3,7,6,2]]
quadmesh=Mesh(pts,quad_faces)
"""
super(Mesh,self).__init__(vertices,basis) #HasPts constructor handles initalization of verts and basis
self._faces = [] if (faces is None) else faces
@property
def faces(self):
""" Returns a list of mesh faces.
:result: List of mesh faces.
:rtype: list
"""
return self._faces
def add_face(self,a,b,c,d=-1):
""" Adds a face to the mesh.
:param a,b,c,d: Face to be added to the list of faces.
:type a,b,c,d: int.
:result: Modifies list of faces.
:rtype: None
::
quadmesh.add_face(4,5,6,7)
"""
#TODO: add lists of faces just the same
if max(a,b,c,d) < len(self.pts):
if (d>=0) : self._faces.append([a,b,c,d])
else: self._faces.append([a,b,c])
def face_pts(self,index):
""" Returns the points of a given face.
:param index: Face's index
:type index: int
:returns: Vertices.
:rtype: Point
::
quadmesh.face_pts(0)
"""
return [self.pts[i] for i in self.faces[index]]
def face_centroid(self,index):
""" Returns the centroids of individual mesh faces.
:param index: Index of a face.
:type index: int
:returns: The centroid of a face.
:rtype: Point
::
quadmesh.face_centroid(0)
"""
return Point.centroid(self.face_pts(index))
def face_normal(self,index):
""" Returns the normal vector of a face.
:param index: Index of a face.
:type index: int
:returns: Normal vector.
:rtype: Vec
::
quadmesh.face_normal(0)
"""
verts = self.face_pts(index)
if len(verts) == 3 : return Vec(verts[0],verts[1]).cross(Vec(verts[0],verts[2])).normalized()
else :
v0 = Vec(verts[0],verts[1]).cross(Vec(verts[0],verts[3])).normalized()
v1 = Vec(verts[2],verts[3]).cross(Vec(verts[2],verts[1])).normalized()
return Vec.bisector(v0,v1).normalized()
def __repr__(self):
return "msh[{0}v,{1}f]".format(len(self._verts),len(self._faces))
@staticmethod
def explode(msh):
""" Explodes a mesh into individual faces.
:param msh: Mesh to explode.
:type msh: Mesh
:returns: List of meshes.
:type: [Mesh]
::
Mesh.explode(quadmesh)
"""
exploded_meshes = []
for face in msh.faces:
pts = [msh.pts[v] for v in face]
nface = [0,1,2] if len(face)==3 else [0,1,2,3]
exploded_meshes.append(Mesh(pts,[nface]))
return exploded_meshes
def to_pt_graph(self):
""" Returns a Graph representation of the mesh points by index.
:returns: A Graph of point indexes.
:rtype: Graph
::
quadmesh.to_pt_graph()
"""
graph = Graph()
for index in range(len(self.pts)):
for face in self.faces:
for px in face:
if index in face and index!=px: graph.add_edge(index, px)
return graph
def to_face_graph(self, val=1):
""" Returns a Graph representation of the mesh faces by index.
:param val: number of coincident points for neighborness.
:type val: int
:returns: A Graph of face indexes.
:rtype: Graph
::
quadmesh.to_face_graph(2)
"""
from decodes.extensions.graph import Graph
graph = Graph()
graph.naked_nodes = []
for f1 in range(len(self.faces)):
for f2 in range(len(self.faces)):
if f1 != f2:
count = 0
for index in self.faces[f2]:
if index in self.faces[f1]:
count+=1
if count >= val:
graph.add_edge(f1,f2)
if len(graph.edges[f1]) < len(self.faces[f1]):
if f1 not in graph.naked_nodes:
graph.naked_nodes.append(f1)
return graph
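# A small usage sketch (not part of the original module): it rebuilds the quad
# mesh from the constructor docstring above and exercises a few of the methods.
# It assumes the decodes package is importable; the printed values are only a
# demonstration, not a test.
if __name__ == "__main__":
    pts = [
        Point(0,0,0), Point(0,1,0), Point(1,1,0), Point(1,0,0),
        Point(0,0,1), Point(0,1,1), Point(1,1,1), Point(1,0,1),
    ]
    quad_faces = [[0,1,2,3],[4,5,6,7],[0,4,5,1],[3,7,6,2]]
    quadmesh = Mesh(pts, quad_faces)
    print(quadmesh)                     # msh[8v,4f]
    print(quadmesh.face_centroid(0))    # centroid of the first face
    print(quadmesh.face_normal(0))      # averaged normal of the first quad
    print(len(Mesh.explode(quadmesh)))  # 4 single-face meshes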
|
gpl-3.0
| 8,685,016,591,844,526,000
| 30.276042
| 164
| 0.460693
| false
| 4.067751
| false
| false
| false
|
kmiller96/Shipping-Containers-Software
|
lib/core.py
|
1
|
8600
|
# AUTHOR: Kale Miller
# DESCRIPTION: The 'main brain' of the program is held in here.
# 50726f6772616d6d696e6720697320627265616b696e67206f66206f6e652062696720696d706f737369626c65207461736b20696e746f20736576
# 6572616c207665727920736d616c6c20706f737369626c65207461736b732e
# DEVELOPMENT LOG:
# 07/12/16: Initialized file. Moved IDGenerator class into the script. Added holding bay class.
# 12/12/16: Tweaked the IDGenerator class to help remove dependency.
# 13/12/16: Fleshed out the NewHoldingBay class.
# 15/12/16: Added methods to add auxiliary labels. Added method to generate information label. Small bug fixes.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~IMPORTS/GLOBALS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os, time
import numpy as np
from lib import containers
CONTAINER_CLASSES = [
containers.BasicContainer,
containers.HeavyContainer,
containers.RefrigeratedContainer,
containers.LiquidContainer,
containers.ExplosivesContainer,
containers.ToxicContainer,
containers.ChemicalContainer
]
CONTAINER_TYPES = ['basic', 'heavy', 'refrigerated', 'liquid', 'explosive', 'toxic', 'chemical']
SERIAL_CODES = ['B', 'H', 'R', 'L', 'E', 'T', 'C']
TAG_APPLICATION_TIME = 0.2
PRINTALL_TIME = 1
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~MAIN~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def processshipfile(filename, path):
"""Processes the csv file that the ship supplies."""
def _deletenewline(string):
"""Deletes the \n symbol from a string if it exists."""
try:
truncatedstring = string[:string.index('\n')]
except ValueError:
truncatedstring = string
finally:
return truncatedstring
try:
home = os.getcwd()
os.chdir(path)
    except OSError:  # OSError also covers WindowsError on Windows, so this works on all machines
        raise NameError, "The path specified does not exist."
rawfile = open(filename, 'r')
arylines = rawfile.readlines()
basematrix = map(lambda x: _deletenewline(x).split(','), arylines)
numpyarray = np.array(basematrix)
return numpyarray
class IDGenerator:
"""Controls the assignment of id tags on the containers."""
# TODO: Change the __init__ such that it works by reading a collection of tuples instead of two lists.
def __init__(self):
"""Initialise the id generator."""
self._COUNTERS = [0] * len(CONTAINER_TYPES)
return
def _findindex(self, container):
"""Determines the index in the lists the class should use."""
return CONTAINER_TYPES.index(container)
def _serialcode(self, index):
"""Fetches the serial code for a supplied index."""
return SERIAL_CODES[index]
def _counter(self, index):
"""Fetches the counter for a specific serial type and increments it by one."""
self._COUNTERS[index] += 1
return self._COUNTERS[index]
def newid(self, containertype):
"""Generates a new id."""
ii = self._findindex(containertype)
idtag = self._serialcode(ii) + str(self._counter(ii)).zfill(5)
return idtag
class NewHoldingBay:
"""Creates a new holding bay for the containers. Thus it contains all of the information about the containers
along with the methods controlling unloading and loading them."""
def __init__(self):
self._path = os.getcwd()
self.idgenerator = IDGenerator()
self.containerlist = list()
self._iOnship = 0
self._iLoaded = 0
self._iHolding = 0
return None
def _createcontainer(self, containerstr, parameters):
"""Creates a new container class based off the first column of the CSV."""
# TODO: Fix this method up to catch more and print useful error messages.
if not isinstance(containerstr, str):
raise TypeError, "The parameter passed must be a string."
elif len(containerstr) == 1:
try:
ii = SERIAL_CODES.index(containerstr)
except ValueError:
raise Exception("Bad input.") # TODO: Fix this area up.
elif len(containerstr) != 1:
try:
ii = CONTAINER_TYPES.index(containerstr)
except ValueError:
raise Exception("Bad input.")
idtag = self.idgenerator.newid(CONTAINER_TYPES[ii])
return CONTAINER_CLASSES[ii](idtag, *parameters)
def defineship(self, file):
"""Pass in the CSV file of the ship in order to unload it."""
shipdata = processshipfile(file, self._path)
shipdata = shipdata[1::] # Throw out the headers.
for line in shipdata:
newcontainer = self._createcontainer(line[0], (line[1], line[3]))
self.containerlist.append(newcontainer)
self._iOnship += 1
def printcontainer(self, serial):
"""Prints the information about a specific container."""
for container in self.containerlist:
if container.id() == serial:
container.information()
return None
else:
continue
raise NameError, "Unable to find container with serial code %s" % serial
return -1
def printallinformation(self):
"""Prints the information of all the containers."""
for container in self.containerlist:
container.information()
time.sleep(PRINTALL_TIME)
return None
def unloadall(self, debug=False):
"""Unloads all of the containers from the ship."""
for container in self.containerlist:
container.unload(debug=debug)
self._iHolding += 1
self._iOnship -= 1
return None
def loadall(self, debug=False):
"""Loads all of the containers into trucks and trains."""
# TODO: Proper loading locations.
ii = 1
for container in self.containerlist:
container.load('Truck ' + str(ii).zfill(3), debug=debug)
self._iHolding -= 1
self._iLoaded += 1
ii += 1
return None
def printauditedload(self):
"""Prints information about the holding bay at this time."""
iOnship = 0; iLoaded = 0; iHolding = 0
iContainercount = [0] * len(CONTAINER_TYPES)
for container in self.containerlist:
try:
ii = CONTAINER_TYPES.index(container._type)
iContainercount[ii] += 1
except ValueError:
raise NameError, "One (or more) containers don't have a valid type."
# Print the appropriate information.
print "----------------------------------------------------------------------"
print "TOTAL CONTAINERS: %i" % len(self.containerlist); time.sleep(0.3)
print "CONTAINERS CURRENTLY STILL ON SHIP: %i" % self._iOnship; time.sleep(0.3)
print "CONTAINERS LOADED ON TRUCKS AND TRAINS: %i" % self._iLoaded; time.sleep(0.3)
print "CONTAINERS BEING HELD IN THE HOLDING BAY: %i" % self._iHolding; time.sleep(0.3)
print ""
print "THE NUMBER OF CONTAINERS FOR EACH TYPE:"; time.sleep(0.3)
for ii in xrange(len(CONTAINER_TYPES)):
if iContainercount[ii] == 0: continue
print "\t%s: %i" % (CONTAINER_TYPES[ii], iContainercount[ii]); time.sleep(0.3)
print "----------------------------------------------------------------------"
return None
def addidtags(self, debug=False):
"""Applys appropriate serial numbers to all of the containers."""
for container in self.containerlist:
print "Applying id tag to container %s" % container.id()
if not debug: time.sleep(TAG_APPLICATION_TIME)
container.addidtag()
return None
def applyauxilarylabels(self):
"""Applys the labels that should go on containers about their contents and handling."""
for container in self.containerlist:
print "Adding labels to container %s" % container.id()
container.addauxilarylabels()
return None
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
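# Usage sketch (not part of the original file): a hedged outline of how the
# NewHoldingBay class above is typically driven. 'ship_manifest.csv' is a
# hypothetical file name; the real CSV layout is whatever defineship() expects.
#
#   bay = NewHoldingBay()
#   bay.defineship('ship_manifest.csv')
#   bay.unloadall()
#   bay.addidtags()
#   bay.applyauxilarylabels()
#   bay.loadall()
#   bay.printauditedload()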
|
mit
| 3,282,703,609,942,576,000
| 39.148325
| 120
| 0.570698
| false
| 4.232283
| false
| false
| false
|
amerlyq/airy
|
vim/res/ycm_extra_conf.py
|
1
|
5213
|
# SEE: CACHE/bundle/YouCompleteMe/cpp/ycm/.ycm_extra_conf.py
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall', '-Wextra', '-Werror', '-Wc++98-compat',
'-Wno-long-long', '-Wno-variadic-macros', '-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
#'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c++',
'-isystem', '../BoostParts',
# This path will only work on OS X, but extra paths that don't exist are not harmful
'-isystem', '/System/Library/Frameworks/Python.framework/Headers',
'-isystem', '../llvm/include',
'-isystem', '../llvm/tools/clang/include',
'-I', '.',
'-I', './ClangCompleter',
'-isystem', './tests/gmock/gtest',
'-isystem', './tests/gmock/gtest/include',
'-isystem', './tests/gmock',
'-isystem', './tests/gmock/include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
'-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/c++/v1',
'-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
# Note: abspath() does not expand '~', so expanduser() is needed for this path to ever exist.
compilation_database_folder = os.path.abspath( os.path.expanduser( '~/aura/pdrm/gerrit/build' ) )
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
#try:
# final_flags.remove( '-stdlib=libc++' )
#except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return { 'flags': final_flags, 'do_cache': True }
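# A small demonstration (not part of the original config): it shows what
# MakeRelativePathsInFlagsAbsolute() does to relative include paths. The flags
# and the working directory below are made up for illustration; running this
# file directly still requires ycm_core to be importable because of the import
# at the top.
if __name__ == "__main__":
    demo_flags = [ '-I', 'include', '-isystem', 'third_party', '-std=c++11' ]
    print( MakeRelativePathsInFlagsAbsolute( demo_flags, '/tmp/project' ) )
    # expected: ['-I', '/tmp/project/include', '-isystem', '/tmp/project/third_party', '-std=c++11']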
|
mit
| 1,835,712,547,090,115,600
| 36.235714
| 115
| 0.689047
| false
| 3.479973
| true
| false
| false
|
sangwonl/stage34
|
webapp/api/handlers/stage.py
|
1
|
6612
|
from django.views import View
from django.conf import settings
from datetime import datetime
from api.helpers.mixins import AuthRequiredMixin
from api.helpers.http.jsend import JSENDSuccess, JSENDError
from api.models.resources import Membership, Stage
from libs.utils.model_ext import model_to_dict
from worker.tasks.deployment import (
task_provision_stage,
task_change_stage_status,
task_delete_stage,
task_refresh_stage
)
import pytz
import os
import json
import jwt
SERIALIZE_FIELDS = [
'id',
'title',
'endpoint',
'status',
'repo',
'default_branch',
'branch',
'created_at'
]
class StageRootHandler(AuthRequiredMixin, View):
def get(self, request, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stages_qs = Stage.objects.filter(org=org)
stages = [model_to_dict(s, fields=SERIALIZE_FIELDS) for s in stages_qs]
return JSENDSuccess(status_code=200, data=stages)
def post(self, request, *args, **kwargs):
json_body = json.loads(request.body)
title = json_body.get('title')
repo = json_body.get('repo')
        branch = json_body.get('branch')
        default_branch = json_body.get('default_branch')
run_on_create = json_body.get('run_on_create', False)
if not (title and repo and default_branch and branch):
return JSENDError(status_code=400, msg='invalid stage info')
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = Stage.objects.create(
org=org,
title=title,
repo=repo,
default_branch=default_branch,
branch=branch
)
github_access_key = request.user.jwt_payload.get('access_token')
task_provision_stage.apply_async(args=[github_access_key, stage.id, repo, branch, run_on_create])
stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS)
return JSENDSuccess(status_code=200, data=stage_dict)
class StageDetailHandler(AuthRequiredMixin, View):
def get_stage(self, org, stage_id):
try:
stage = Stage.objects.get(org=org, id=stage_id)
except Stage.DoesNotExist:
return None
return stage
def get(self, request, stage_id, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = self.get_stage(org, stage_id)
if not stage:
return JSENDError(status_code=404, msg='stage not found')
stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS)
return JSENDSuccess(status_code=200, data=stage_dict)
def put(self, request, stage_id, *args, **kwargs):
json_body = json.loads(request.body)
new_status = json_body.get('status')
if not new_status or new_status not in ('running', 'paused'):
return JSENDError(status_code=400, msg='invalid stage status')
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = self.get_stage(org, stage_id)
if not stage:
return JSENDError(status_code=404, msg='stage not found')
cur_status = stage.status
if cur_status != new_status:
github_access_key = request.user.jwt_payload.get('access_token')
task_change_stage_status.apply_async(args=[github_access_key, stage_id, new_status])
new_status = 'changing'
stage.title = json_body.get('title', stage.title)
stage.repo = json_body.get('repo', stage.repo)
stage.default_branch = json_body.get('default_branch', stage.default_branch)
stage.branch = json_body.get('branch', stage.branch)
stage.status = new_status
stage.save()
stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS)
return JSENDSuccess(status_code=204)
def delete(self, request, stage_id, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = self.get_stage(org, stage_id)
if not stage:
return JSENDError(status_code=404, msg='stage not found')
stage.status = 'deleting'
stage.save()
github_access_key = request.user.jwt_payload.get('access_token')
task_delete_stage.apply_async(args=[github_access_key, stage_id])
return JSENDSuccess(status_code=204)
class StageLogHandler(AuthRequiredMixin, View):
def get_log_path(self, stage_id):
return os.path.join(settings.STAGE_REPO_HOME, stage_id, 'output.log')
def get(self, request, stage_id, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
log_path = self.get_log_path(stage_id)
if not os.path.exists(log_path):
return JSENDError(status_code=404, msg='log file not found')
log_msgs = []
with open(log_path, 'rt') as f:
log_msg = f.read()
log_msgs = [l for l in log_msg.split('\n') if l]
ts = os.path.getmtime(log_path)
tz = pytz.timezone(settings.TIME_ZONE)
dt = datetime.fromtimestamp(ts, tz=tz)
log_data = {'log_messages': log_msgs, 'log_time': dt.isoformat()}
return JSENDSuccess(status_code=200, data=log_data)
class StageRefreshHandler(AuthRequiredMixin, View):
def get_stage(self, org, stage_id):
try:
stage = Stage.objects.get(org=org, id=stage_id)
except Stage.DoesNotExist:
return None
return stage
def post(self, request, stage_id, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = self.get_stage(org, stage_id)
if not stage:
return JSENDError(status_code=404, msg='stage not found')
github_access_key = request.user.jwt_payload.get('access_token')
task_refresh_stage.apply_async(args=[github_access_key, stage_id])
stage.status = 'changing'
stage.save()
stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS)
return JSENDSuccess(status_code=204)
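# Usage sketch (not part of the original handlers): one hypothetical way these
# class-based views could be wired into a urls.py. The URL patterns and module
# paths below are assumptions, not the project's actual routing.
#
#   from django.conf.urls import url
#   from api.handlers import stage
#
#   urlpatterns = [
#       url(r'^stages/?$', stage.StageRootHandler.as_view()),
#       url(r'^stages/(?P<stage_id>[^/]+)/?$', stage.StageDetailHandler.as_view()),
#       url(r'^stages/(?P<stage_id>[^/]+)/logs/?$', stage.StageLogHandler.as_view()),
#       url(r'^stages/(?P<stage_id>[^/]+)/refresh/?$', stage.StageRefreshHandler.as_view()),
#   ]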
|
mit
| 3,603,858,691,656,666,600
| 33.082474
| 105
| 0.628403
| false
| 3.552929
| false
| false
| false
|
globaltoken/globaltoken
|
test/functional/test_framework/authproxy.py
|
1
|
7759
|
# Copyright (c) 2011 Jeff Garzik
#
# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
#
# Copyright (c) 2007 Jan-Klaas Kollhof
#
# This file is part of jsonrpc.
#
# jsonrpc is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""HTTP proxy for opening RPC connection to globaltokend.
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
"""
import base64
import decimal
import http.client
import json
import logging
import socket
import time
import urllib.parse
HTTP_TIMEOUT = 30
USER_AGENT = "AuthServiceProxy/0.1"
log = logging.getLogger("BitcoinRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error):
try:
errmsg = '%(message)s (%(code)i)' % rpc_error
except (KeyError, TypeError):
errmsg = ''
super().__init__(errmsg)
self.error = rpc_error
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy():
__id_count = 0
# ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
self.__service_url = service_url
self._service_name = service_name
self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests
self.__url = urllib.parse.urlparse(service_url)
port = 80 if self.__url.port is None else self.__url.port
user = None if self.__url.username is None else self.__url.username.encode('utf8')
passwd = None if self.__url.password is None else self.__url.password.encode('utf8')
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
if connection:
# Callables re-use the connection of the original proxy
self.__conn = connection
elif self.__url.scheme == 'https':
self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=timeout)
else:
self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self._service_name is not None:
name = "%s.%s" % (self._service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def _request(self, method, path, postdata):
'''
        Do an HTTP request, with retry if we get disconnected (e.g. due to a timeout).
This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
'''
headers = {'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'}
try:
self.__conn.request(method, path, postdata, headers)
return self._get_response()
except http.client.BadStatusLine as e:
if e.line == "''": # if connection was closed, try again
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
else:
raise
except (BrokenPipeError, ConnectionResetError):
# Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
# ConnectionResetError happens on FreeBSD with Python 3.4
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
def get_request(self, *args, **argsn):
AuthServiceProxy.__id_count += 1
log.debug("-%s-> %s %s" % (AuthServiceProxy.__id_count, self._service_name,
json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
if args and argsn:
raise ValueError('Cannot handle both named and positional arguments')
return {'version': '1.1',
'method': self._service_name,
'params': args or argsn,
'id': AuthServiceProxy.__id_count}
def __call__(self, *args, **argsn):
postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
response = self._request('POST', self.__url.path, postdata.encode('utf-8'))
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
log.debug("--> " + postdata)
return self._request('POST', self.__url.path, postdata.encode('utf-8'))
def _get_response(self):
req_start_time = time.time()
try:
http_response = self.__conn.getresponse()
except socket.timeout as e:
raise JSONRPCException({
'code': -344,
'message': '%r RPC took longer than %f seconds. Consider '
'using larger timeout for calls that take '
'longer to return.' % (self._service_name,
self.__conn.timeout)})
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
content_type = http_response.getheader('Content-Type')
if content_type != 'application/json':
raise JSONRPCException({
'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)})
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
elapsed = time.time() - req_start_time
if "error" in response and response["error"] is None:
log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
else:
log.debug("<-- [%.6f] %s" % (elapsed, responsedata))
return response
def __truediv__(self, relative_uri):
return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn)
|
mit
| -3,351,838,090,374,952,400
| 42.105556
| 155
| 0.621601
| false
| 4.087987
| false
| false
| false
|
jtomasek/tuskar-ui-1
|
tuskar_ui/infrastructure/resource_management/resource_classes/workflows.py
|
1
|
12384
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from tuskar_ui import api as tuskar
import tuskar_ui.workflows
import re
from tuskar_ui.infrastructure. \
resource_management.resource_classes.tables import FlavorTemplatesTable
from tuskar_ui.infrastructure. \
resource_management.resource_classes.tables import RacksTable
class ResourceClassInfoAndFlavorsAction(workflows.Action):
name = forms.CharField(max_length=255,
label=_("Class Name"),
help_text="",
required=True)
service_type = forms.ChoiceField(label=_('Class Type'),
required=True,
choices=[('', ''),
('compute',
('Compute')),
('not_compute',
('Non Compute')),
],
widget=forms.Select(
attrs={'class': 'switchable'})
)
image = forms.ChoiceField(label=_('Provisioning Image'),
required=True,
choices=[('compute-img', ('overcloud-compute'))],
widget=forms.Select(
attrs={'class': 'switchable'})
)
def clean(self):
cleaned_data = super(ResourceClassInfoAndFlavorsAction,
self).clean()
name = cleaned_data.get('name')
resource_class_id = self.initial.get('resource_class_id', None)
try:
resource_classes = tuskar.ResourceClass.list(self.request)
except Exception:
resource_classes = []
msg = _('Unable to get resource class list')
exceptions.check_message(["Connection", "refused"], msg)
raise
for resource_class in resource_classes:
if resource_class.name == name and \
resource_class_id != resource_class.id:
raise forms.ValidationError(
_('The name "%s" is already used by'
' another resource class.')
% name
)
return cleaned_data
class Meta:
name = _("Class Settings")
help_text = _("From here you can fill the class "
"settings and add flavors to class.")
class CreateResourceClassInfoAndFlavors(tuskar_ui.workflows.TableStep):
table_classes = (FlavorTemplatesTable,)
action_class = ResourceClassInfoAndFlavorsAction
template_name = 'infrastructure/resource_management/resource_classes/'\
'_resource_class_info_and_flavors_step.html'
contributes = ("name", "service_type", "flavors_object_ids",
'max_vms')
def contribute(self, data, context):
request = self.workflow.request
if data:
context["flavors_object_ids"] =\
request.POST.getlist("flavors_object_ids")
            # todo: lsmola django can't parse dictionary from POST
# this should be rewritten to django formset
context["max_vms"] = {}
for index, value in request.POST.items():
match = re.match(
'^(flavors_object_ids__max_vms__(.*?))$',
index)
if match:
context["max_vms"][match.groups()[1]] = value
context.update(data)
return context
def get_flavors_data(self):
try:
resource_class_id = self.workflow.context.get("resource_class_id")
if resource_class_id:
resource_class = tuskar.ResourceClass.get(
self.workflow.request,
resource_class_id)
# TODO(lsmola ugly interface, rewrite)
self._tables['flavors'].active_multi_select_values = \
resource_class.flavortemplates_ids
all_flavors = resource_class.all_flavors
else:
all_flavors = tuskar.FlavorTemplate.list(
self.workflow.request)
except Exception:
all_flavors = []
exceptions.handle(self.workflow.request,
_('Unable to retrieve resource flavors list.'))
return all_flavors
class RacksAction(workflows.Action):
class Meta:
name = _("Racks")
class CreateRacks(tuskar_ui.workflows.TableStep):
table_classes = (RacksTable,)
action_class = RacksAction
contributes = ("racks_object_ids")
template_name = 'infrastructure/resource_management/'\
'resource_classes/_racks_step.html'
def contribute(self, data, context):
request = self.workflow.request
context["racks_object_ids"] =\
request.POST.getlist("racks_object_ids")
context.update(data)
return context
def get_racks_data(self):
try:
resource_class_id = self.workflow.context.get("resource_class_id")
if resource_class_id:
resource_class = tuskar.ResourceClass.get(
self.workflow.request,
resource_class_id)
# TODO(lsmola ugly interface, rewrite)
self._tables['racks'].active_multi_select_values = \
resource_class.racks_ids
racks = \
resource_class.all_racks
else:
racks = \
tuskar.Rack.list(self.workflow.request, True)
except Exception:
racks = []
exceptions.handle(self.workflow.request,
_('Unable to retrieve racks list.'))
return racks
class ResourceClassWorkflowMixin:
    # FIXME active tabs conflict
    # When on a page with tabs, the workflow with more steps is used,
    # there is a conflict of active tabs and it always shows the
    # first tab after an action. So I explicitly specify to what
    # tab it should redirect after action, until the conflict is
    # fixed in Horizon.
def get_index_url(self):
"""This url is used both as success and failure url"""
return "%s?tab=resource_management_tabs__resource_classes_tab" %\
reverse("horizon:infrastructure:resource_management:index")
def get_success_url(self):
return self.get_index_url()
def get_failure_url(self):
return self.get_index_url()
def format_status_message(self, message):
name = self.context.get('name')
return message % name
def _get_flavors(self, request, data):
flavors = []
flavor_ids = data.get('flavors_object_ids') or []
max_vms = data.get('max_vms')
resource_class_name = data['name']
for template_id in flavor_ids:
template = tuskar.FlavorTemplate.get(request, template_id)
capacities = []
for c in template.capacities:
capacities.append({'name': c.name,
'value': str(c.value),
'unit': c.unit})
# FIXME: tuskar uses resource-class-name prefix for flavors,
# e.g. m1.large, we add rc name to the template name:
flavor_name = "%s.%s" % (resource_class_name, template.name)
flavors.append({'name': flavor_name,
'max_vms': max_vms.get(template.id, None),
'capacities': capacities})
return flavors
def _add_racks(self, request, data, resource_class):
ids_to_add = data.get('racks_object_ids') or []
resource_class.set_racks(request, ids_to_add)
class CreateResourceClass(ResourceClassWorkflowMixin, workflows.Workflow):
default_steps = (CreateResourceClassInfoAndFlavors,
CreateRacks)
slug = "create_resource_class"
name = _("Create Class")
finalize_button_name = _("Create Class")
success_message = _('Created class "%s".')
failure_message = _('Unable to create class "%s".')
def _create_resource_class_info(self, request, data):
try:
flavors = self._get_flavors(request, data)
return tuskar.ResourceClass.create(
request,
name=data['name'],
service_type=data['service_type'],
flavors=flavors)
except Exception:
redirect = self.get_failure_url()
exceptions.handle(request,
_('Unable to create resource class.'),
redirect=redirect)
return None
def handle(self, request, data):
resource_class = self._create_resource_class_info(request, data)
self._add_racks(request, data, resource_class)
return True
class UpdateResourceClassInfoAndFlavors(CreateResourceClassInfoAndFlavors):
depends_on = ("resource_class_id",)
class UpdateRacks(CreateRacks):
depends_on = ("resource_class_id",)
class UpdateResourceClass(ResourceClassWorkflowMixin, workflows.Workflow):
default_steps = (UpdateResourceClassInfoAndFlavors,
UpdateRacks)
slug = "update_resource_class"
name = _("Update Class")
finalize_button_name = _("Update Class")
success_message = _('Updated class "%s".')
failure_message = _('Unable to update class "%s".')
def _update_resource_class_info(self, request, data):
try:
flavors = self._get_flavors(request, data)
return tuskar.ResourceClass.update(
request,
data['resource_class_id'],
name=data['name'],
service_type=data['service_type'],
flavors=flavors)
except Exception:
redirect = self.get_failure_url()
exceptions.handle(request,
_('Unable to create resource class.'),
redirect=redirect)
return None
def handle(self, request, data):
resource_class = self._update_resource_class_info(request, data)
self._add_racks(request, data, resource_class)
return True
class DetailUpdateWorkflow(UpdateResourceClass):
def get_index_url(self):
"""This url is used both as success and failure url"""
url = "horizon:infrastructure:resource_management:resource_classes:"\
"detail"
return "%s?tab=resource_class_details__overview" % (
reverse(url, args=(self.context["resource_class_id"])))
class UpdateRacksWorkflow(UpdateResourceClass):
def get_index_url(self):
"""This url is used both as success and failure url"""
url = "horizon:infrastructure:resource_management:resource_classes:"\
"detail"
return "%s?tab=resource_class_details__racks" % (
reverse(url, args=(self.context["resource_class_id"])))
class UpdateFlavorsWorkflow(UpdateResourceClass):
def get_index_url(self):
"""This url is used both as success and failure url"""
url = "horizon:infrastructure:resource_management:resource_classes:"\
"detail"
return "%s?tab=resource_class_details__flavors" % (
reverse(url, args=(self.context["resource_class_id"])))
|
apache-2.0
| -6,819,238,934,805,473,000
| 37.222222
| 79
| 0.566053
| false
| 4.488583
| false
| false
| false
|
forman/dectree
|
examples/intertidal_flat_classif/intertidal_flat_classif.py
|
1
|
12362
|
from numba import jit, jitclass, float64
import numpy as np
@jit(nopython=True)
def _B1_LT_085(x):
# B1.LT_085: lt(0.85)
if 0.0 == 0.0:
return 1.0 if x < 0.85 else 0.0
x1 = 0.85 - 0.0
x2 = 0.85 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B1_GT_1(x):
# B1.GT_1: gt(1.0)
if 0.0 == 0.0:
return 1.0 if x > 1.0 else 0.0
x1 = 1.0 - 0.0
x2 = 1.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B2_GT_0(x):
# B2.GT_0: gt(0.0)
if 0.0 == 0.0:
return 1.0 if x > 0.0 else 0.0
x1 = 0.0 - 0.0
x2 = 0.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B3_LT_005(x):
# B3.LT_005: lt(0.05)
if 0.0 == 0.0:
return 1.0 if x < 0.05 else 0.0
x1 = 0.05 - 0.0
x2 = 0.05 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B3_LT_01(x):
# B3.LT_01: lt(0.1)
if 0.0 == 0.0:
return 1.0 if x < 0.1 else 0.0
x1 = 0.1 - 0.0
x2 = 0.1 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B3_LT_015(x):
# B3.LT_015: lt(0.15)
if 0.0 == 0.0:
return 1.0 if x < 0.15 else 0.0
x1 = 0.15 - 0.0
x2 = 0.15 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B3_LT_02(x):
# B3.LT_02: lt(0.2)
if 0.0 == 0.0:
return 1.0 if x < 0.2 else 0.0
x1 = 0.2 - 0.0
x2 = 0.2 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B4_NODATA(x):
# B4.NODATA: eq(0.0)
if 0.0 == 0.0:
return 1.0 if x == 0.0 else 0.0
x1 = 0.0 - 0.0
x2 = 0.0
x3 = 0.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
if x <= x3:
return 1.0 - (x - x2) / (x3 - x2)
return 0.0
@jit(nopython=True)
def _B5_LT_01(x):
# B5.LT_01: lt(0.1)
if 0.0 == 0.0:
return 1.0 if x < 0.1 else 0.0
x1 = 0.1 - 0.0
x2 = 0.1 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B7_LT_05(x):
# B7.LT_05: lt(0.5)
if 0.0 == 0.0:
return 1.0 if x < 0.5 else 0.0
x1 = 0.5 - 0.0
x2 = 0.5 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B8_GT_0(x):
# B8.GT_0: gt(0.0)
if 0.0 == 0.0:
return 1.0 if x > 0.0 else 0.0
x1 = 0.0 - 0.0
x2 = 0.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_LT_009(x):
# B8.LT_009: lt(0.09)
if 0.0 == 0.0:
return 1.0 if x < 0.09 else 0.0
x1 = 0.09 - 0.0
x2 = 0.09 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B8_GT_033(x):
# B8.GT_033: gt(0.33)
if 0.0 == 0.0:
return 1.0 if x > 0.33 else 0.0
x1 = 0.33 - 0.0
x2 = 0.33 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_GT_035(x):
# B8.GT_035: gt(0.35)
if 0.0 == 0.0:
return 1.0 if x > 0.35 else 0.0
x1 = 0.35 - 0.0
x2 = 0.35 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_GT_04(x):
# B8.GT_04: gt(0.4)
if 0.0 == 0.0:
return 1.0 if x > 0.4 else 0.0
x1 = 0.4 - 0.0
x2 = 0.4 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_GT_045(x):
# B8.GT_045: gt(0.45)
if 0.0 == 0.0:
return 1.0 if x > 0.45 else 0.0
x1 = 0.45 - 0.0
x2 = 0.45 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_LT_085(x):
# B8.LT_085: lt(0.85)
if 0.0 == 0.0:
return 1.0 if x < 0.85 else 0.0
x1 = 0.85 - 0.0
x2 = 0.85 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B16_GT_0(x):
# B16.GT_0: gt(0.0)
if 0.0 == 0.0:
return 1.0 if x > 0.0 else 0.0
x1 = 0.0 - 0.0
x2 = 0.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B19_GT_015(x):
# B19.GT_015: gt(0.15)
if 0.0 == 0.0:
return 1.0 if x > 0.15 else 0.0
x1 = 0.15 - 0.0
x2 = 0.15 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _BSum_GT_011(x):
# BSum.GT_011: gt(0.11)
if 0.0 == 0.0:
return 1.0 if x > 0.11 else 0.0
x1 = 0.11 - 0.0
x2 = 0.11 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _BSum_GT_013(x):
# BSum.GT_013: gt(0.13)
if 0.0 == 0.0:
return 1.0 if x > 0.13 else 0.0
x1 = 0.13 - 0.0
x2 = 0.13 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _BSum_GT_016(x):
# BSum.GT_016: gt(0.16)
if 0.0 == 0.0:
return 1.0 if x > 0.16 else 0.0
x1 = 0.16 - 0.0
x2 = 0.16 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _Class_FALSE(x):
# Class.FALSE: false()
return 0.0
@jit(nopython=True)
def _Class_TRUE(x):
# Class.TRUE: true()
return 1.0
_InputsSpec = [
("b1", float64[:]),
("b2", float64[:]),
("b3", float64[:]),
("b4", float64[:]),
("b5", float64[:]),
("b6", float64[:]),
("b7", float64[:]),
("b8", float64[:]),
("b12", float64[:]),
("b13", float64[:]),
("b14", float64[:]),
("b15", float64[:]),
("b16", float64[:]),
("b19", float64[:]),
("b100", float64[:]),
("bsum", float64[:]),
]
@jitclass(_InputsSpec)
class Inputs:
def __init__(self, size: int):
self.b1 = np.zeros(size, dtype=np.float64)
self.b2 = np.zeros(size, dtype=np.float64)
self.b3 = np.zeros(size, dtype=np.float64)
self.b4 = np.zeros(size, dtype=np.float64)
self.b5 = np.zeros(size, dtype=np.float64)
self.b6 = np.zeros(size, dtype=np.float64)
self.b7 = np.zeros(size, dtype=np.float64)
self.b8 = np.zeros(size, dtype=np.float64)
self.b12 = np.zeros(size, dtype=np.float64)
self.b13 = np.zeros(size, dtype=np.float64)
self.b14 = np.zeros(size, dtype=np.float64)
self.b15 = np.zeros(size, dtype=np.float64)
self.b16 = np.zeros(size, dtype=np.float64)
self.b19 = np.zeros(size, dtype=np.float64)
self.b100 = np.zeros(size, dtype=np.float64)
self.bsum = np.zeros(size, dtype=np.float64)
_OutputsSpec = [
("nodata", float64[:]),
("Wasser", float64[:]),
("Schill", float64[:]),
("Muschel", float64[:]),
("dense2", float64[:]),
("dense1", float64[:]),
("Strand", float64[:]),
("Sand", float64[:]),
("Misch", float64[:]),
("Misch2", float64[:]),
("Schlick", float64[:]),
("schlick_t", float64[:]),
("Wasser2", float64[:]),
]
@jitclass(_OutputsSpec)
class Outputs:
def __init__(self, size: int):
self.nodata = np.zeros(size, dtype=np.float64)
self.Wasser = np.zeros(size, dtype=np.float64)
self.Schill = np.zeros(size, dtype=np.float64)
self.Muschel = np.zeros(size, dtype=np.float64)
self.dense2 = np.zeros(size, dtype=np.float64)
self.dense1 = np.zeros(size, dtype=np.float64)
self.Strand = np.zeros(size, dtype=np.float64)
self.Sand = np.zeros(size, dtype=np.float64)
self.Misch = np.zeros(size, dtype=np.float64)
self.Misch2 = np.zeros(size, dtype=np.float64)
self.Schlick = np.zeros(size, dtype=np.float64)
self.schlick_t = np.zeros(size, dtype=np.float64)
self.Wasser2 = np.zeros(size, dtype=np.float64)
@jit(nopython=True)
def apply_rules(inputs: Inputs, outputs: Outputs):
for i in range(len(outputs.nodata)):
t0 = 1.0
# if b4 is NODATA:
t1 = min(t0, _B4_NODATA(inputs.b4[i]))
# nodata = TRUE
outputs.nodata[i] = t1
# else:
t1 = min(t0, 1.0 - t1)
# if (b8 is GT_033 and b1 is LT_085) or b8 is LT_009:
t2 = min(t1, max(min(_B8_GT_033(inputs.b8[i]), _B1_LT_085(inputs.b1[i])), _B8_LT_009(inputs.b8[i])))
# if b5 is LT_01:
t3 = min(t2, _B5_LT_01(inputs.b5[i]))
# Wasser = TRUE
outputs.Wasser[i] = t3
# else:
t3 = min(t2, 1.0 - t3)
# if (b19 is GT_015 and (b8 is GT_04 and b8 is LT_085) and b7 is LT_05) or (b8 is GT_04 and bsum is GT_011) or (b8 is GT_035 and bsum is GT_016):
t4 = min(t3, max(max(min(min(_B19_GT_015(inputs.b19[i]), min(_B8_GT_04(inputs.b8[i]), _B8_LT_085(inputs.b8[i]))), _B7_LT_05(inputs.b7[i])), min(_B8_GT_04(inputs.b8[i]), _BSum_GT_011(inputs.bsum[i]))), min(_B8_GT_035(inputs.b8[i]), _BSum_GT_016(inputs.bsum[i]))))
# if bsum is GT_013:
t5 = min(t4, _BSum_GT_013(inputs.bsum[i]))
# Schill = TRUE
outputs.Schill[i] = t5
# else:
t5 = min(t4, 1.0 - t5)
# Muschel = TRUE
outputs.Muschel[i] = t5
# else:
t4 = min(t3, 1.0 - t4)
# if b8 is GT_045:
t5 = min(t4, _B8_GT_045(inputs.b8[i]))
# dense2 = TRUE
outputs.dense2[i] = t5
# else:
t5 = min(t4, 1.0 - t5)
# dense1 = TRUE
outputs.dense1[i] = t5
# else:
t2 = min(t1, 1.0 - t2)
# if b1 is GT_1:
t3 = min(t2, _B1_GT_1(inputs.b1[i]))
# Strand = TRUE
outputs.Strand[i] = t3
# else:
t3 = min(t2, 1.0 - t3)
# if b3 is LT_005:
t4 = min(t3, _B3_LT_005(inputs.b3[i]))
# Sand = TRUE
outputs.Sand[i] = t4
# else:
t4 = min(t3, 1.0 - t4)
# if b3 is LT_01 and b8 is GT_0:
t5 = min(t4, min(_B3_LT_01(inputs.b3[i]), _B8_GT_0(inputs.b8[i])))
# Misch = TRUE
outputs.Misch[i] = t5
# else:
t5 = min(t4, 1.0 - t5)
# if b3 is LT_015 and b8 is GT_0:
t6 = min(t5, min(_B3_LT_015(inputs.b3[i]), _B8_GT_0(inputs.b8[i])))
# Misch2 = TRUE
outputs.Misch2[i] = t6
# else:
t6 = min(t5, 1.0 - t6)
# if b3 is LT_02 and b2 is GT_0 and b8 is GT_0:
t7 = min(t6, min(min(_B3_LT_02(inputs.b3[i]), _B2_GT_0(inputs.b2[i])), _B8_GT_0(inputs.b8[i])))
# Schlick = TRUE
outputs.Schlick[i] = t7
# else:
t7 = min(t6, 1.0 - t7)
# if b16 is GT_0 and b8 is GT_0:
t8 = min(t7, min(_B16_GT_0(inputs.b16[i]), _B8_GT_0(inputs.b8[i])))
# schlick_t = TRUE
outputs.schlick_t[i] = t8
# else:
t8 = min(t7, 1.0 - t8)
# Wasser2 = TRUE
outputs.Wasser2[i] = t8
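# A minimal usage sketch (not part of the generated module): it allocates the
# typed input/output containers, runs the rule set once, and prints the
# "nodata" band. It assumes numba (with jitclass support) is installed; the
# input values are arbitrary and purely illustrative.
if __name__ == "__main__":
    size = 4
    inputs = Inputs(size)
    outputs = Outputs(size)
    inputs.b4[:] = 0.0          # b4 == 0.0 means NODATA for every pixel here
    apply_rules(inputs, outputs)
    print(outputs.nodata)       # expected: all ones, every pixel classified as nodata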
|
mit
| -7,789,529,066,740,844,000
| 24.647303
| 270
| 0.44928
| false
| 2.405526
| false
| false
| false
|
isabellemao/Hello-World
|
python/Junior2015CCCJ4.py
|
1
|
1278
|
#Problem J4: Arrival Time
departure_time = input()
split_departure = list(departure_time) #The time of departure, split into a list.
#Split the list
departure_hour = split_departure[0:2]
departure_minute = split_departure[3:5]
#Change the split list to integers.
departure_hour = int("".join(departure_hour))
departure_minute = int("".join(departure_minute))
#The start and end of the rush hours
rh_start_1 = 7
rh_end_1 = 10
rh_start_2 = 15
rh_end_2 = 19
#Set the current time
hour = departure_hour
minute = departure_minute
#For the 120 minutes it usually takes Fiona to commute
for counter in range(1, 121):
#If it's currently rush hour
if hour >= rh_start_1 and hour < rh_end_1 or hour >= rh_start_2 and hour < rh_end_2:
#Twice as slow if rush hour
minute += 2
else:
#Normal speed if normal time
minute += 1
    if minute >= 60:
        #Carry the extra minutes into the next hour (minute can reach 61 during rush hour)
        minute -= 60
        hour += 1
if hour == 24:
hour = 0
#Add fake zeroes if required.
if hour < 10:
hour = str(hour)
hour = "0" + hour
else:
hour = str(hour)
if minute < 10:
minute = str(minute)
minute = "0" + minute
else:
minute = str(minute)
#Make a valid output.
output = hour , ":" , minute
output = "".join(output)
print(output)
|
apache-2.0
| -2,193,184,907,640,587,300
| 22.666667
| 88
| 0.640063
| false
| 3.124694
| false
| false
| false
|
robosafe/testbench_vRAL_hydro
|
bert2_simulator/sim_step_monitors/assertion_monitor_manager.py
|
1
|
2830
|
#!/usr/bin/env python
"""
Assertion Monitor Manager
Created by David Western, June 2015.
"""
from coverage import coverage
import imp
import rospkg
import rospy
from std_msgs.msg import UInt64
from std_srvs.srv import Empty
import sys
class AMM:
def __init__(self,AM_list_file,trace_label):
# Read list of assertion monitors to run (from file?):
rospack = rospkg.RosPack()
path = rospack.get_path('bert2_simulator')
path = path+'/sim_step_monitors/'
print("--- Assertion monitors to run:")
self.AM_names = [line.rstrip('\n') for line in open(path+AM_list_file)]
print(self.AM_names)
# Instantiate assertion monitors:
self.AMs = [] # Initialise empty list of AMs.
for idx, class_name in enumerate(self.AM_names):
print(class_name)
print path+class_name+'.py'
module = imp.load_source(class_name, path+class_name+'.py')
#module = __import__(path+class_name) # N.B. These two lines imply that we
class_ = getattr(module, class_name) # require the AM to be defined in a
# file with the same name as the class.
self.AMs.append(class_(trace_label))
# Check AM has the mandatory attributes:
mand_attrs = ['step']
for attr in mand_attrs:
if not hasattr(self.AMs[idx],attr):
                    rospy.logerr("Assertion monitor specification '%s' does not define the attribute \
                                  '%s', which is required by AMM (assertion_monitor_manager.py). \
                                  Does %s inherit from an assertion monitor base class?",
                                  self.AM_names[idx], attr, self.AM_names[idx])
# Get service
self.unpause_gazebo = rospy.ServiceProxy('gazebo/unpause_physics',Empty)
# Subscriber to triggers, which come on each sim step:
rospy.Subscriber("AM_trigger", UInt64, self.trigger_AMs)
def trigger_AMs(self,data):
iteration = data.data
sim_time = rospy.get_time()
# Step all assertion monitors:
for idx, AM in enumerate(self.AMs):
AM.step(iteration,sim_time)
# Release gazebo now we've finished the checks for this step:
#print "unpausing"
#self.unpause_gazebo()
# Problem: This line prevents Gazebo's pause button from working (unless you
# get a lucky click).
if __name__ == '__main__':
try:
if len(sys.argv) < 3:
print("usage: rosrun [package_name] assertion_monitor_manager.py AM_list_file.txt report_file_name")
else:
rospy.init_node('AMM')
AMMInst = AMM(sys.argv[1],sys.argv[2])
rospy.spin()
except rospy.ROSInterruptException: #to stop the code when pressing Ctr+c
pass
|
gpl-3.0
| 1,416,007,924,697,365,000
| 31.906977
| 109
| 0.602473
| false
| 3.723684
| false
| false
| false
|
eayunstack/eayunstack-upgrade
|
ansible/library/keystone_v2_endpoint.py
|
1
|
9178
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
#
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on Jimmy Tang's implementation
DOCUMENTATION = """
---
module: keystone_v2_endpoint
short_description:
- Manage OpenStack Identity (keystone) v2 endpoint.
description:
  - Manage OpenStack Identity (keystone) v2 endpoints.
options:
token:
description:
      - The token to be used in case the password is not specified
required: true
default: None
endpoint:
description:
- The keystone url for authentication
required: true
service_name:
description:
- Name of the service.
required: true
default: None
region_name:
description:
- Name of the region.
required: true
default: None
service_type:
description:
- Type of service.
required: true
default: None
endpoint_dict:
description:
- Dict of endpoint urls to add to keystone for a service
required: true
default: None
type: dict
state:
description:
      - Ensure the endpoint is either present or absent.
- It always ensures endpoint is updated to latest url.
required: False
default: 'present'
requirements: [ python-keystoneclient ]
"""
EXAMPLES = """
# Create an endpoint
- keystone_v2_endpoint:
region_name: "RegionOne"
service_name: "glance"
service_type: "image"
endpoint: "http://127.0.0.1:5000/v2.0/"
token: "ChangeMe"
endpoint_dict:
publicurl: "http://127.0.0.1:9292"
adminurl: "http://127.0.0.1:9292"
internalurl: "http://127.0.0.1:9292"
"""
try:
from keystoneclient.v2_0 import client
except ImportError:
keystoneclient_found = False
else:
keystoneclient_found = True
class ManageKeystoneV2Endpoint(object):
def __init__(self, module):
"""Manage Keystone via Ansible."""
self.state_change = False
self.keystone = None
# Load AnsibleModule
self.module = module
@staticmethod
def _facts(facts):
"""Return a dict for our Ansible facts.
:param facts: ``dict`` Dict with data to return
"""
return {'keystone_facts': facts}
def failure(self, error, rc, msg):
"""Return a Failure when running an Ansible command.
:param error: ``str`` Error that occurred.
:param rc: ``int`` Return code while executing an Ansible command.
:param msg: ``str`` Message to report.
"""
self.module.fail_json(msg=msg, rc=rc, err=error)
def _authenticate(self):
"""Return a keystone client object."""
endpoint = self.module.params.get('endpoint')
token = self.module.params.get('token')
if token is None:
self.failure(
error='Missing Auth Token',
rc=2,
                msg='Auth token is required!'
)
if token:
self.keystone = client.Client(
endpoint=endpoint,
token=token
)
def _get_service(self, name, srv_type=None):
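        """Return the first registered service matching name (and srv_type, if given), or None."""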
for entry in self.keystone.services.list():
if srv_type is not None:
if entry.type == srv_type and name == entry.name:
return entry
elif entry.name == name:
return entry
else:
return None
def _get_endpoint(self, region, service_id):
""" Getting endpoints per complete definition
Returns the endpoint details for an endpoint matching
region, service id.
:param service_id: service to which the endpoint belongs
:param region: geographic location of the endpoint
"""
for entry in self.keystone.endpoints.list():
check = [
entry.region == region,
entry.service_id == service_id,
]
if all(check):
return entry
else:
return None
def _compare_endpoint_info(self, endpoint, endpoint_dict):
""" Compare existed endpoint with module parameters
Return True if public, admin, internal urls are all the same.
:param endpoint: endpoint existed
:param endpoint_dict: endpoint info passed in
"""
check = [
endpoint.adminurl == endpoint_dict.get('adminurl'),
endpoint.publicurl == endpoint_dict.get('publicurl'),
endpoint.internalurl == endpoint_dict.get('internalurl')
]
if all(check):
return True
else:
return False
def ensure_endpoint(self):
"""Ensures the deletion/modification/addition of endpoints
within Keystone.
        Exits the module with the endpoint facts on a successful run.
"""
self._authenticate()
service_name = self.module.params.get('service_name')
service_type = self.module.params.get('service_type')
region = self.module.params.get('region_name')
endpoint_dict = self.module.params.get('endpoint_dict')
state = self.module.params.get('state')
endpoint_dict = {
'adminurl': endpoint_dict.get('adminurl', ''),
'publicurl': endpoint_dict.get('publicurl', ''),
'internalurl': endpoint_dict.get('internalurl', '')
}
service = self._get_service(name=service_name, srv_type=service_type)
if service is None:
self.failure(
error='service [ %s ] was not found.' % service_name,
rc=2,
msg='Service was not found, does it exist?'
)
existed_endpoint = self._get_endpoint(
region=region,
service_id=service.id,
)
delete_existed = False
if state == 'present':
            ''' Create the endpoint if it does not exist. If it exists
                but its urls differ from the requested ones, create a
                new endpoint, then delete the existing endpoint that
                matches the service type, name, and region.
            '''
if existed_endpoint:
if not self._compare_endpoint_info(existed_endpoint,
endpoint_dict):
delete_existed = True
else:
endpoint = existed_endpoint
if (not existed_endpoint or
delete_existed):
self.state_change = True
endpoint = self.keystone.endpoints.create(
region=region,
service_id=service.id,
**endpoint_dict
)
elif state == 'absent':
if existed_endpoint is not None:
self.state_change = True
delete_existed = True
if delete_existed:
result = self.keystone.endpoints.delete(existed_endpoint.id)
if result[0].status_code != 204:
                self.module.fail_json(msg='Failed to delete endpoint %s' % existed_endpoint.id)
if state != 'absent':
facts = self._facts(endpoint.to_dict())
else:
facts = self._facts({})
self.module.exit_json(
changed=self.state_change,
ansible_facts=facts
)
# TODO(evrardjp): Deprecate state=update in Q.
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(
required=True
),
endpoint=dict(
required=True,
),
region_name=dict(
required=True
),
service_name=dict(
required=True
),
service_type=dict(
required=True
),
endpoint_dict=dict(
required=True,
type='dict'
),
state=dict(
choices=['present', 'absent'],
required=False,
default='present'
)
),
supports_check_mode=False,
)
km = ManageKeystoneV2Endpoint(module=module)
if not keystoneclient_found:
km.failure(
error='python-keystoneclient is missing',
rc=2,
msg='keystone client was not importable, is it installed?'
)
facts = km.ensure_endpoint()
# import module snippets
from ansible.module_utils.basic import * # NOQA
if __name__ == '__main__':
main()
|
apache-2.0
| -101,433,040,338,243,950
| 28.322684
| 78
| 0.552953
| false
| 4.541316
| false
| false
| false
|
Kivvix/stage-LPC
|
compareSrc/searchSDSSdata.py
|
1
|
4221
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import os
import glob
from config import *
import data.calexp
import data.src
## @def attributs
# @brief attributes selected from the SDSS DB and the src fits file
attributs = 'objid,run,camcol,field,ra,dec,u,g,r,i,z'
## Calexp treatment ##
def coordCalexp( fitsNum , calexpFits , first=True ):
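    """Return the minimum (first=True) or maximum (first=False) coordinate of the calexp image."""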
coordMin, coordMax = data.calexp.coord( calexpFits , first )
if ( first ):
return coordMin
else:
return coordMax
def savCalexp( coordMin , coordMax , fitsNum ):
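    """Query the SDSS DB for the given coordinate range and write the calexp data to PATH_OUTPUT."""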
global attributs , PATH_OUTPUT
calexpLines = data.calexp.query( coordMin , coordMax , attributs , fitsNum )
data.calexp.write( calexpLines , attributs , fitsNum , PATH_OUTPUT , True )
def calexp( fitsNum , calexpFits , first=True ):
"""
find and write calexp data (id,ra,dec,mag)
:param fitsNum: number of fits file (``rrrrrr-bc-ffff``)
:param calexpFits: name of calexp fits file
:param first: take all the picture or less 128 first pixels
:type fitsNum: string
:type calexpFits: string
:type first: boolean
"""
global attributs , PATH_OUTPUT
coordMin, coordMax = data.calexp.coord( calexpFits , first )
calexpLines = data.calexp.query( coordMin , coordMax , attributs , fitsNum )
data.calexp.write( calexpLines , attributs , fitsNum[0:9] , PATH_OUTPUT , first )
## Src treatment ##
def src( fitsNum , srcFits , first=True ):
"""
find and write src data (id,ra,dec,mag)
:param fitsNum: number of fits file (``rrrrrr-bc-ffff``)
:param srcFits: name of src fits file
:param first: take all the picture or less 128 first pixels
:type fitsNum: string
:type srcFits: string
:type first: boolean
"""
global attributs , PATH_OUTPUT
srcCoord,srcMag = data.src.coord( srcFits , fitsNum , first )
srcLines = data.src.map( srcCoord , srcMag )
data.src.write( srcLines , attributs , fitsNum[0:9] , PATH_OUTPUT , first )
def analyCol( runNum , c ):
"""
    Threaded worker that runs the data search for one CCD column of a run.
    :param runNum: run number
    :param c: column of the CCD (1-6)
    :type runNum: string
    :type c: string
"""
global b , PATH_DATA , PWD
print " " + str(c) + " ",
# data of each pair of fits files
first = True
for fits in glob.glob( c + "/" + b + "/calexp/calexp*.fits" ):
fitsNum = fits[18:32]
## @def calexpFits
# @brief path and name of calexp fits file
calexpFits = PATH_DATA + "/" + runNum + "/" + c + "/" + b + "/calexp/calexp-" + fitsNum + ".fits"
## @def srcFits
# @brief path and name of src fits file
#srcFits = PATH_DATA + "/" + runNum + "/" + c + "/" + b + "/src/src-" + fitsNum + ".fits"
#calexp( fitsNum , calexpFits , first )
if ( first ):
coordMin = coordCalexp( fitsNum , calexpFits , first )
else:
coordMax = coordCalexp( fitsNum , calexpFits , first )
#src( fitsNum , srcFits , first )
first = False
savCalexp( coordMin , coordMax , "%06d" % int(runNum) + "-" + b + c )
def analyRun( runNum ):
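    """Run the per-column analysis for every CCD column directory of the given run."""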
global b , PWD , PATH_DATA , PATH_OUTPUT , attributs
print "run : " + str(runNum ) + " : ",
os.chdir( PATH_DATA + "/" + runNum )
columns = glob.glob( "*" )
for c in columns :
analyCol( runNum , c )
if __name__ == '__main__':
os.chdir( PATH_DATA )
runs = glob.glob( "*" )
#runs = ( 7158, 7112, 5924, 5566, 6421, 7057, 6430, 4895, 5895, 6474, 6383, 7038, 5642, 6409, 6513, 6501, 6552, 2650, 6559, 6355, 7177, 7121, 3465, 7170, 7051, 6283, 6458, 5853, 6484, 5765, 2708, 5786, 4253, 6934, 6508, 2662, 6518, 6584, 4188, 6976, 7202, 7173, 4153, 5820, 2649, 7140, 6330, 3388, 7117, 6504, 6314, 4128, 6596, 6564, 5807, 6367, 6373, 5622, 5882, 7034, 7136, 6577, 6600, 2768, 3437, 4927, 6414, 3434, 5813, 7084, 4858, 7124, 6982, 4917, 4192, 5898, 6479, 4868, 7106, 7195, 5744, 3360, 4198, 6963, 6533, 4933, 5603, 3384, 7155, 5619, 4207, 4849, 5582, 7024, 1755, 5709, 5781, 5770, 7145, 5754, 5646, 5800, 5759, 6287, 6568, 7054, 4203, 5776, 6433, 4247, 5823, 5052, 3325, 5836, 5590, 6580, 7161, 2728, 4145, 5633, 6461, 6555, 6955, 4874, 5792, 5918, 6425, 6377, 4263, 5878, 6441, 6447, 7080, 5905, 5713, 6618, 6537, 5637, 6402, 6530, 7047, 6524, 7101, 6293 )
for r in runs :
analyRun( r )
print " "
time.sleep(60)
|
mit
| -5,985,041,647,614,164,000
| 33.040323
| 875
| 0.644871
| false
| 2.594345
| false
| false
| false
|
xaled/wunderous-analytics
|
wunderous/drive.py
|
1
|
5688
|
import os
import sys
import httplib2
from oauth2client.file import Storage
from apiclient import discovery
from oauth2client.client import OAuth2WebServerFlow
from wunderous.config import config
OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive'
SHEETS_OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive https://www.googleapis.com/auth/drive.readonly https://www.googleapis.com/auth/drive.file https://www.googleapis.com/auth/spreadsheets https://www.googleapis.com/auth/spreadsheets.readonly'
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
CREDS_FILE = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'credentials.json')
SHEETS_CREDS_FILE = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'sheets_credentials.json')
# CONFIG_FILE = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "wunderous.config.json")
sheet_service = None
drive_service = None
# def load_configs():
# client_secret = config['client_secret']
# client_id = config['client_id']
# return client_id, client_secret
def init_drive_service():
global drive_service
if drive_service:
return drive_service
storage = Storage(CREDS_FILE)
credentials = storage.get()
if credentials is None:
# Run through the OAuth flow and retrieve credentials
# client_id, client_secret = load_configs()
flow = OAuth2WebServerFlow(config['drive']['client_id'], config['drive']['client_secret'], OAUTH_SCOPE, REDIRECT_URI)
authorize_url = flow.step1_get_authorize_url()
print('Go to the following link in your browser: ' + authorize_url)
code = input('Enter verification code: ').strip()
credentials = flow.step2_exchange(code)
storage.put(credentials)
# Create an httplib2.Http object and authorize it with our credentials
http = httplib2.Http()
http = credentials.authorize(http)
drive_service = discovery.build('drive', 'v2', http=http)
return drive_service
def init_sheet_service():
global sheet_service
if sheet_service:
return sheet_service
storage = Storage(SHEETS_CREDS_FILE)
credentials = storage.get()
if credentials is None:
# Run through the OAuth flow and retrieve credentials
# client_id, client_secret = load_configs()
        flow = OAuth2WebServerFlow(config['drive']['client_id'], config['drive']['client_secret'], SHEETS_OAUTH_SCOPE, REDIRECT_URI)
authorize_url = flow.step1_get_authorize_url()
print('Go to the following link in your browser: ' + authorize_url)
code = input('Enter verification code: ').strip()
credentials = flow.step2_exchange(code)
storage.put(credentials)
# Create an httplib2.Http object and authorize it with our credentials
http = httplib2.Http()
http = credentials.authorize(http)
sheet_service = discovery.build('sheets', 'v4', http=http)
return sheet_service
def list_files(service):
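    """Yield every file item in the Drive account, following nextPageToken pagination."""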
page_token = None
while True:
param = {}
if page_token:
param['pageToken'] = page_token
files = service.files().list(**param).execute()
for item in files['items']:
yield item
page_token = files.get('nextPageToken')
if not page_token:
break
def _download_file(drive_service, download_url, outfile):
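    """Fetch download_url with the service's authorised http client and write the response body to outfile."""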
resp, content = drive_service._http.request(download_url)
if resp.status == 200:
with open(outfile, 'wb') as f:
f.write(content)
print("OK")
return
else:
raise Exception("ERROR downloading %s, response code is not 200!" % outfile)
def download_file(outfile, fileid):
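    """Find the Drive file with the given id and download it to outfile; raise if the file or its download link is missing."""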
drive_service = init_drive_service()
for item in list_files(drive_service):
if fileid == item.get('id'):
if 'downloadUrl' in item:
_download_file(drive_service, item['downloadUrl'], outfile)
return
else:
raise Exception("No download link is found for file: %s" % item['title'])
raise Exception("No file with id: %s is found " % fileid)
def get_sheet_metadata(spreadsheet_id):
sheet_service = init_sheet_service()
sheet_metadata = sheet_service.spreadsheets().get(spreadsheetId=spreadsheet_id).execute()
return sheet_metadata
def get_sheet_values(spreadsheet_id, range_):
sheet_service = init_sheet_service()
request = sheet_service.spreadsheets().values().get(spreadsheetId=spreadsheet_id, range=range_,
valueRenderOption='FORMATTED_VALUE',
dateTimeRenderOption='SERIAL_NUMBER')
response = request.execute()
return response
def get_sheet_value(spreadsheet_id, range_):
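    """Return the single cell value at range_, or an empty string if the range has no value."""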
response = get_sheet_values(spreadsheet_id, range_)
try:
return response['values'][0][0]
except:
return ''
def update_sheet_values(spreadsheet_id, range_, values):
sheet_service = init_sheet_service()
body = {'values': values}
result = sheet_service.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=range_, body=body,
valueInputOption='USER_ENTERED').execute()
return result.get('updatedCells')
def append_sheet_values(spreadsheet_id, range_, values):
sheet_service = init_sheet_service()
body = {'values': values}
result = sheet_service.spreadsheets().values().append(spreadsheetId=spreadsheet_id, range=range_, body=body,
valueInputOption='USER_ENTERED').execute()
return result.get('updates').get('updatedCells')
|
mit
| 7,538,950,005,650,568,000
| 37.174497
| 249
| 0.651371
| false
| 3.93361
| true
| false
| false
|
Goamaral/SCC
|
inputWindow.py
|
1
|
31922
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'inputWindow.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(708, 428)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.List = QtGui.QVBoxLayout()
self.List.setObjectName(_fromUtf8("List"))
self.listItem_3 = QtGui.QWidget(self.centralwidget)
self.listItem_3.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_3.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_3.setObjectName(_fromUtf8("listItem_3"))
self.horizontalLayout_5 = QtGui.QHBoxLayout(self.listItem_3)
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.nameLabel_3 = QtGui.QLabel(self.listItem_3)
self.nameLabel_3.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_3.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_3.setFont(font)
self.nameLabel_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_3.setObjectName(_fromUtf8("nameLabel_3"))
self.horizontalLayout_5.addWidget(self.nameLabel_3)
self.nameLabel_27 = QtGui.QLabel(self.listItem_3)
self.nameLabel_27.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_27.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_27.setFont(font)
self.nameLabel_27.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_27.setObjectName(_fromUtf8("nameLabel_27"))
self.horizontalLayout_5.addWidget(self.nameLabel_27)
self.mediaChegadaA = QtGui.QLineEdit(self.listItem_3)
self.mediaChegadaA.setMinimumSize(QtCore.QSize(50, 25))
self.mediaChegadaA.setMaximumSize(QtCore.QSize(50, 25))
self.mediaChegadaA.setText(_fromUtf8(""))
self.mediaChegadaA.setObjectName(_fromUtf8("mediaChegadaA"))
self.horizontalLayout_5.addWidget(self.mediaChegadaA)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem)
self.List.addWidget(self.listItem_3)
self.listItem_6 = QtGui.QWidget(self.centralwidget)
self.listItem_6.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_6.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_6.setObjectName(_fromUtf8("listItem_6"))
self.horizontalLayout_7 = QtGui.QHBoxLayout(self.listItem_6)
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.nameLabel_7 = QtGui.QLabel(self.listItem_6)
self.nameLabel_7.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_7.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_7.setFont(font)
self.nameLabel_7.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_7.setObjectName(_fromUtf8("nameLabel_7"))
self.horizontalLayout_7.addWidget(self.nameLabel_7)
self.nameLabel_8 = QtGui.QLabel(self.listItem_6)
self.nameLabel_8.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_8.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_8.setFont(font)
self.nameLabel_8.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_8.setObjectName(_fromUtf8("nameLabel_8"))
self.horizontalLayout_7.addWidget(self.nameLabel_8)
self.mediaPerfuracaoA = QtGui.QLineEdit(self.listItem_6)
self.mediaPerfuracaoA.setMinimumSize(QtCore.QSize(50, 25))
self.mediaPerfuracaoA.setMaximumSize(QtCore.QSize(50, 25))
self.mediaPerfuracaoA.setText(_fromUtf8(""))
self.mediaPerfuracaoA.setObjectName(_fromUtf8("mediaPerfuracaoA"))
self.horizontalLayout_7.addWidget(self.mediaPerfuracaoA)
self.nameLabel_9 = QtGui.QLabel(self.listItem_6)
self.nameLabel_9.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_9.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_9.setFont(font)
self.nameLabel_9.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_9.setObjectName(_fromUtf8("nameLabel_9"))
self.horizontalLayout_7.addWidget(self.nameLabel_9)
self.desvioPerfuracaoA = QtGui.QLineEdit(self.listItem_6)
self.desvioPerfuracaoA.setMinimumSize(QtCore.QSize(50, 25))
self.desvioPerfuracaoA.setMaximumSize(QtCore.QSize(50, 25))
self.desvioPerfuracaoA.setText(_fromUtf8(""))
self.desvioPerfuracaoA.setObjectName(_fromUtf8("desvioPerfuracaoA"))
self.horizontalLayout_7.addWidget(self.desvioPerfuracaoA)
self.nameLabel_10 = QtGui.QLabel(self.listItem_6)
self.nameLabel_10.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_10.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_10.setFont(font)
self.nameLabel_10.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_10.setObjectName(_fromUtf8("nameLabel_10"))
self.horizontalLayout_7.addWidget(self.nameLabel_10)
self.nMaquinasPerfuracaoA = QtGui.QLineEdit(self.listItem_6)
self.nMaquinasPerfuracaoA.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasPerfuracaoA.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasPerfuracaoA.setText(_fromUtf8(""))
self.nMaquinasPerfuracaoA.setObjectName(_fromUtf8("nMaquinasPerfuracaoA"))
self.horizontalLayout_7.addWidget(self.nMaquinasPerfuracaoA)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem1)
self.List.addWidget(self.listItem_6)
self.listItem_7 = QtGui.QWidget(self.centralwidget)
self.listItem_7.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_7.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_7.setObjectName(_fromUtf8("listItem_7"))
self.horizontalLayout_8 = QtGui.QHBoxLayout(self.listItem_7)
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
self.nameLabel_11 = QtGui.QLabel(self.listItem_7)
self.nameLabel_11.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_11.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_11.setFont(font)
self.nameLabel_11.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_11.setObjectName(_fromUtf8("nameLabel_11"))
self.horizontalLayout_8.addWidget(self.nameLabel_11)
self.nameLabel_12 = QtGui.QLabel(self.listItem_7)
self.nameLabel_12.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_12.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_12.setFont(font)
self.nameLabel_12.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_12.setObjectName(_fromUtf8("nameLabel_12"))
self.horizontalLayout_8.addWidget(self.nameLabel_12)
self.mediaPolimentoA = QtGui.QLineEdit(self.listItem_7)
self.mediaPolimentoA.setMinimumSize(QtCore.QSize(50, 25))
self.mediaPolimentoA.setMaximumSize(QtCore.QSize(50, 25))
self.mediaPolimentoA.setText(_fromUtf8(""))
self.mediaPolimentoA.setObjectName(_fromUtf8("mediaPolimentoA"))
self.horizontalLayout_8.addWidget(self.mediaPolimentoA)
self.nameLabel_13 = QtGui.QLabel(self.listItem_7)
self.nameLabel_13.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_13.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_13.setFont(font)
self.nameLabel_13.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_13.setObjectName(_fromUtf8("nameLabel_13"))
self.horizontalLayout_8.addWidget(self.nameLabel_13)
self.desvioPolimentoA = QtGui.QLineEdit(self.listItem_7)
self.desvioPolimentoA.setMinimumSize(QtCore.QSize(50, 25))
self.desvioPolimentoA.setMaximumSize(QtCore.QSize(50, 25))
self.desvioPolimentoA.setText(_fromUtf8(""))
self.desvioPolimentoA.setObjectName(_fromUtf8("desvioPolimentoA"))
self.horizontalLayout_8.addWidget(self.desvioPolimentoA)
self.nameLabel_14 = QtGui.QLabel(self.listItem_7)
self.nameLabel_14.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_14.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_14.setFont(font)
self.nameLabel_14.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_14.setObjectName(_fromUtf8("nameLabel_14"))
self.horizontalLayout_8.addWidget(self.nameLabel_14)
self.nMaquinasPolimentoA = QtGui.QLineEdit(self.listItem_7)
self.nMaquinasPolimentoA.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasPolimentoA.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasPolimentoA.setText(_fromUtf8(""))
self.nMaquinasPolimentoA.setObjectName(_fromUtf8("nMaquinasPolimentoA"))
self.horizontalLayout_8.addWidget(self.nMaquinasPolimentoA)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem2)
self.List.addWidget(self.listItem_7)
self.line_2 = QtGui.QFrame(self.centralwidget)
self.line_2.setMinimumSize(QtCore.QSize(5, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.line_2.setFont(font)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.List.addWidget(self.line_2)
self.listItem_4 = QtGui.QWidget(self.centralwidget)
self.listItem_4.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_4.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_4.setObjectName(_fromUtf8("listItem_4"))
self.horizontalLayout_6 = QtGui.QHBoxLayout(self.listItem_4)
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.nameLabel_4 = QtGui.QLabel(self.listItem_4)
self.nameLabel_4.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_4.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_4.setFont(font)
self.nameLabel_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_4.setObjectName(_fromUtf8("nameLabel_4"))
self.horizontalLayout_6.addWidget(self.nameLabel_4)
self.nameLabel_31 = QtGui.QLabel(self.listItem_4)
self.nameLabel_31.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_31.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_31.setFont(font)
self.nameLabel_31.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_31.setObjectName(_fromUtf8("nameLabel_31"))
self.horizontalLayout_6.addWidget(self.nameLabel_31)
self.mediaChegadaB = QtGui.QLineEdit(self.listItem_4)
self.mediaChegadaB.setMinimumSize(QtCore.QSize(50, 25))
self.mediaChegadaB.setMaximumSize(QtCore.QSize(50, 25))
self.mediaChegadaB.setText(_fromUtf8(""))
self.mediaChegadaB.setObjectName(_fromUtf8("mediaChegadaB"))
self.horizontalLayout_6.addWidget(self.mediaChegadaB)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem3)
self.List.addWidget(self.listItem_4)
self.listItem_9 = QtGui.QWidget(self.centralwidget)
self.listItem_9.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_9.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_9.setObjectName(_fromUtf8("listItem_9"))
self.horizontalLayout_13 = QtGui.QHBoxLayout(self.listItem_9)
self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13"))
self.nameLabel_36 = QtGui.QLabel(self.listItem_9)
self.nameLabel_36.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_36.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_36.setFont(font)
self.nameLabel_36.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_36.setObjectName(_fromUtf8("nameLabel_36"))
self.horizontalLayout_13.addWidget(self.nameLabel_36)
self.nameLabel_37 = QtGui.QLabel(self.listItem_9)
self.nameLabel_37.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_37.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_37.setFont(font)
self.nameLabel_37.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_37.setObjectName(_fromUtf8("nameLabel_37"))
self.horizontalLayout_13.addWidget(self.nameLabel_37)
self.mediaPerfuracaoB = QtGui.QLineEdit(self.listItem_9)
self.mediaPerfuracaoB.setMinimumSize(QtCore.QSize(50, 25))
self.mediaPerfuracaoB.setMaximumSize(QtCore.QSize(50, 25))
self.mediaPerfuracaoB.setText(_fromUtf8(""))
self.mediaPerfuracaoB.setObjectName(_fromUtf8("mediaPerfuracaoB"))
self.horizontalLayout_13.addWidget(self.mediaPerfuracaoB)
self.nameLabel_38 = QtGui.QLabel(self.listItem_9)
self.nameLabel_38.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_38.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_38.setFont(font)
self.nameLabel_38.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_38.setObjectName(_fromUtf8("nameLabel_38"))
self.horizontalLayout_13.addWidget(self.nameLabel_38)
self.desvioPerfuracaoB = QtGui.QLineEdit(self.listItem_9)
self.desvioPerfuracaoB.setMinimumSize(QtCore.QSize(50, 25))
self.desvioPerfuracaoB.setMaximumSize(QtCore.QSize(50, 25))
self.desvioPerfuracaoB.setText(_fromUtf8(""))
self.desvioPerfuracaoB.setObjectName(_fromUtf8("desvioPerfuracaoB"))
self.horizontalLayout_13.addWidget(self.desvioPerfuracaoB)
self.nameLabel_39 = QtGui.QLabel(self.listItem_9)
self.nameLabel_39.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_39.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_39.setFont(font)
self.nameLabel_39.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_39.setObjectName(_fromUtf8("nameLabel_39"))
self.horizontalLayout_13.addWidget(self.nameLabel_39)
self.nMaquinasPerfuracaoB = QtGui.QLineEdit(self.listItem_9)
self.nMaquinasPerfuracaoB.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasPerfuracaoB.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasPerfuracaoB.setText(_fromUtf8(""))
self.nMaquinasPerfuracaoB.setObjectName(_fromUtf8("nMaquinasPerfuracaoB"))
self.horizontalLayout_13.addWidget(self.nMaquinasPerfuracaoB)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_13.addItem(spacerItem4)
self.List.addWidget(self.listItem_9)
self.listItem_8 = QtGui.QWidget(self.centralwidget)
self.listItem_8.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_8.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_8.setObjectName(_fromUtf8("listItem_8"))
self.horizontalLayout_10 = QtGui.QHBoxLayout(self.listItem_8)
self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
self.nameLabel_19 = QtGui.QLabel(self.listItem_8)
self.nameLabel_19.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_19.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_19.setFont(font)
self.nameLabel_19.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_19.setObjectName(_fromUtf8("nameLabel_19"))
self.horizontalLayout_10.addWidget(self.nameLabel_19)
self.nameLabel_20 = QtGui.QLabel(self.listItem_8)
self.nameLabel_20.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_20.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_20.setFont(font)
self.nameLabel_20.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_20.setObjectName(_fromUtf8("nameLabel_20"))
self.horizontalLayout_10.addWidget(self.nameLabel_20)
self.mediaPolimentoB = QtGui.QLineEdit(self.listItem_8)
self.mediaPolimentoB.setMinimumSize(QtCore.QSize(50, 25))
self.mediaPolimentoB.setMaximumSize(QtCore.QSize(50, 25))
self.mediaPolimentoB.setText(_fromUtf8(""))
self.mediaPolimentoB.setObjectName(_fromUtf8("mediaPolimentoB"))
self.horizontalLayout_10.addWidget(self.mediaPolimentoB)
self.nameLabel_21 = QtGui.QLabel(self.listItem_8)
self.nameLabel_21.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_21.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_21.setFont(font)
self.nameLabel_21.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_21.setObjectName(_fromUtf8("nameLabel_21"))
self.horizontalLayout_10.addWidget(self.nameLabel_21)
self.desvioPolimentoB = QtGui.QLineEdit(self.listItem_8)
self.desvioPolimentoB.setMinimumSize(QtCore.QSize(50, 25))
self.desvioPolimentoB.setMaximumSize(QtCore.QSize(50, 25))
self.desvioPolimentoB.setText(_fromUtf8(""))
self.desvioPolimentoB.setObjectName(_fromUtf8("desvioPolimentoB"))
self.horizontalLayout_10.addWidget(self.desvioPolimentoB)
self.nameLabel_22 = QtGui.QLabel(self.listItem_8)
self.nameLabel_22.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_22.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_22.setFont(font)
self.nameLabel_22.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_22.setObjectName(_fromUtf8("nameLabel_22"))
self.horizontalLayout_10.addWidget(self.nameLabel_22)
self.nMaquinasPolimentoB = QtGui.QLineEdit(self.listItem_8)
self.nMaquinasPolimentoB.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasPolimentoB.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasPolimentoB.setText(_fromUtf8(""))
self.nMaquinasPolimentoB.setObjectName(_fromUtf8("nMaquinasPolimentoB"))
self.horizontalLayout_10.addWidget(self.nMaquinasPolimentoB)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem5)
self.List.addWidget(self.listItem_8)
self.line = QtGui.QFrame(self.centralwidget)
self.line.setMinimumSize(QtCore.QSize(0, 5))
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.List.addWidget(self.line)
self.listItem_11 = QtGui.QWidget(self.centralwidget)
self.listItem_11.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_11.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_11.setObjectName(_fromUtf8("listItem_11"))
self.horizontalLayout_12 = QtGui.QHBoxLayout(self.listItem_11)
self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
self.nameLabel_23 = QtGui.QLabel(self.listItem_11)
self.nameLabel_23.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_23.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_23.setFont(font)
self.nameLabel_23.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_23.setObjectName(_fromUtf8("nameLabel_23"))
self.horizontalLayout_12.addWidget(self.nameLabel_23)
self.nameLabel_24 = QtGui.QLabel(self.listItem_11)
self.nameLabel_24.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_24.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_24.setFont(font)
self.nameLabel_24.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_24.setObjectName(_fromUtf8("nameLabel_24"))
self.horizontalLayout_12.addWidget(self.nameLabel_24)
self.mediaEnvernizamento = QtGui.QLineEdit(self.listItem_11)
self.mediaEnvernizamento.setMinimumSize(QtCore.QSize(50, 25))
self.mediaEnvernizamento.setMaximumSize(QtCore.QSize(50, 25))
self.mediaEnvernizamento.setText(_fromUtf8(""))
self.mediaEnvernizamento.setObjectName(_fromUtf8("mediaEnvernizamento"))
self.horizontalLayout_12.addWidget(self.mediaEnvernizamento)
self.nameLabel_25 = QtGui.QLabel(self.listItem_11)
self.nameLabel_25.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_25.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_25.setFont(font)
self.nameLabel_25.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_25.setObjectName(_fromUtf8("nameLabel_25"))
self.horizontalLayout_12.addWidget(self.nameLabel_25)
self.desvioEnvernizamento = QtGui.QLineEdit(self.listItem_11)
self.desvioEnvernizamento.setMinimumSize(QtCore.QSize(50, 25))
self.desvioEnvernizamento.setMaximumSize(QtCore.QSize(50, 25))
self.desvioEnvernizamento.setText(_fromUtf8(""))
self.desvioEnvernizamento.setObjectName(_fromUtf8("desvioEnvernizamento"))
self.horizontalLayout_12.addWidget(self.desvioEnvernizamento)
self.nameLabel_26 = QtGui.QLabel(self.listItem_11)
self.nameLabel_26.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_26.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_26.setFont(font)
self.nameLabel_26.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_26.setObjectName(_fromUtf8("nameLabel_26"))
self.horizontalLayout_12.addWidget(self.nameLabel_26)
self.nMaquinasEnvernizamento = QtGui.QLineEdit(self.listItem_11)
self.nMaquinasEnvernizamento.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasEnvernizamento.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasEnvernizamento.setText(_fromUtf8(""))
self.nMaquinasEnvernizamento.setObjectName(_fromUtf8("nMaquinasEnvernizamento"))
self.horizontalLayout_12.addWidget(self.nMaquinasEnvernizamento)
spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_12.addItem(spacerItem6)
self.List.addWidget(self.listItem_11)
self.verticalLayout_4.addLayout(self.List)
self.footer = QtGui.QWidget(self.centralwidget)
self.footer.setMaximumSize(QtCore.QSize(100000, 50))
self.footer.setObjectName(_fromUtf8("footer"))
self.horizontalLayout = QtGui.QHBoxLayout(self.footer)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.nameLabel_30 = QtGui.QLabel(self.footer)
self.nameLabel_30.setMinimumSize(QtCore.QSize(130, 0))
self.nameLabel_30.setMaximumSize(QtCore.QSize(130, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_30.setFont(font)
self.nameLabel_30.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_30.setObjectName(_fromUtf8("nameLabel_30"))
self.horizontalLayout.addWidget(self.nameLabel_30)
self.tipoLimite = QtGui.QComboBox(self.footer)
self.tipoLimite.setMinimumSize(QtCore.QSize(125, 0))
self.tipoLimite.setMaximumSize(QtCore.QSize(125, 16777215))
self.tipoLimite.setObjectName(_fromUtf8("tipoLimite"))
self.tipoLimite.addItem(_fromUtf8(""))
self.tipoLimite.addItem(_fromUtf8(""))
self.horizontalLayout.addWidget(self.tipoLimite)
self.nameLabel_28 = QtGui.QLabel(self.footer)
self.nameLabel_28.setMinimumSize(QtCore.QSize(50, 0))
self.nameLabel_28.setMaximumSize(QtCore.QSize(50, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_28.setFont(font)
self.nameLabel_28.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_28.setObjectName(_fromUtf8("nameLabel_28"))
self.horizontalLayout.addWidget(self.nameLabel_28)
self.valorLimite = QtGui.QLineEdit(self.footer)
self.valorLimite.setMinimumSize(QtCore.QSize(75, 25))
self.valorLimite.setMaximumSize(QtCore.QSize(75, 25))
self.valorLimite.setText(_fromUtf8(""))
self.valorLimite.setObjectName(_fromUtf8("valorLimite"))
self.horizontalLayout.addWidget(self.valorLimite)
spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem7)
self.nameLabel_29 = QtGui.QLabel(self.footer)
self.nameLabel_29.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_29.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_29.setFont(font)
self.nameLabel_29.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_29.setObjectName(_fromUtf8("nameLabel_29"))
self.horizontalLayout.addWidget(self.nameLabel_29)
self.nRepeticoes = QtGui.QLineEdit(self.footer)
self.nRepeticoes.setMinimumSize(QtCore.QSize(50, 25))
self.nRepeticoes.setMaximumSize(QtCore.QSize(50, 25))
self.nRepeticoes.setText(_fromUtf8(""))
self.nRepeticoes.setObjectName(_fromUtf8("nRepeticoes"))
self.horizontalLayout.addWidget(self.nRepeticoes)
self.botaoSimular = QtGui.QPushButton(self.footer)
self.botaoSimular.setMinimumSize(QtCore.QSize(100, 25))
self.botaoSimular.setMaximumSize(QtCore.QSize(100, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.botaoSimular.setFont(font)
self.botaoSimular.setLayoutDirection(QtCore.Qt.RightToLeft)
self.botaoSimular.setAutoFillBackground(False)
self.botaoSimular.setStyleSheet(_fromUtf8(""))
self.botaoSimular.setFlat(False)
self.botaoSimular.setObjectName(_fromUtf8("botaoSimular"))
self.horizontalLayout.addWidget(self.botaoSimular)
self.verticalLayout_4.addWidget(self.footer)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Descriçao da simulaçao", None))
self.nameLabel_3.setText(_translate("MainWindow", "Peças grandes (A)", None))
self.nameLabel_27.setText(_translate("MainWindow", "Media chegada", None))
self.nameLabel_7.setText(_translate("MainWindow", "Perfuraçao", None))
self.nameLabel_8.setText(_translate("MainWindow", "Media", None))
self.nameLabel_9.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_10.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_11.setText(_translate("MainWindow", "Polimento", None))
self.nameLabel_12.setText(_translate("MainWindow", "Media", None))
self.nameLabel_13.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_14.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_4.setText(_translate("MainWindow", "Peças grandes (B)", None))
self.nameLabel_31.setText(_translate("MainWindow", "Media chegada", None))
self.nameLabel_36.setText(_translate("MainWindow", "Perfuraçao", None))
self.nameLabel_37.setText(_translate("MainWindow", "Media", None))
self.nameLabel_38.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_39.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_19.setText(_translate("MainWindow", "Polimento", None))
self.nameLabel_20.setText(_translate("MainWindow", "Media", None))
self.nameLabel_21.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_22.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_23.setText(_translate("MainWindow", "Envernizamento", None))
self.nameLabel_24.setText(_translate("MainWindow", "Media", None))
self.nameLabel_25.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_26.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_30.setText(_translate("MainWindow", "Limites da simulacao", None))
self.tipoLimite.setItemText(0, _translate("MainWindow", "Tempo simulacao", None))
self.tipoLimite.setItemText(1, _translate("MainWindow", "Nº Clientes", None))
self.nameLabel_28.setText(_translate("MainWindow", "Valor", None))
self.nameLabel_29.setText(_translate("MainWindow", "Nº Repeticoes", None))
self.botaoSimular.setText(_translate("MainWindow", "Simular", None))
|
mit
| -5,630,341,833,755,718,000
| 55.576241
| 105
| 0.701934
| false
| 3.376614
| false
| false
| false
|
toinbis/369old
|
src/web369/conf/base.py
|
1
|
2325
|
from pkg_resources import resource_filename
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'web369',
'USER': 'root',
'PASSWORD': '',
}
}
TIME_ZONE = 'Europe/Vilnius'
LANGUAGE_CODE = 'lt'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
STATIC_URL = '/static/'
STATIC_ROOT = resource_filename('web369', '../../var/htdocs/static')
STATICFILES_DIRS = (
resource_filename('web369', 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = resource_filename('web369', '../../var/htdocs/media')
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
SECRET_KEY = 'SBX*YTL!cANetM&uFTf6R5Je(@PX3!rtgo)kgwNT'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'web369.urls.default'
TEMPLATE_DIRS = (
resource_filename('web369', 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
# 'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# 'south',
'web369',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/tmp/django_cache',
'TIMEOUT': 60,
'OPTIONS': {
'MAX_ENTRIES': 1000
}
}
}
# Word count will be updated when new documents are scraped:
LIVE_WORD_COUNT = True
|
bsd-3-clause
| -2,580,828,059,716,364,300
| 23.734043
| 73
| 0.667097
| false
| 3.31669
| false
| true
| false
|
smallyear/linuxLearn
|
salt/salt/client/ssh/state.py
|
1
|
6047
|
# -*- coding: utf-8 -*-
'''
Create ssh executor system
'''
from __future__ import absolute_import
# Import python libs
import os
import tarfile
import tempfile
import json
import shutil
from contextlib import closing
# Import salt libs
import salt.client.ssh.shell
import salt.client.ssh
import salt.utils
import salt.utils.thin
import salt.utils.url
import salt.roster
import salt.state
import salt.loader
import salt.minion
class SSHState(salt.state.State):
'''
Create a State object which wraps the SSH functions for state operations
'''
def __init__(self, opts, pillar=None, wrapper=None):
self.wrapper = wrapper
super(SSHState, self).__init__(opts, pillar)
def load_modules(self, data=None, proxy=None):
'''
Load up the modules for remote compilation via ssh
'''
self.functions = self.wrapper
self.utils = salt.loader.utils(self.opts)
locals_ = salt.loader.minion_mods(self.opts, utils=self.utils)
self.states = salt.loader.states(self.opts, locals_, self.utils)
self.rend = salt.loader.render(self.opts, self.functions)
def check_refresh(self, data, ret):
'''
Stub out check_refresh
'''
return
def module_refresh(self):
'''
Module refresh is not needed, stub it out
'''
return
class SSHHighState(salt.state.BaseHighState):
'''
Used to compile the highstate on the master
'''
stack = []
def __init__(self, opts, pillar=None, wrapper=None, fsclient=None):
self.client = fsclient
salt.state.BaseHighState.__init__(self, opts)
self.state = SSHState(opts, pillar, wrapper)
self.matcher = salt.minion.Matcher(self.opts)
def load_dynamic(self, matches):
'''
Stub out load_dynamic
'''
return
def lowstate_file_refs(chunks, extras=''):
'''
Create a list of file ref objects to reconcile
'''
refs = {}
for chunk in chunks:
if not isinstance(chunk, dict):
continue
saltenv = 'base'
crefs = []
for state in chunk:
if state == '__env__':
saltenv = chunk[state]
elif state.startswith('__'):
continue
crefs.extend(salt_refs(chunk[state]))
if crefs:
if saltenv not in refs:
refs[saltenv] = []
refs[saltenv].append(crefs)
if extras:
extra_refs = extras.split(',')
if extra_refs:
for env in refs:
for x in extra_refs:
refs[env].append([x])
return refs
def salt_refs(data, ret=None):
'''
Pull salt file references out of the states
'''
proto = 'salt://'
if ret is None:
ret = []
if isinstance(data, str):
if data.startswith(proto) and data not in ret:
ret.append(data)
if isinstance(data, list):
for comp in data:
salt_refs(comp, ret)
if isinstance(data, dict):
for comp in data:
salt_refs(data[comp], ret)
return ret
def prep_trans_tar(file_client, chunks, file_refs, pillar=None, id_=None):
'''
Generate the execution package from the saltenv file refs and a low state
data structure
'''
gendir = tempfile.mkdtemp()
trans_tar = salt.utils.mkstemp()
lowfn = os.path.join(gendir, 'lowstate.json')
pillarfn = os.path.join(gendir, 'pillar.json')
sync_refs = [
[salt.utils.url.create('_modules')],
[salt.utils.url.create('_states')],
[salt.utils.url.create('_grains')],
[salt.utils.url.create('_renderers')],
[salt.utils.url.create('_returners')],
[salt.utils.url.create('_output')],
[salt.utils.url.create('_utils')],
]
with salt.utils.fopen(lowfn, 'w+') as fp_:
fp_.write(json.dumps(chunks))
if pillar:
with salt.utils.fopen(pillarfn, 'w+') as fp_:
fp_.write(json.dumps(pillar))
cachedir = os.path.join('salt-ssh', id_)
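    # cache every referenced salt:// file or directory into a per-saltenv tree under gendir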
for saltenv in file_refs:
file_refs[saltenv].extend(sync_refs)
env_root = os.path.join(gendir, saltenv)
if not os.path.isdir(env_root):
os.makedirs(env_root)
for ref in file_refs[saltenv]:
for name in ref:
short = salt.utils.url.parse(name)[0]
path = file_client.cache_file(name, saltenv, cachedir=cachedir)
if path:
tgt = os.path.join(env_root, short)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(path, tgt)
continue
files = file_client.cache_dir(name, saltenv, cachedir=cachedir)
if files:
for filename in files:
fn = filename[filename.find(short) + len(short):]
if fn.startswith('/'):
fn = fn.strip('/')
tgt = os.path.join(
env_root,
short,
fn,
)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(filename, tgt)
continue
try: # cwd may not exist if it was removed but salt was run from it
cwd = os.getcwd()
except OSError:
cwd = None
os.chdir(gendir)
with closing(tarfile.open(trans_tar, 'w:gz')) as tfp:
for root, dirs, files in os.walk(gendir):
for name in files:
full = os.path.join(root, name)
tfp.add(full[len(gendir):].lstrip(os.sep))
if cwd:
os.chdir(cwd)
shutil.rmtree(gendir)
return trans_tar
|
apache-2.0
| 7,802,137,446,918,748,000
| 30.331606
| 79
| 0.539937
| false
| 3.952288
| false
| false
| false
|
rapidpro/chatpro
|
chatpro/rooms/models.py
|
1
|
2494
|
from __future__ import absolute_import, unicode_literals
from chatpro.profiles.tasks import sync_org_contacts
from dash.orgs.models import Org
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Room(models.Model):
"""
Corresponds to a RapidPro contact group
"""
uuid = models.CharField(max_length=36, unique=True)
org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name='rooms')
name = models.CharField(verbose_name=_("Name"), max_length=128, blank=True,
help_text=_("Name of this room"))
users = models.ManyToManyField(User, verbose_name=_("Users"), related_name='rooms',
help_text=_("Users who can chat in this room"))
managers = models.ManyToManyField(User, verbose_name=_("Managers"), related_name='manage_rooms',
help_text=_("Users who can manage contacts in this room"))
is_active = models.BooleanField(default=True, help_text="Whether this room is active")
@classmethod
def create(cls, org, name, uuid):
return cls.objects.create(org=org, name=name, uuid=uuid)
@classmethod
def get_all(cls, org):
return cls.objects.filter(org=org, is_active=True)
@classmethod
def update_room_groups(cls, org, group_uuids):
"""
Updates an org's chat rooms based on the selected groups UUIDs
"""
# de-activate rooms not included
org.rooms.exclude(uuid__in=group_uuids).update(is_active=False)
# fetch group details
groups = org.get_temba_client().get_groups()
group_names = {group.uuid: group.name for group in groups}
for group_uuid in group_uuids:
existing = org.rooms.filter(uuid=group_uuid).first()
if existing:
existing.name = group_names[group_uuid]
existing.is_active = True
existing.save()
else:
cls.create(org, group_names[group_uuid], group_uuid)
sync_org_contacts.delay(org.id)
def get_contacts(self):
return self.contacts.filter(is_active=True)
def get_users(self):
return self.users.filter(is_active=True).select_related('profile')
def get_managers(self):
return self.managers.filter(is_active=True).select_related('profile')
def __unicode__(self):
return self.name
|
bsd-3-clause
| -7,098,435,995,584,484,000
| 34.628571
| 100
| 0.631917
| false
| 4.048701
| false
| false
| false
|
hkemmel/tal
|
affichage.py
|
1
|
2209
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 14:34:25 2017
@author: manfred.madelaine
"""
import time
def affStart():
msg1 = "*** Binvenue dans i-Opinion ou Opinion Way ***"
msg2 = "Le logiciel d'analyse et de classification des revues cinématographiques !"
listMsg = []
listMsg.append("")
listMsg.append(msg1)
listMsg.append("")
listMsg.append(msg2)
listMsg.append("")
print(affBox(listMsg, 1, 1, len(msg2)))
delai()
def affEnd():
msg1 = "*** Opinion Way vous remercie de votre viste, à bientôt ! ***"
msg = []
msg.append(msg1)
box = affBox(msg, 1, 1, len(msg1)-1)
print(box)
def affMessage(msg):
deb = "\n\t--- "
fin = " ---\n\n"
print(deb + msg + fin)
delai()
def delai():
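    # brief pause between displayed messages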
time.sleep(0.8)
"""
Display a message inside a box
msg : message to display
x : horizontal offset
y : vertical offset
L : width of the box
"""
def affBox(msg, x, y, L):
box = ""
    #vertical offset
box += multChaine("\n", y)
indiceLine = 0
    #handle one line at a time
for txt in msg:
        #top border
if(indiceLine == 0):
            #horizontal offset
box += "\n" + multChaine("\t", x)
box += multChaine("-", L+3)
        #horizontal offset
box += "\n" + multChaine("\t", x)
esp = ""
mult = 1
#message
if(len(txt) < L ):
esp = " "
mult = (L - len(txt)) / 2
box += "| " + multChaine(esp, mult) + txt + multChaine(esp, mult) + " |"
        #bottom border
if(indiceLine == len(msg) - 1 ):
            #horizontal offset
box += "\n" + multChaine("\t", x)
box += multChaine("-", L+3)
indiceLine += 1
box+="\n"
return(box)
def affErr():
affMessage("Votre réponse est incorrecte !")
def multChaine(chaine, mult):
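    # return the given string repeated mult times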
i = 0
msg = ""
while i < mult:
msg += chaine
i += 1
return msg
|
gpl-3.0
| 181,424,775,493,336,640
| 19.342593
| 87
| 0.474954
| false
| 3.132668
| false
| false
| false
|
LuizGsa21/p4-conference-central
|
models.py
|
1
|
7226
|
#!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
import datetime
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty(default='')
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.KeyProperty(kind='Conference', repeated=True)
wishList = ndb.KeyProperty(kind='Session', repeated=True)
def toForm(self):
form = ProfileForm(
displayName=self.displayName,
mainEmail=self.mainEmail,
teeShirtSize=getattr(TeeShirtSize, self.teeShirtSize),
conferenceKeysToAttend=[key.urlsafe() for key in self.conferenceKeysToAttend]
)
form.check_initialized()
return form
def toMiniForm(self):
form = ProfileMiniForm(
displayName=self.displayName,
teeShirtSize=getattr(TeeShirtSize, self.teeShirtSize)
)
form.check_initialized()
return form
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
class Conference(ndb.Model):
"""Conference -- Conference object"""
required_fields_schema = ('name', 'organizerUserId', 'startDate', 'endDate')
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty(required=True)
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty(required=True)
month = ndb.IntegerProperty()
endDate = ndb.DateProperty(required=True)
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
@property
def sessions(self):
return Session.query(ancestor=self.key)
def toForm(self, display_name=''):
form = ConferenceForm(
websafeKey=self.key.urlsafe(),
name=self.name,
description=self.description,
organizerUserId=self.organizerUserId,
topics=self.topics,
city=self.city,
startDate=self.startDate.strftime('%Y-%m-%d'),
month=self.month,
endDate=self.endDate.strftime('%Y-%m-%d'),
maxAttendees=self.maxAttendees,
seatsAvailable=self.seatsAvailable,
organizerDisplayName=display_name
)
form.check_initialized()
return form
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) # DateTimeField()
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10) # DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
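# Example of the inbound query message shape (field/operator/value strings are
# hypothetical; the accepted names are defined by the API layer, not here):
#
#     ConferenceQueryForms(filters=[
#         ConferenceQueryForm(field='CITY', operator='EQ', value='London'),
#         ConferenceQueryForm(field='MAX_ATTENDEES', operator='GT', value='10'),
#     ])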
class Speaker(ndb.Model):
"""Speaker -- Speaker object"""
name = ndb.StringProperty(required=True)
class Session(ndb.Model):
"""Session -- Session object"""
required_fields_schema = ('name', 'speaker', 'duration', 'typeOfSession', 'date', 'startTime')
name = ndb.StringProperty(required=True)
highlights = ndb.StringProperty()
speaker = ndb.StructuredProperty(modelclass=Speaker, required=True)
duration = ndb.IntegerProperty(required=True)
typeOfSession = ndb.StringProperty(required=True)
date = ndb.DateProperty(required=True)
startTime = ndb.TimeProperty(required=True)
def toForm(self):
form = SessionForm(
websafeKey=self.key.urlsafe(),
name=self.name,
highlights=self.highlights,
speaker=self.speaker.name,
duration=self.duration,
typeOfSession=self.typeOfSession,
date=self.date.strftime('%Y-%m-%d'),
startTime=self.startTime.strftime('%H:%M')
)
form.check_initialized()
return form
class SessionForm(messages.Message):
"""SessionForm -- Session outbound form message"""
websafeKey = messages.StringField(1)
name = messages.StringField(2)
highlights = messages.StringField(3)
speaker = messages.StringField(4)
duration = messages.IntegerField(5)
typeOfSession = messages.StringField(6)
date = messages.StringField(7)
startTime = messages.StringField(8)
class SessionForms(messages.Message):
"""SessionForm -- multiple SessionForm outbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class SessionQueryForm(messages.Message):
"""SessionQueryForm -- Session query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class SessionQueryForms(messages.Message):
"""SessionQueryForms -- multiple SessionQueryForm inbound form message"""
filters = messages.MessageField(SessionQueryForm, 1, repeated=True)
|
apache-2.0
| 4,306,855,950,322,396,700
| 31.696833
| 98
| 0.687517
| false
| 3.887036
| false
| false
| false
|
PyBossa/pybossa
|
pybossa/default_settings.py
|
1
|
4813
|
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
DEBUG = False
# webserver host and port
HOST = '0.0.0.0'
PORT = 5000
SECRET = 'foobar'
SECRET_KEY = 'my-session-secret'
ITSDANGEROUSKEY = 'its-dangerous-key'
## project configuration
BRAND = 'PYBOSSA'
TITLE = 'PYBOSSA'
COPYRIGHT = 'Set Your Institution'
DESCRIPTION = 'Set the description in your config'
TERMSOFUSE = 'http://okfn.org/terms-of-use/'
DATAUSE = 'http://opendatacommons.org/licenses/by/'
LOGO = ''
DEFAULT_LOCALE = 'en'
LOCALES = [('en', 'English'), ('es', u'Español'),
('it', 'Italiano'), ('fr', u'Français'),
('ja', u'日本語'), ('el', u'ελληνικά')]
## Default THEME
THEME = 'default'
## Default number of apps per page
APPS_PER_PAGE = 20
## Default allowed extensions
ALLOWED_EXTENSIONS = ['js', 'css', 'png', 'jpg', 'jpeg', 'gif', 'zip']
UPLOAD_METHOD = 'local'
## Default number of users shown in the leaderboard
LEADERBOARD = 20
## Default configuration for debug toolbar
ENABLE_DEBUG_TOOLBAR = False
# Cache default key prefix
REDIS_SENTINEL = [('localhost', 26379)]
REDIS_MASTER = 'mymaster'
REDIS_DB = 0
REDIS_KEYPREFIX = 'pybossa_cache'
## Default cache timeouts
# Project cache
AVATAR_TIMEOUT = 30 * 24 * 60 * 60
APP_TIMEOUT = 15 * 60
REGISTERED_USERS_TIMEOUT = 15 * 60
ANON_USERS_TIMEOUT = 5 * 60 * 60
STATS_FRONTPAGE_TIMEOUT = APP_TIMEOUT
STATS_APP_TIMEOUT = 12 * 60 * 60
STATS_DRAFT_TIMEOUT = 24 * 60 * 60
N_APPS_PER_CATEGORY_TIMEOUT = 60 * 60
BROWSE_TASKS_TIMEOUT = 3 * 60 * 60
# Category cache
CATEGORY_TIMEOUT = 24 * 60 * 60
# User cache
USER_TIMEOUT = 15 * 60
USER_TOP_TIMEOUT = 24 * 60 * 60
USER_TOTAL_TIMEOUT = 24 * 60 * 60
# Project Presenters
PRESENTERS = ["basic", "image", "sound", "video", "map", "pdf"]
# Default Google Docs spreadsheet template tasks URLs
TEMPLATE_TASKS = {
'image': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdHFEN29mZUF0czJWMUhIejF6dWZXdkE&usp=sharing",
'sound': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEczcWduOXRUb1JUc1VGMmJtc2xXaXc&usp=sharing",
'video': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZ2UGhxSTJjQl9YNVhfUVhGRUdoRWc&usp=sharing",
'map': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZnbjdwcnhKRVNlN1dGXy0tTnNWWXc&usp=sharing",
'pdf': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEVVamc0R0hrcjlGdXRaUXlqRXlJMEE&usp=sharing"}
# Rate limits default values
LIMIT = 300
PER = 15 * 60
# Expiration time for password protected project cookies
PASSWD_COOKIE_TIMEOUT = 60 * 30
# Expiration time for account confirmation / password recovery links
ACCOUNT_LINK_EXPIRATION = 5 * 60 * 60
# Disable new account confirmation (via email)
ACCOUNT_CONFIRMATION_DISABLED = True
# Send emails weekly update every
WEEKLY_UPDATE_STATS = 'Sunday'
# Enable Server Sent Events
SSE = False
# Pro user features. False will make the feature available to all regular users,
# while True will make it available only to pro users
PRO_FEATURES = {
'auditlog': True,
'webhooks': True,
'updated_exports': True,
'notify_blog_updates': True,
'project_weekly_report': True,
'autoimporter': True,
'better_stats': True
}
CORS_RESOURCES = {r"/api/*": {"origins": "*",
"allow_headers": ['Content-Type',
'Authorization'],
"max_age": 21600
}}
FAILED_JOBS_RETRIES = 3
FAILED_JOBS_MAILS = 7
FULLTEXTSEARCH_LANGUAGE = 'english'
STRICT_SLASHES = True
# Background jobs default time outs
MINUTE = 60
TIMEOUT = 10 * MINUTE
# OneSignal GCM Sender ID
# DO NOT MODIFY THIS
GCM_SENDER_ID = "482941778795"
# Unpublish inactive projects
UNPUBLISH_PROJECTS = True
# TTL for ZIP files of personal data
TTL_ZIP_SEC_FILES = 3
# Default cryptopan key
CRYPTOPAN_KEY = '32-char-str-for-AES-key-and-pad.'
# Instruct PYBOSSA to generate absolute paths or not for avatars
AVATAR_ABSOLUTE = True
# Spam accounts to avoid
SPAM = []
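# These are defaults only; a deployment typically overrides them in its own
# settings file loaded after this module (the file name is deployment-specific,
# e.g. settings_local.py). A minimal sketch of such an override:
#
#     DEBUG = True
#     BRAND = 'My PYBOSSA site'
#     REDIS_SENTINEL = [('redis-sentinel', 26379)]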
|
agpl-3.0
| 167,789,960,090,039,200
| 28.429448
| 116
| 0.689806
| false
| 3.051527
| false
| false
| false
|
Naoto-Imamachi/MIRAGE
|
scripts/module/preparation/phastcons_score_list.py
|
1
|
3683
|
#!usr/bin/env python
import sys
import re
import shelve
from parameter.common_parameters import common_parameters
import utils.setting_utils as utils
utils.now_time("phastcons_score_list script starting...")
p = utils.Bunch(common_parameters)
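# The reference file is expected to be BED-formatted. A minimal sketch of the
# two layouts handled in main() below (tab-separated, coordinates illustrative):
#
#   12-column BED (per-exon blocks are walked):
#     chr1  1000  5000  NM_000001  0  +  1000  5000  0  2  100,200,  0,3800,
#   6-column BED (a single start/end interval is walked):
#     chr1  1000  1200  MIR000001  0  -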
def main():
utils.now_time("Input_file: " + p.phastcons_score_list_db_input)
utils.now_time("Reference_file: " + p.phastcons_score_list_reference)
utils.now_time("Output_file: " + p.phastcons_score_list_db_output)
output_merge = p.phastcons_score_list_db_output + 'phastCons46way_Refseq_for_MIRAGE_CDS.db' #'phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
output_merge_shelve = shelve.open(output_merge)
#for x in ['chr21']:
for x in ['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX','chrY','chrM']:
ref_s = p.phastcons_score_list_reference #mirBase, Refseq etc...
ref_file = open(ref_s,'r')
input_s = p.phastcons_score_list_db_input + x + '.phastCons46way_Refseq_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19.db'
output_s = p.phastcons_score_list_db_output + x + '.phastCons46way_Refseq_for_MIRAGE_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
input_shelve = shelve.open(input_s)
output_shelve = shelve.open(output_s)
score_list_dict = {}
for line in ref_file:
line = line.rstrip()
data = line.split("\t")
chrom = data[0]
if not chrom == x:
continue
strand = data[5]
if len(data) >= 12: #12bed format
exon_block = data[10].split(',')
exon_block.pop() #Remove the last item ''
exon_st = data[11].split(',')
exon_st.pop() #Remove the last item ''
name = data[3]
score_list_dict[name] = []
for y in range(len(exon_block)):
st = int(data[1]) + int(exon_st[y])
ed = int(data[1]) + int(exon_st[y]) + int(exon_block[y])
length = ed - st
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
elif len(data) >= 3: #6bed format
st = int(data[1])
ed = int(data[2])
length = ed - st
name = data[3]
score_list_dict[name] = []
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
else:
            print('ERROR: Your BED format file has fewer than three columns.')
            print('BED format files need to have at least three columns [chr, st, ed]...')
sys.exit(1)
output_shelve.update(score_list_dict)
output_merge_shelve.update(score_list_dict)
input_shelve.close()
output_shelve.close()
utils.now_time("phastcons_score_list script was successfully finished!!")
output_merge_shelve.close()
if __name__ == '__main__':
main()
|
mit
| -2,538,876,580,107,515,400
| 41.329412
| 203
| 0.524572
| false
| 3.435634
| false
| false
| false
|
DerekK88/PICwriter
|
picwriter/components/stripslotconverter.py
|
1
|
9317
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import gdspy
import picwriter.toolkit as tk
class StripSlotConverter(tk.Component):
"""Strip-to-Slot Side Converter Cell class. Adiabatically transforms a strip to a slot waveguide mode, with two sections. Section 1 introduces a narrow waveguide alongside the input strip waveguide and gradually lowers the gap between the strip waveguide and narrow side waveguide. Section 2 gradually converts the widths of the two waveguides until they are equal to the slot rail widths.
Args:
* **wgt_input** (WaveguideTemplate): WaveguideTemplate object for the input waveguide (should be either of type `strip` or `slot`).
* **wgt_output** (WaveguideTemplate): WaveguideTemplate object for the output waveguide (should be either of type `strip` or `slot`, opposite of the input type).
* **length1** (float): Length of section 1 that gradually changes the distance between the two waveguides.
* **length2** (float): Length of section 2 that gradually changes the widths of the two waveguides until equal to the slot waveguide rail widths.
* **start_rail_width** (float): Width of the narrow waveguide appearing next to the strip waveguide.
* **end_strip_width** (float): Width of the strip waveguide at the end of `length1` and before `length2`
* **d** (float): Distance between the outer edge of the strip waveguide and the start of the slot waveguide rail.
Keyword Args:
* **input_strip** (Boolean): If `True`, sets the input port to be the strip waveguide side. If `False`, slot waveguide is on the input. Defaults to `None`, in which case the input port waveguide template is used to choose.
* **port** (tuple): Cartesian coordinate of the input port. Defaults to (0,0).
* **direction** (string): Direction that the component will point *towards*, can be of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, OR an angle (float, in radians)
Members:
* **portlist** (dict): Dictionary with the relevant port information
Portlist format:
* portlist['input'] = {'port': (x1,y1), 'direction': 'dir1'}
* portlist['output'] = {'port': (x2, y2), 'direction': 'dir2'}
Where in the above (x1,y1) is the same as the 'port' input, (x2, y2) is the end of the taper, and 'dir1', 'dir2' are of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, *or* an angle in *radians*.
'Direction' points *towards* the waveguide that will connect to it.
Note: The waveguide and cladding layer/datatype are taken from the `wgt_slot` by default.
"""
def __init__(
self,
wgt_input,
wgt_output,
length1,
length2,
start_rail_width,
end_strip_width,
d,
input_strip=None,
port=(0, 0),
direction="EAST",
):
tk.Component.__init__(self, "StripSlotConverter", locals())
self.portlist = {}
if (not isinstance(input_strip, bool)) and (input_strip != None):
raise ValueError(
"Invalid input provided for `input_strip`. Please specify a boolean."
)
if input_strip == None:
# Auto-detect based on wgt_input
self.input_strip = (
wgt_input.wg_type == "strip" or wgt_input.wg_type == "swg"
)
else:
# User-override
self.input_strip = input_strip
if self.input_strip:
self.wgt_strip = wgt_input
self.wgt_slot = wgt_output
else:
self.wgt_strip = wgt_output
self.wgt_slot = wgt_input
self.wg_spec = {
"layer": wgt_output.wg_layer,
"datatype": wgt_output.wg_datatype,
}
self.clad_spec = {
"layer": wgt_output.clad_layer,
"datatype": wgt_output.clad_datatype,
}
self.length1 = length1
self.length2 = length2
self.d = d
self.start_rail_width = start_rail_width
self.end_strip_width = end_strip_width
self.port = port
self.direction = direction
self.__build_cell()
self.__build_ports()
""" Translate & rotate the ports corresponding to this specific component object
"""
self._auto_transform_()
def __build_cell(self):
# Sequentially build all the geometric shapes using polygons
# Add strip waveguide taper for region 1
x0, y0 = (0, 0)
pts = [
(x0, y0 - self.wgt_strip.wg_width / 2.0),
(x0, y0 + self.wgt_strip.wg_width / 2.0),
(
x0 + self.length1,
y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width,
),
(x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0),
]
strip1 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the thin side waveguide for region 1
pts = [
(x0, y0 + self.wgt_strip.wg_width / 2.0 + self.d),
(x0, y0 + self.wgt_strip.wg_width / 2.0 + self.d + self.start_rail_width),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot
+ self.start_rail_width,
),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot,
),
]
thin_strip = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the bottom rail for region 2
pts = [
(
x0 + self.length1,
y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width,
),
(x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0),
(x0 + self.length1 + self.length2, y0 - self.wgt_slot.wg_width / 2.0),
(
x0 + self.length1 + self.length2,
y0 - self.wgt_slot.wg_width / 2.0 + self.wgt_slot.rail,
),
]
rail1 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the top rail for region 2
pts = [
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot
+ self.start_rail_width,
),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot,
),
(
x0 + self.length1 + self.length2,
y0 + self.wgt_slot.wg_width / 2.0 - self.wgt_slot.rail,
),
(x0 + self.length1 + self.length2, y0 + self.wgt_slot.wg_width / 2.0),
]
rail2 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add a cladding polygon
pts = [
(x0, y0 + self.wgt_strip.clad_width + self.wgt_strip.wg_width / 2.0),
(
x0 + self.length1 + self.length2,
y0 + self.wgt_slot.clad_width + self.wgt_slot.wg_width / 2.0,
),
(
x0 + self.length1 + self.length2,
y0 - self.wgt_slot.clad_width - self.wgt_slot.wg_width / 2.0,
),
(x0, y0 - self.wgt_strip.clad_width - self.wgt_strip.wg_width / 2.0),
]
clad = gdspy.Polygon(
pts, layer=self.wgt_strip.clad_layer, datatype=self.wgt_strip.clad_datatype
)
self.add(strip1)
self.add(thin_strip)
self.add(rail1)
self.add(rail2)
self.add(clad)
def __build_ports(self):
# Portlist format:
# example: example: {'port':(x_position, y_position), 'direction': 'NORTH'}
self.portlist["input"] = {"port": (0, 0), "direction": "WEST"}
self.portlist["output"] = {
"port": (self.length1 + self.length2, 0),
"direction": "EAST",
}
if __name__ == "__main__":
from . import *
top = gdspy.Cell("top")
wgt_strip = WaveguideTemplate(bend_radius=50, wg_type="strip", wg_width=0.7)
wgt_slot = WaveguideTemplate(bend_radius=50, wg_type="slot", wg_width=0.7, slot=0.2)
wg1 = Waveguide([(0, 0), (100, 0)], wgt_strip)
tk.add(top, wg1)
ssc = StripSlotConverter(
wgt_strip,
wgt_slot,
length1=15.0,
length2=15.0,
start_rail_width=0.1,
end_strip_width=0.4,
d=1.0,
**wg1.portlist["output"]
)
tk.add(top, ssc)
(x1, y1) = ssc.portlist["output"]["port"]
wg2 = Waveguide([(x1, y1), (x1 + 100, y1)], wgt_slot)
tk.add(top, wg2)
gdspy.LayoutViewer(cells=top)
# gdspy.write_gds('StripSlotConverter.gds', unit=1.0e-6, precision=1.0e-9)
|
mit
| 7,310,835,208,231,276,000
| 36.268
| 396
| 0.545347
| false
| 3.359899
| false
| false
| false
|
dzamie/weasyl
|
weasyl/blocktag.py
|
1
|
4024
|
# blocktag.py
from error import PostgresError
import define as d
import profile
import searchtag
from libweasyl import ratings
from weasyl.cache import region
# For blocked tags, `rating` refers to the lowest rating for which that tag is
# blocked; for example, (X, Y, 10) would block tag Y for all ratings, whereas
# (X, Y, 30) would block tag Y for only adult ratings.
def check(userid, submitid=None, charid=None, journalid=None):
"""
Returns True if the submission, character, or journal contains a search tag
that the user has blocked, else False.
"""
if not userid:
return False
if submitid:
map_table = "searchmapsubmit"
content_table = "submission"
id_field = "submitid"
target = submitid
elif charid:
map_table = "searchmapchar"
content_table = "character"
id_field = "charid"
target = charid
else:
map_table = "searchmapjournal"
content_table = "journal"
id_field = "journalid"
target = journalid
query = """
SELECT EXISTS (
SELECT 0 FROM {map_table} searchmap
INNER JOIN {content_table} content ON searchmap.targetid = content.{id_field}
WHERE searchmap.targetid = %(id)s
AND content.userid != %(user)s
AND searchmap.tagid IN (
SELECT blocktag.tagid FROM blocktag
WHERE userid = %(user)s AND blocktag.rating <= content.rating)) AS block
""".format(map_table=map_table, content_table=content_table, id_field=id_field)
return d.engine.execute(query, id=target, user=userid).first().block
def check_list(rating, tags, blocked_tags):
return any(rating >= b['rating'] and b['title'] in tags for b in blocked_tags)
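# A minimal usage sketch for check_list (hypothetical values; `blocked_tags`
# has the same shape as the rows returned by select() below):
#
#     blocked = [{'title': 'gore', 'rating': 30}]
#     check_list(10, {'cute', 'gore'}, blocked)   # False: rating below the threshold
#     check_list(40, {'cute', 'gore'}, blocked)   # True: blocked tag at/above threshold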
def suggest(userid, target):
if not target:
return []
return d.execute("SELECT title FROM searchtag"
" WHERE title LIKE '%s%%' AND tagid NOT IN (SELECT tagid FROM blocktag WHERE userid = %i)"
" ORDER BY title LIMIT 10", [target, userid], options="within")
def select(userid):
return [{
"title": i[0],
"rating": i[1],
} for i in d.execute("SELECT st.title, bt.rating FROM searchtag st "
" INNER JOIN blocktag bt ON st.tagid = bt.tagid"
" WHERE bt.userid = %i"
" ORDER BY st.title", [userid])]
@region.cache_on_arguments()
@d.record_timing
def cached_select(userid):
return select(userid)
def insert(userid, tagid=None, title=None, rating=None):
if rating not in ratings.CODE_MAP:
rating = ratings.GENERAL.code
profile.check_user_rating_allowed(userid, rating)
if tagid:
tag = int(tagid)
try:
d.engine.execute("INSERT INTO blocktag VALUES (%s, %s, %s)", userid, tag, rating)
except PostgresError:
return
elif title:
tag_name = d.get_search_tag(title)
try:
d.engine.execute("""
INSERT INTO blocktag (userid, tagid, rating)
VALUES (
%(user)s,
(SELECT tagid FROM searchtag WHERE title = %(tag_name)s),
%(rating)s
)
""", user=userid, tag_name=tag_name, rating=rating)
except PostgresError:
try:
tag = searchtag.create(title)
except PostgresError:
return
d.engine.execute("INSERT INTO blocktag VALUES (%s, %s, %s)", userid, tag, rating)
cached_select.invalidate(userid)
def remove(userid, tagid=None, title=None):
if tagid:
d.execute("DELETE FROM blocktag WHERE (userid, tagid) = (%i, %i)", [userid, tagid])
elif title:
d.execute("DELETE FROM blocktag WHERE (userid, tagid) = (%i, (SELECT tagid FROM searchtag WHERE title = '%s'))",
[userid, d.get_search_tag(title)])
cached_select.invalidate(userid)
|
apache-2.0
| -8,802,983,478,453,803,000
| 30.193798
| 120
| 0.587227
| false
| 3.850718
| false
| false
| false
|
EndyKaufman/django-postgres-angularjs-blog
|
app/manager/migrations/0006_properties.py
|
1
|
1170
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-24 14:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('manager', '0005_add_fields_and_set_defaults'),
]
operations = [
migrations.CreateModel(
name='Properties',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=512, unique=True)),
('value', models.TextField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='date created')),
('updated', models.DateTimeField(auto_now=True, null=True, verbose_name='date updated')),
('created_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
|
mit
| -7,519,078,480,483,792,000
| 39.344828
| 150
| 0.62906
| false
| 4.020619
| false
| false
| false
|
bert9bert/statsmodels
|
statsmodels/tsa/statespace/kalman_filter.py
|
2
|
86079
|
"""
State Space Representation and Kalman Filter
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
import numpy as np
from .representation import OptionWrapper, Representation, FrozenRepresentation
from .tools import (validate_vector_shape, validate_matrix_shape,
reorder_missing_matrix, reorder_missing_vector)
from . import tools
from statsmodels.tools.sm_exceptions import ValueWarning
# Define constants
FILTER_CONVENTIONAL = 0x01 # Durbin and Koopman (2012), Chapter 4
FILTER_EXACT_INITIAL = 0x02 # ibid., Chapter 5.6
FILTER_AUGMENTED = 0x04 # ibid., Chapter 5.7
FILTER_SQUARE_ROOT = 0x08 # ibid., Chapter 6.3
FILTER_UNIVARIATE = 0x10 # ibid., Chapter 6.4
FILTER_COLLAPSED = 0x20 # ibid., Chapter 6.5
FILTER_EXTENDED = 0x40 # ibid., Chapter 10.2
FILTER_UNSCENTED = 0x80 # ibid., Chapter 10.3
INVERT_UNIVARIATE = 0x01
SOLVE_LU = 0x02
INVERT_LU = 0x04
SOLVE_CHOLESKY = 0x08
INVERT_CHOLESKY = 0x10
STABILITY_FORCE_SYMMETRY = 0x01
MEMORY_STORE_ALL = 0
MEMORY_NO_FORECAST = 0x01
MEMORY_NO_PREDICTED = 0x02
MEMORY_NO_FILTERED = 0x04
MEMORY_NO_LIKELIHOOD = 0x08
MEMORY_NO_GAIN = 0x10
MEMORY_NO_SMOOTHING = 0x20
MEMORY_NO_STD_FORECAST = 0x40
MEMORY_CONSERVE = (
MEMORY_NO_FORECAST | MEMORY_NO_PREDICTED | MEMORY_NO_FILTERED |
MEMORY_NO_LIKELIHOOD | MEMORY_NO_GAIN | MEMORY_NO_SMOOTHING |
MEMORY_NO_STD_FORECAST
)
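# These option constants are plain integer bitmasks, so several flags can be
# combined with bitwise OR and tested with bitwise AND. A minimal sketch using
# only the constants defined above:
#
#     method = FILTER_CONVENTIONAL | FILTER_COLLAPSED   # 0x01 | 0x20 == 33
#     bool(method & FILTER_COLLAPSED)                   # True
#     bool(method & FILTER_UNIVARIATE)                  # False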
TIMING_INIT_PREDICTED = 0
TIMING_INIT_FILTERED = 1
class KalmanFilter(Representation):
r"""
State space representation of a time series process, with Kalman filter
Parameters
----------
k_endog : array_like or integer
The observed time-series process :math:`y` if array like or the
number of variables in the process if an integer.
k_states : int
The dimension of the unobserved state process.
k_posdef : int, optional
The dimension of a guaranteed positive definite covariance matrix
        describing the shocks in the transition equation. Must be less than
or equal to `k_states`. Default is `k_states`.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
results_class : class, optional
Default results class to use to save filtering output. Default is
`FilterResults`. If specified, class must extend from `FilterResults`.
**kwargs
Keyword arguments may be used to provide values for the filter,
inversion, and stability methods. See `set_filter_method`,
`set_inversion_method`, and `set_stability_method`.
Keyword arguments may be used to provide default values for state space
matrices. See `Representation` for more details.
Notes
-----
There are several types of options available for controlling the Kalman
filter operation. All options are internally held as bitmasks, but can be
manipulated by setting class attributes, which act like boolean flags. For
more information, see the `set_*` class method documentation. The options
are:
filter_method
The filtering method controls aspects of which
Kalman filtering approach will be used.
inversion_method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
stability_method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
conserve_memory
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
filter_timing
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
The `filter_method` and `inversion_method` options intentionally allow
the possibility that multiple methods will be indicated. In the case that
multiple methods are selected, the underlying Kalman filter will attempt to
    select the optimal method given the input data.
For example, it may be that INVERT_UNIVARIATE and SOLVE_CHOLESKY are
indicated (this is in fact the default case). In this case, if the
endogenous vector is 1-dimensional (`k_endog` = 1), then INVERT_UNIVARIATE
is used and inversion reduces to simple division, and if it has a larger
dimension, the Cholesky decomposition along with linear solving (rather
than explicit matrix inversion) is used. If only SOLVE_CHOLESKY had been
set, then the Cholesky decomposition method would *always* be used, even in
the case of 1-dimensional data.
See Also
--------
FilterResults
statsmodels.tsa.statespace.representation.Representation
"""
filter_methods = [
'filter_conventional', 'filter_exact_initial', 'filter_augmented',
'filter_square_root', 'filter_univariate', 'filter_collapsed',
'filter_extended', 'filter_unscented'
]
filter_conventional = OptionWrapper('filter_method', FILTER_CONVENTIONAL)
"""
(bool) Flag for conventional Kalman filtering.
"""
filter_exact_initial = OptionWrapper('filter_method', FILTER_EXACT_INITIAL)
"""
(bool) Flag for exact initial Kalman filtering. Not implemented.
"""
filter_augmented = OptionWrapper('filter_method', FILTER_AUGMENTED)
"""
(bool) Flag for augmented Kalman filtering. Not implemented.
"""
filter_square_root = OptionWrapper('filter_method', FILTER_SQUARE_ROOT)
"""
(bool) Flag for square-root Kalman filtering. Not implemented.
"""
filter_univariate = OptionWrapper('filter_method', FILTER_UNIVARIATE)
"""
(bool) Flag for univariate filtering of multivariate observation vector.
"""
filter_collapsed = OptionWrapper('filter_method', FILTER_COLLAPSED)
"""
(bool) Flag for Kalman filtering with collapsed observation vector.
"""
filter_extended = OptionWrapper('filter_method', FILTER_EXTENDED)
"""
(bool) Flag for extended Kalman filtering. Not implemented.
"""
filter_unscented = OptionWrapper('filter_method', FILTER_UNSCENTED)
"""
(bool) Flag for unscented Kalman filtering. Not implemented.
"""
inversion_methods = [
'invert_univariate', 'solve_lu', 'invert_lu', 'solve_cholesky',
'invert_cholesky'
]
invert_univariate = OptionWrapper('inversion_method', INVERT_UNIVARIATE)
"""
(bool) Flag for univariate inversion method (recommended).
"""
solve_lu = OptionWrapper('inversion_method', SOLVE_LU)
"""
(bool) Flag for LU and linear solver inversion method.
"""
invert_lu = OptionWrapper('inversion_method', INVERT_LU)
"""
(bool) Flag for LU inversion method.
"""
solve_cholesky = OptionWrapper('inversion_method', SOLVE_CHOLESKY)
"""
(bool) Flag for Cholesky and linear solver inversion method (recommended).
"""
invert_cholesky = OptionWrapper('inversion_method', INVERT_CHOLESKY)
"""
(bool) Flag for Cholesky inversion method.
"""
stability_methods = ['stability_force_symmetry']
stability_force_symmetry = (
OptionWrapper('stability_method', STABILITY_FORCE_SYMMETRY)
)
"""
(bool) Flag for enforcing covariance matrix symmetry
"""
memory_options = [
'memory_store_all', 'memory_no_forecast', 'memory_no_predicted',
'memory_no_filtered', 'memory_no_likelihood', 'memory_no_gain',
'memory_no_smoothing', 'memory_no_std_forecast', 'memory_conserve'
]
memory_store_all = OptionWrapper('conserve_memory', MEMORY_STORE_ALL)
"""
(bool) Flag for storing all intermediate results in memory (default).
"""
memory_no_forecast = OptionWrapper('conserve_memory', MEMORY_NO_FORECAST)
"""
(bool) Flag to prevent storing forecasts.
"""
memory_no_predicted = OptionWrapper('conserve_memory', MEMORY_NO_PREDICTED)
"""
(bool) Flag to prevent storing predicted state and covariance matrices.
"""
memory_no_filtered = OptionWrapper('conserve_memory', MEMORY_NO_FILTERED)
"""
(bool) Flag to prevent storing filtered state and covariance matrices.
"""
memory_no_likelihood = (
OptionWrapper('conserve_memory', MEMORY_NO_LIKELIHOOD)
)
"""
(bool) Flag to prevent storing likelihood values for each observation.
"""
memory_no_gain = OptionWrapper('conserve_memory', MEMORY_NO_GAIN)
"""
(bool) Flag to prevent storing the Kalman gain matrices.
"""
memory_no_smoothing = OptionWrapper('conserve_memory', MEMORY_NO_SMOOTHING)
"""
    (bool) Flag to prevent storing temporary variables related to Kalman smoothing.
"""
memory_no_std_forecast = (
OptionWrapper('conserve_memory', MEMORY_NO_STD_FORECAST))
"""
(bool) Flag to prevent storing standardized forecast errors.
"""
memory_conserve = OptionWrapper('conserve_memory', MEMORY_CONSERVE)
"""
(bool) Flag to conserve the maximum amount of memory.
"""
timing_options = [
'timing_init_predicted', 'timing_init_filtered'
]
timing_init_predicted = OptionWrapper('filter_timing',
TIMING_INIT_PREDICTED)
"""
(bool) Flag for the default timing convention (Durbin and Koopman, 2012).
"""
timing_init_filtered = OptionWrapper('filter_timing', TIMING_INIT_FILTERED)
"""
    (bool) Flag for the alternate timing convention (Kim and Nelson, 1999).
"""
# Default filter options
filter_method = FILTER_CONVENTIONAL
"""
(int) Filtering method bitmask.
"""
inversion_method = INVERT_UNIVARIATE | SOLVE_CHOLESKY
"""
(int) Inversion method bitmask.
"""
stability_method = STABILITY_FORCE_SYMMETRY
"""
(int) Stability method bitmask.
"""
conserve_memory = MEMORY_STORE_ALL
"""
(int) Memory conservation bitmask.
"""
filter_timing = TIMING_INIT_PREDICTED
"""
(int) Filter timing.
"""
def __init__(self, k_endog, k_states, k_posdef=None,
loglikelihood_burn=0, tolerance=1e-19, results_class=None,
kalman_filter_classes=None, **kwargs):
super(KalmanFilter, self).__init__(
k_endog, k_states, k_posdef, **kwargs
)
# Setup the underlying Kalman filter storage
self._kalman_filters = {}
# Filter options
self.loglikelihood_burn = loglikelihood_burn
self.results_class = (
results_class if results_class is not None else FilterResults
)
# Options
self.prefix_kalman_filter_map = (
kalman_filter_classes
if kalman_filter_classes is not None
else tools.prefix_kalman_filter_map.copy())
self.set_filter_method(**kwargs)
self.set_inversion_method(**kwargs)
self.set_stability_method(**kwargs)
self.set_conserve_memory(**kwargs)
self.set_filter_timing(**kwargs)
self.tolerance = tolerance
@property
def _kalman_filter(self):
prefix = self.prefix
if prefix in self._kalman_filters:
return self._kalman_filters[prefix]
return None
def _initialize_filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
tolerance=None, filter_timing=None,
loglikelihood_burn=None):
if filter_method is None:
filter_method = self.filter_method
if inversion_method is None:
inversion_method = self.inversion_method
if stability_method is None:
stability_method = self.stability_method
if conserve_memory is None:
conserve_memory = self.conserve_memory
if loglikelihood_burn is None:
loglikelihood_burn = self.loglikelihood_burn
if filter_timing is None:
filter_timing = self.filter_timing
if tolerance is None:
tolerance = self.tolerance
# Make sure we have endog
if self.endog is None:
raise RuntimeError('Must bind a dataset to the model before'
' filtering or smoothing.')
# Initialize the representation matrices
prefix, dtype, create_statespace = self._initialize_representation()
# Determine if we need to (re-)create the filter
# (definitely need to recreate if we recreated the _statespace object)
create_filter = create_statespace or prefix not in self._kalman_filters
if not create_filter:
kalman_filter = self._kalman_filters[prefix]
create_filter = (
not kalman_filter.conserve_memory == conserve_memory or
not kalman_filter.loglikelihood_burn == loglikelihood_burn
)
# If the dtype-specific _kalman_filter does not exist (or if we need
# to re-create it), create it
if create_filter:
if prefix in self._kalman_filters:
# Delete the old filter
del self._kalman_filters[prefix]
# Setup the filter
cls = self.prefix_kalman_filter_map[prefix]
self._kalman_filters[prefix] = cls(
self._statespaces[prefix], filter_method, inversion_method,
stability_method, conserve_memory, filter_timing, tolerance,
loglikelihood_burn
)
# Otherwise, update the filter parameters
else:
kalman_filter = self._kalman_filters[prefix]
kalman_filter.set_filter_method(filter_method, False)
kalman_filter.inversion_method = inversion_method
kalman_filter.stability_method = stability_method
kalman_filter.filter_timing = filter_timing
kalman_filter.tolerance = tolerance
# conserve_memory and loglikelihood_burn changes always lead to
# re-created filters
return prefix, dtype, create_filter, create_statespace
def set_filter_method(self, filter_method=None, **kwargs):
r"""
Set the filtering method
The filtering method controls aspects of which Kalman filtering
approach will be used.
Parameters
----------
filter_method : integer, optional
Bitmask value to set the filter method to. See notes for details.
**kwargs
Keyword arguments may be used to influence the filter method by
setting individual boolean flags. See notes for details.
Notes
-----
The filtering method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
FILTER_CONVENTIONAL = 0x01
Conventional Kalman filter.
FILTER_UNIVARIATE = 0x10
Univariate approach to Kalman filtering. Overrides conventional
method if both are specified.
FILTER_COLLAPSED = 0x20
Collapsed approach to Kalman filtering. Will be used *in addition*
to conventional or univariate filtering.
Note that only the first method is available if using a Scipy version
older than 0.16.
If the bitmask is set directly via the `filter_method` argument, then
the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the filter method may also be specified by directly modifying
the class attributes which are defined similarly to the keyword
arguments.
The default filtering method is FILTER_CONVENTIONAL.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.filter_method
1
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
>>> mod.ssm.set_filter_method(filter_univariate=False,
... filter_collapsed=True)
>>> mod.ssm.filter_method
33
>>> mod.ssm.set_filter_method(filter_method=1)
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate
False
>>> mod.ssm.filter_collapsed
False
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
"""
if filter_method is not None:
self.filter_method = filter_method
for name in KalmanFilter.filter_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
if self._compatibility_mode and not self.filter_method == 1:
raise NotImplementedError('Only conventional Kalman filtering'
' is available. Consider updating'
' dependencies for more options.')
def set_inversion_method(self, inversion_method=None, **kwargs):
r"""
Set the inversion method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
Parameters
----------
inversion_method : integer, optional
Bitmask value to set the inversion method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the inversion method by
setting individual boolean flags. See notes for details.
Notes
-----
The inversion method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
INVERT_UNIVARIATE = 0x01
If the endogenous time series is univariate, then inversion can be
performed by simple division. If this flag is set and the time
series is univariate, then division will always be used even if
other flags are also set.
SOLVE_LU = 0x02
Use an LU decomposition along with a linear solver (rather than
ever actually inverting the matrix).
INVERT_LU = 0x04
Use an LU decomposition along with typical matrix inversion.
SOLVE_CHOLESKY = 0x08
Use a Cholesky decomposition along with a linear solver.
INVERT_CHOLESKY = 0x10
            Use a Cholesky decomposition along with typical matrix inversion.
If the bitmask is set directly via the `inversion_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the inversion method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default inversion method is `INVERT_UNIVARIATE | SOLVE_CHOLESKY`
Several things to keep in mind are:
- If the filtering method is specified to be univariate, then simple
division is always used regardless of the dimension of the endogenous
time series.
- Cholesky decomposition is about twice as fast as LU decomposition,
but it requires that the matrix be positive definite. While this
should generally be true, it may not be in every case.
- Using a linear solver rather than true matrix inversion is generally
faster and is numerically more stable.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.inversion_method
1
>>> mod.ssm.solve_cholesky
True
>>> mod.ssm.invert_univariate
True
>>> mod.ssm.invert_lu
False
>>> mod.ssm.invert_univariate = False
>>> mod.ssm.inversion_method
8
>>> mod.ssm.set_inversion_method(solve_cholesky=False,
... invert_cholesky=True)
>>> mod.ssm.inversion_method
16
"""
if inversion_method is not None:
self.inversion_method = inversion_method
for name in KalmanFilter.inversion_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_stability_method(self, stability_method=None, **kwargs):
r"""
Set the numerical stability method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
Parameters
----------
stability_method : integer, optional
Bitmask value to set the stability method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the stability method by
setting individual boolean flags. See notes for details.
Notes
-----
The stability method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
STABILITY_FORCE_SYMMETRY = 0x01
If this flag is set, symmetry of the predicted state covariance
matrix is enforced at each iteration of the filter, where each
element is set to the average of the corresponding elements in the
upper and lower triangle.
If the bitmask is set directly via the `stability_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the stability method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default stability method is `STABILITY_FORCE_SYMMETRY`
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.stability_method
1
>>> mod.ssm.stability_force_symmetry
True
>>> mod.ssm.stability_force_symmetry = False
>>> mod.ssm.stability_method
0
"""
if stability_method is not None:
self.stability_method = stability_method
for name in KalmanFilter.stability_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_conserve_memory(self, conserve_memory=None, **kwargs):
r"""
Set the memory conservation method
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
Parameters
----------
conserve_memory : integer, optional
Bitmask value to set the memory conservation method to. See notes
for details.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags. See notes for details.
Notes
-----
The memory conservation method is defined by a collection of boolean
flags, and is internally stored as a bitmask. The methods available
are:
MEMORY_STORE_ALL = 0
Store all intermediate matrices. This is the default value.
MEMORY_NO_FORECAST = 0x01
Do not store the forecast, forecast error, or forecast error
covariance matrices. If this option is used, the `predict` method
from the results class is unavailable.
MEMORY_NO_PREDICTED = 0x02
Do not store the predicted state or predicted state covariance
matrices.
MEMORY_NO_FILTERED = 0x04
Do not store the filtered state or filtered state covariance
matrices.
MEMORY_NO_LIKELIHOOD = 0x08
Do not store the vector of loglikelihood values for each
observation. Only the sum of the loglikelihood values is stored.
MEMORY_NO_GAIN = 0x10
Do not store the Kalman gain matrices.
MEMORY_NO_SMOOTHING = 0x20
            Do not store temporary variables related to Kalman smoothing. If
this option is used, smoothing is unavailable.
        MEMORY_NO_STD_FORECAST = 0x40
Do not store standardized forecast errors.
MEMORY_CONSERVE
Do not store any intermediate matrices.
Note that if using a Scipy version less than 0.16, the options
MEMORY_NO_GAIN, MEMORY_NO_SMOOTHING, and MEMORY_NO_STD_FORECAST
have no effect.
If the bitmask is set directly via the `conserve_memory` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the memory conservation method may also be specified by
directly modifying the class attributes which are defined similarly to
the keyword arguments.
The default memory conservation method is `MEMORY_STORE_ALL`, so that
all intermediate matrices are stored.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> mod.ssm.conserve_memory
0
>>> mod.ssm.memory_no_predicted
False
>>> mod.ssm.memory_no_predicted = True
>>> mod.ssm.conserve_memory
2
>>> mod.ssm.set_conserve_memory(memory_no_filtered=True,
... memory_no_forecast=True)
>>> mod.ssm.conserve_memory
7
"""
if conserve_memory is not None:
self.conserve_memory = conserve_memory
for name in KalmanFilter.memory_options:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_filter_timing(self, alternate_timing=None, **kwargs):
r"""
Set the filter timing convention
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
Parameters
----------
alternate_timing : integer, optional
Whether or not to use the alternate timing convention. Default is
unspecified.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags. See notes for details.
"""
if alternate_timing is not None:
self.filter_timing = int(alternate_timing)
if 'timing_init_predicted' in kwargs:
self.filter_timing = int(not kwargs['timing_init_predicted'])
if 'timing_init_filtered' in kwargs:
self.filter_timing = int(kwargs['timing_init_filtered'])
if (self._compatibility_mode and
self.filter_timing == TIMING_INIT_FILTERED):
raise NotImplementedError('Only "predicted" Kalman filter'
' timing is available. Consider'
' updating dependencies for more'
' options.')
def _filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
filter_timing=None, tolerance=None, loglikelihood_burn=None,
complex_step=False):
# Initialize the filter
prefix, dtype, create_filter, create_statespace = (
self._initialize_filter(
filter_method, inversion_method, stability_method,
conserve_memory, filter_timing, tolerance, loglikelihood_burn
)
)
kfilter = self._kalman_filters[prefix]
# Initialize the state
self._initialize_state(prefix=prefix, complex_step=complex_step)
# Run the filter
kfilter()
tmp = np.array(kfilter.loglikelihood)
tmp2 = np.array(kfilter.predicted_state)
return kfilter
def filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None, filter_timing=None,
tolerance=None, loglikelihood_burn=None, complex_step=False):
r"""
Apply the Kalman filter to the statespace model.
Parameters
----------
filter_method : int, optional
Determines which Kalman filter to use. Default is conventional.
inversion_method : int, optional
Determines which inversion technique to use. Default is by Cholesky
decomposition.
stability_method : int, optional
Determines which numerical stability techniques to use. Default is
to enforce symmetry of the predicted state covariance matrix.
conserve_memory : int, optional
Determines what output from the filter to store. Default is to
store everything.
filter_timing : int, optional
Determines the timing convention of the filter. Default is that
from Durbin and Koopman (2012), in which the filter is initialized
with predicted values.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
Notes
-----
This function by default does not compute variables required for
smoothing.
"""
if conserve_memory is None:
conserve_memory = self.conserve_memory | MEMORY_NO_SMOOTHING
# Run the filter
kfilter = self._filter(
filter_method, inversion_method, stability_method, conserve_memory,
filter_timing, tolerance, loglikelihood_burn, complex_step)
tmp = np.array(kfilter.loglikelihood)
# Create the results object
results = self.results_class(self)
results.update_representation(self)
results.update_filter(kfilter)
return results
def loglike(self, **kwargs):
r"""
Calculate the loglikelihood associated with the statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Returns
-------
loglike : float
The joint loglikelihood.
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD
kfilter = self._filter(**kwargs)
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
return np.sum(kfilter.loglikelihood[loglikelihood_burn:])
def loglikeobs(self, **kwargs):
r"""
Calculate the loglikelihood for each observation associated with the
statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Notes
-----
If `loglikelihood_burn` is positive, then the entries in the returned
loglikelihood vector are set to be zero for those initial time periods.
Returns
-------
loglike : array of float
Array of loglikelihood values for each observation.
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD
kfilter = self._filter(**kwargs)
llf_obs = np.array(kfilter.loglikelihood, copy=True)
# Set any burned observations to have zero likelihood
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
llf_obs[:loglikelihood_burn] = 0
return llf_obs
def simulate(self, nsimulations, measurement_shocks=None,
state_shocks=None, initial_state=None):
r"""
Simulate a new time series following the state space model
Parameters
----------
nsimulations : int
The number of observations to simulate. If the model is
time-invariant this can be any number. If the model is
time-varying, then this number must be less than or equal to the
            number of observations.
measurement_shocks : array_like, optional
If specified, these are the shocks to the measurement equation,
:math:`\varepsilon_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
same as in the state space model.
state_shocks : array_like, optional
If specified, these are the shocks to the state equation,
:math:`\eta_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
same as in the state space model.
initial_state : array_like, optional
If specified, this is the state vector at time zero, which should
be shaped (`k_states` x 1), where `k_states` is the same as in the
state space model. If unspecified, but the model has been
initialized, then that initialization is used. If unspecified and
the model has not been initialized, then a vector of zeros is used.
Note that this is not included in the returned `simulated_states`
array.
Returns
-------
simulated_obs : array
An (nsimulations x k_endog) array of simulated observations.
simulated_states : array
An (nsimulations x k_states) array of simulated states.
"""
time_invariant = self.time_invariant
# Check for valid number of simulations
if not time_invariant and nsimulations > self.nobs:
raise ValueError('In a time-varying model, cannot create more'
' simulations than there are observations.')
# Check / generate measurement shocks
if measurement_shocks is not None:
measurement_shocks = np.array(measurement_shocks)
if measurement_shocks.ndim == 0:
measurement_shocks = measurement_shocks[np.newaxis, np.newaxis]
elif measurement_shocks.ndim == 1:
measurement_shocks = measurement_shocks[:, np.newaxis]
if not measurement_shocks.shape == (nsimulations, self.k_endog):
raise ValueError('Invalid shape of provided measurement'
' shocks. Required (%d, %d)'
% (nsimulations, self.k_endog))
elif self.shapes['obs_cov'][-1] == 1:
measurement_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_endog), cov=self['obs_cov'],
size=nsimulations)
# Check / generate state shocks
if state_shocks is not None:
state_shocks = np.array(state_shocks)
if state_shocks.ndim == 0:
state_shocks = state_shocks[np.newaxis, np.newaxis]
elif state_shocks.ndim == 1:
state_shocks = state_shocks[:, np.newaxis]
if not state_shocks.shape == (nsimulations, self.k_posdef):
raise ValueError('Invalid shape of provided state shocks.'
' Required (%d, %d).'
% (nsimulations, self.k_posdef))
elif self.shapes['state_cov'][-1] == 1:
state_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef), cov=self['state_cov'],
size=nsimulations)
# Get the initial states
if initial_state is not None:
initial_state = np.array(initial_state)
if initial_state.ndim == 0:
initial_state = initial_state[np.newaxis]
elif (initial_state.ndim > 1 and
not initial_state.shape == (self.k_states, 1)):
raise ValueError('Invalid shape of provided initial state'
' vector. Required (%d, 1)' % self.k_states)
elif self.initialization == 'known':
initial_state = np.random.multivariate_normal(
self._initial_state, self._initial_state_cov)
elif self.initialization == 'stationary':
from scipy.linalg import solve_discrete_lyapunov
# (I - T)^{-1} c = x => (I - T) x = c
initial_state_mean = np.linalg.solve(
np.eye(self.k_states) - self['transition', :, :, 0],
self['state_intercept', :, 0])
R = self['selection', :, :, 0]
Q = self['state_cov', :, :, 0]
selected_state_cov = R.dot(Q).dot(R.T)
initial_state_cov = solve_discrete_lyapunov(
self['transition', :, :, 0], selected_state_cov)
initial_state = np.random.multivariate_normal(
initial_state_mean, initial_state_cov)
elif self.initialization == 'approximate_diffuse':
initial_state = np.zeros(self.k_states)
else:
initial_state = np.zeros(self.k_states)
return self._simulate(nsimulations, measurement_shocks, state_shocks,
initial_state)
def _simulate(self, nsimulations, measurement_shocks, state_shocks,
initial_state):
time_invariant = self.time_invariant
# Holding variables for the simulations
simulated_obs = np.zeros((nsimulations, self.k_endog),
dtype=self.dtype)
simulated_states = np.zeros((nsimulations+1, self.k_states),
dtype=self.dtype)
simulated_states[0] = initial_state
# Perform iterations to create the new time series
obs_intercept_t = 0
design_t = 0
state_intercept_t = 0
transition_t = 0
selection_t = 0
for t in range(nsimulations):
            # Get the current shocks (this accommodates time-varying matrices)
if measurement_shocks is None:
measurement_shock = np.random.multivariate_normal(
mean=np.zeros(self.k_endog), cov=self['obs_cov', :, :, t])
else:
measurement_shock = measurement_shocks[t]
if state_shocks is None:
state_shock = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef),
cov=self['state_cov', :, :, t])
else:
state_shock = state_shocks[t]
# Get current-iteration matrices
if not time_invariant:
obs_intercept_t = 0 if self.obs_intercept.shape[-1] == 1 else t
design_t = 0 if self.design.shape[-1] == 1 else t
state_intercept_t = (
0 if self.state_intercept.shape[-1] == 1 else t)
transition_t = 0 if self.transition.shape[-1] == 1 else t
selection_t = 0 if self.selection.shape[-1] == 1 else t
obs_intercept = self['obs_intercept', :, obs_intercept_t]
design = self['design', :, :, design_t]
state_intercept = self['state_intercept', :, state_intercept_t]
transition = self['transition', :, :, transition_t]
selection = self['selection', :, :, selection_t]
# Iterate the measurement equation
simulated_obs[t] = (
obs_intercept + np.dot(design, simulated_states[t]) +
measurement_shock)
# Iterate the state equation
simulated_states[t+1] = (
state_intercept + np.dot(transition, simulated_states[t]) +
np.dot(selection, state_shock))
return simulated_obs, simulated_states[:-1]
def impulse_responses(self, steps=10, impulse=0, orthogonalized=False,
cumulative=False, **kwargs):
r"""
Impulse response function
Parameters
----------
steps : int, optional
The number of steps for which impulse responses are calculated.
Default is 10. Note that the initial impulse is not counted as a
step, so if `steps=1`, the output will have 2 entries.
impulse : int or array_like
If an integer, the state innovation to pulse; must be between 0
and `k_posdef-1` where `k_posdef` is the same as in the state
space model. Alternatively, a custom impulse vector may be
provided; must be a column vector with shape `(k_posdef, 1)`.
orthogonalized : boolean, optional
Whether or not to perform impulse using orthogonalized innovations.
            Note that this will also affect custom `impulse` vectors. Default
is False.
cumulative : boolean, optional
Whether or not to return cumulative impulse responses. Default is
False.
**kwargs
If the model is time-varying and `steps` is greater than the number
of observations, any of the state space representation matrices
that are time-varying must have updated values provided for the
out-of-sample steps.
For example, if `design` is a time-varying component, `nobs` is 10,
and `steps` is 15, a (`k_endog` x `k_states` x 5) matrix must be
provided with the new design matrix values.
Returns
-------
impulse_responses : array
Responses for each endogenous variable due to the impulse
given by the `impulse` argument. A (steps + 1 x k_endog) array.
Notes
-----
Intercepts in the measurement and state equation are ignored when
calculating impulse responses.
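        Examples
        --------
        An illustrative sketch only (hypothetical parameter values), using the
        time-invariant branch described above and assuming ``KalmanFilter``
        from this module is in scope:
        >>> import numpy as np
        >>> mod = KalmanFilter(k_endog=1, k_states=1)
        >>> mod['design', 0, 0] = 1.
        >>> mod['transition', 0, 0] = 0.5
        >>> mod['selection', 0, 0] = 1.
        >>> mod['state_cov', 0, 0] = 1.
        >>> irf = mod.impulse_responses(steps=5)  # shape (6, 1); decays as 0.5**t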
"""
# Since the first step is the impulse itself, we actually want steps+1
steps += 1
# Check for what kind of impulse we want
if type(impulse) == int:
if impulse >= self.k_posdef or impulse < 0:
raise ValueError('Invalid value for `impulse`. Must be the'
' index of one of the state innovations.')
# Create the (non-orthogonalized) impulse vector
idx = impulse
impulse = np.zeros(self.k_posdef)
impulse[idx] = 1
else:
impulse = np.array(impulse)
if impulse.ndim > 1:
impulse = np.squeeze(impulse)
if not impulse.shape == (self.k_posdef,):
raise ValueError('Invalid impulse vector. Must be shaped'
' (%d,)' % self.k_posdef)
# Orthogonalize the impulses, if requested, using Cholesky on the
# first state covariance matrix
if orthogonalized:
state_chol = np.linalg.cholesky(self.state_cov[:, :, 0])
impulse = np.dot(state_chol, impulse)
# If we have a time-invariant system, we can solve for the IRF directly
if self.time_invariant:
# Get the state space matrices
design = self.design[:, :, 0]
transition = self.transition[:, :, 0]
selection = self.selection[:, :, 0]
# Holding arrays
irf = np.zeros((steps, self.k_endog), dtype=self.dtype)
states = np.zeros((steps, self.k_states), dtype=self.dtype)
# First iteration
states[0] = np.dot(selection, impulse)
irf[0] = np.dot(design, states[0])
# Iterations
for t in range(1, steps):
states[t] = np.dot(transition, states[t-1])
irf[t] = np.dot(design, states[t])
# Otherwise, create a new model
else:
# Get the basic model components
representation = {}
for name, shape in self.shapes.items():
if name in ['obs', 'obs_intercept', 'state_intercept']:
continue
representation[name] = getattr(self, name)
# Allow additional specification
warning = ('Model has time-invariant %s matrix, so the %s'
' argument to `irf` has been ignored.')
exception = ('Impulse response functions for models with'
' time-varying %s matrix requires an updated'
' time-varying matrix for any periods beyond those in'
' the original model.')
for name, shape in self.shapes.items():
if name in ['obs', 'obs_intercept', 'state_intercept']:
continue
if representation[name].shape[-1] == 1:
if name in kwargs:
warn(warning % (name, name), ValueWarning)
elif name not in kwargs:
raise ValueError(exception % name)
else:
mat = np.asarray(kwargs[name])
validate_matrix_shape(name, mat.shape, shape[0],
shape[1], steps)
if mat.ndim < 3 or not mat.shape[2] == steps:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
# Setup the new statespace representation
model_kwargs = {
'filter_method': self.filter_method,
'inversion_method': self.inversion_method,
'stability_method': self.stability_method,
'conserve_memory': self.conserve_memory,
'tolerance': self.tolerance,
'loglikelihood_burn': self.loglikelihood_burn
}
model_kwargs.update(representation)
model = KalmanFilter(np.zeros(self.endog.T.shape), self.k_states,
self.k_posdef, **model_kwargs)
model.initialize_approximate_diffuse()
model._initialize_filter()
model._initialize_state()
# Get the impulse response function via simulation of the state
# space model, but with other shocks set to zero
# Since simulate returns the zero-th period, we need to simulate
# steps + 1 periods and exclude the zero-th observation.
steps += 1
measurement_shocks = np.zeros((steps, self.k_endog))
state_shocks = np.zeros((steps, self.k_posdef))
state_shocks[0] = impulse
irf, _ = model.simulate(
steps, measurement_shocks=measurement_shocks,
state_shocks=state_shocks)
irf = irf[1:]
# Get the cumulative response if requested
if cumulative:
irf = np.cumsum(irf, axis=0)
return irf
class FilterResults(FrozenRepresentation):
"""
Results from applying the Kalman filter to a state space model.
Parameters
----------
model : Representation
A Statespace representation
Attributes
----------
nobs : int
Number of observations.
k_endog : int
The dimension of the observation series.
k_states : int
The dimension of the unobserved state process.
k_posdef : int
The dimension of a guaranteed positive definite
covariance matrix describing the shocks in the
measurement equation.
dtype : dtype
Datatype of representation matrices
prefix : str
BLAS prefix of representation matrices
shapes : dictionary of name,tuple
A dictionary recording the shapes of each of the
representation matrices as tuples.
endog : array
The observation vector.
design : array
The design matrix, :math:`Z`.
obs_intercept : array
The intercept for the observation equation, :math:`d`.
obs_cov : array
The covariance matrix for the observation equation :math:`H`.
transition : array
The transition matrix, :math:`T`.
state_intercept : array
The intercept for the transition equation, :math:`c`.
selection : array
The selection matrix, :math:`R`.
state_cov : array
The covariance matrix for the state equation :math:`Q`.
missing : array of bool
An array of the same size as `endog`, filled
with boolean values that are True if the
corresponding entry in `endog` is NaN and False
otherwise.
nmissing : array of int
An array of size `nobs`, where the ith entry
is the number (between 0 and `k_endog`) of NaNs in
the ith row of the `endog` array.
time_invariant : bool
Whether or not the representation matrices are time-invariant
initialization : str
Kalman filter initialization method.
initial_state : array_like
        The state vector used to initialize the Kalman filter.
initial_state_cov : array_like
        The state covariance matrix used to initialize the Kalman filter.
filter_method : int
Bitmask representing the Kalman filtering method
inversion_method : int
Bitmask representing the method used to
invert the forecast error covariance matrix.
stability_method : int
Bitmask representing the methods used to promote
numerical stability in the Kalman filter
recursions.
conserve_memory : int
Bitmask representing the selected memory conservation method.
filter_timing : int
Whether or not to use the alternate timing convention.
tolerance : float
The tolerance at which the Kalman filter
determines convergence to steady-state.
loglikelihood_burn : int
The number of initial periods during which
the loglikelihood is not recorded.
converged : bool
Whether or not the Kalman filter converged.
period_converged : int
The time period in which the Kalman filter converged.
filtered_state : array
The filtered state vector at each time period.
filtered_state_cov : array
The filtered state covariance matrix at each time period.
predicted_state : array
The predicted state vector at each time period.
predicted_state_cov : array
The predicted state covariance matrix at each time period.
kalman_gain : array
The Kalman gain at each time period.
forecasts : array
The one-step-ahead forecasts of observations at each time period.
forecasts_error : array
The forecast errors at each time period.
forecasts_error_cov : array
The forecast error covariance matrices at each time period.
llf_obs : array
The loglikelihood values at each time period.
"""
_filter_attributes = [
'filter_method', 'inversion_method', 'stability_method',
'conserve_memory', 'filter_timing', 'tolerance', 'loglikelihood_burn',
'converged', 'period_converged', 'filtered_state',
'filtered_state_cov', 'predicted_state', 'predicted_state_cov',
'tmp1', 'tmp2', 'tmp3', 'tmp4', 'forecasts',
'forecasts_error', 'forecasts_error_cov', 'llf_obs',
'collapsed_forecasts', 'collapsed_forecasts_error',
'collapsed_forecasts_error_cov',
]
_filter_options = (
KalmanFilter.filter_methods + KalmanFilter.stability_methods +
KalmanFilter.inversion_methods + KalmanFilter.memory_options
)
_attributes = FrozenRepresentation._model_attributes + _filter_attributes
def __init__(self, model):
super(FilterResults, self).__init__(model)
# Setup caches for uninitialized objects
self._kalman_gain = None
self._standardized_forecasts_error = None
def update_representation(self, model, only_options=False):
"""
Update the results to match a given model
Parameters
----------
model : Representation
The model object from which to take the updated values.
only_options : boolean, optional
If set to true, only the filter options are updated, and the state
space representation is not updated. Default is False.
Notes
-----
This method is rarely required except for internal usage.
"""
if not only_options:
super(FilterResults, self).update_representation(model)
# Save the options as boolean variables
for name in self._filter_options:
setattr(self, name, getattr(model, name, None))
def update_filter(self, kalman_filter):
"""
Update the filter results
Parameters
----------
kalman_filter : KalmanFilter
The model object from which to take the updated values.
Notes
-----
This method is rarely required except for internal usage.
"""
# State initialization
self.initial_state = np.array(
kalman_filter.model.initial_state, copy=True
)
self.initial_state_cov = np.array(
kalman_filter.model.initial_state_cov, copy=True
)
# Save Kalman filter parameters
self.filter_method = kalman_filter.filter_method
self.inversion_method = kalman_filter.inversion_method
self.stability_method = kalman_filter.stability_method
self.conserve_memory = kalman_filter.conserve_memory
self.filter_timing = kalman_filter.filter_timing
self.tolerance = kalman_filter.tolerance
self.loglikelihood_burn = kalman_filter.loglikelihood_burn
# Save Kalman filter output
self.converged = bool(kalman_filter.converged)
self.period_converged = kalman_filter.period_converged
self.filtered_state = np.array(kalman_filter.filtered_state, copy=True)
self.filtered_state_cov = np.array(
kalman_filter.filtered_state_cov, copy=True
)
self.predicted_state = np.array(
kalman_filter.predicted_state, copy=True
)
self.predicted_state_cov = np.array(
kalman_filter.predicted_state_cov, copy=True
)
# Reset caches
has_missing = np.sum(self.nmissing) > 0
if not self._compatibility_mode and not (self.memory_no_std_forecast or
self.invert_lu or
self.solve_lu or
self.filter_collapsed):
if has_missing:
self._standardized_forecasts_error = np.array(
reorder_missing_vector(
kalman_filter.standardized_forecast_error,
self.missing, prefix=self.prefix))
else:
self._standardized_forecasts_error = np.array(
kalman_filter.standardized_forecast_error, copy=True)
else:
self._standardized_forecasts_error = None
if not self._compatibility_mode:
# In the partially missing data case, all entries will
# be in the upper left submatrix rather than the correct placement
# Re-ordering does not make sense in the collapsed case.
if has_missing and (not self.memory_no_gain and
not self.filter_collapsed):
self._kalman_gain = np.array(reorder_missing_matrix(
kalman_filter.kalman_gain, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp1 = np.array(reorder_missing_matrix(
kalman_filter.tmp1, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp2 = np.array(reorder_missing_vector(
kalman_filter.tmp2, self.missing, prefix=self.prefix))
self.tmp3 = np.array(reorder_missing_matrix(
kalman_filter.tmp3, self.missing, reorder_rows=True,
prefix=self.prefix))
self.tmp4 = np.array(reorder_missing_matrix(
kalman_filter.tmp4, self.missing, reorder_cols=True,
reorder_rows=True, prefix=self.prefix))
else:
self._kalman_gain = np.array(
kalman_filter.kalman_gain, copy=True)
self.tmp1 = np.array(kalman_filter.tmp1, copy=True)
self.tmp2 = np.array(kalman_filter.tmp2, copy=True)
self.tmp3 = np.array(kalman_filter.tmp3, copy=True)
self.tmp4 = np.array(kalman_filter.tmp4, copy=True)
else:
self._kalman_gain = None
        # Note: use forecasts rather than forecast, so as not to interfere
# with the `forecast` methods in subclasses
self.forecasts = np.array(kalman_filter.forecast, copy=True)
self.forecasts_error = np.array(
kalman_filter.forecast_error, copy=True
)
self.forecasts_error_cov = np.array(
kalman_filter.forecast_error_cov, copy=True
)
self.llf_obs = np.array(kalman_filter.loglikelihood, copy=True)
# If there was missing data, save the original values from the Kalman
# filter output, since below will set the values corresponding to
# the missing observations to nans.
self.missing_forecasts = None
self.missing_forecasts_error = None
self.missing_forecasts_error_cov = None
if np.sum(self.nmissing) > 0:
            # Copy the provided arrays (which are from the Kalman filter dataset)
# into new variables
self.missing_forecasts = np.copy(self.forecasts)
self.missing_forecasts_error = np.copy(self.forecasts_error)
self.missing_forecasts_error_cov = (
np.copy(self.forecasts_error_cov)
)
# Save the collapsed values
self.collapsed_forecasts = None
self.collapsed_forecasts_error = None
self.collapsed_forecasts_error_cov = None
if self.filter_collapsed:
# Copy the provided arrays (which are from the collapsed dataset)
# into new variables
self.collapsed_forecasts = self.forecasts[:self.k_states, :]
self.collapsed_forecasts_error = (
self.forecasts_error[:self.k_states, :]
)
self.collapsed_forecasts_error_cov = (
self.forecasts_error_cov[:self.k_states, :self.k_states, :]
)
# Recreate the original arrays (which should be from the original
# dataset) in the appropriate dimension
self.forecasts = np.zeros((self.k_endog, self.nobs))
self.forecasts_error = np.zeros((self.k_endog, self.nobs))
self.forecasts_error_cov = (
np.zeros((self.k_endog, self.k_endog, self.nobs))
)
# Fill in missing values in the forecast, forecast error, and
# forecast error covariance matrix (this is required due to how the
# Kalman filter implements observations that are either partly or
# completely missing)
# Construct the predictions, forecasts
if not (self.memory_no_forecast or self.memory_no_predicted):
for t in range(self.nobs):
design_t = 0 if self.design.shape[2] == 1 else t
obs_cov_t = 0 if self.obs_cov.shape[2] == 1 else t
obs_intercept_t = 0 if self.obs_intercept.shape[1] == 1 else t
# For completely missing observations, the Kalman filter will
# produce forecasts, but forecast errors and the forecast
# error covariance matrix will be zeros - make them nan to
# improve clarity of results.
if self.nmissing[t] > 0:
mask = ~self.missing[:, t].astype(bool)
# We can recover forecasts
# For partially missing observations, the Kalman filter
# will produce all elements (forecasts, forecast errors,
# forecast error covariance matrices) as usual, but their
# dimension will only be equal to the number of non-missing
# elements, and their location in memory will be in the
# first blocks (e.g. for the forecasts_error, the first
# k_endog - nmissing[t] columns will be filled in),
# regardless of which endogenous variables they refer to
                    # (i.e. the non-missing endogenous variables for that
# observation). Furthermore, the forecast error covariance
# matrix is only valid for those elements. What is done is
# to set all elements to nan for these observations so that
# they are flagged as missing. The variables
# missing_forecasts, etc. then provide the forecasts, etc.
# provided by the Kalman filter, from which the data can be
# retrieved if desired.
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t], self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = np.nan
self.forecasts_error[mask, t] = (
self.endog[mask, t] - self.forecasts[mask, t])
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
# In the collapsed case, everything just needs to be rebuilt
# for the original observed data, since the Kalman filter
# produced these values for the collapsed data.
elif self.filter_collapsed:
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t], self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = (
self.endog[:, t] - self.forecasts[:, t]
)
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
@property
def kalman_gain(self):
"""
Kalman gain matrices
"""
if self._kalman_gain is None:
# k x n
self._kalman_gain = np.zeros(
(self.k_states, self.k_endog, self.nobs), dtype=self.dtype)
for t in range(self.nobs):
# In the case of entirely missing observations, let the Kalman
# gain be zeros.
if self.nmissing[t] == self.k_endog:
continue
design_t = 0 if self.design.shape[2] == 1 else t
transition_t = 0 if self.transition.shape[2] == 1 else t
if self.nmissing[t] == 0:
self._kalman_gain[:, :, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[:, :, design_t]),
np.linalg.inv(self.forecasts_error_cov[:, :, t])
)
)
else:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
self._kalman_gain[:, mask, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[mask, :, design_t]),
np.linalg.inv(F[:, :, 0])
)
)
return self._kalman_gain
@property
def standardized_forecasts_error(self):
"""
Standardized forecast errors
Notes
-----
The forecast errors produced by the Kalman filter are
.. math::
v_t \sim N(0, F_t)
Hypothesis tests are usually applied to the standardized residuals
.. math::
v_t^s = B_t v_t \sim N(0, I)
where :math:`B_t = L_t^{-1}` and :math:`F_t = L_t L_t'`; then
:math:`F_t^{-1} = (L_t')^{-1} L_t^{-1} = B_t' B_t`; :math:`B_t`
and :math:`L_t` are lower triangular. Finally,
:math:`B_t v_t \sim N(0, B_t F_t B_t')` and
:math:`B_t F_t B_t' = L_t^{-1} L_t L_t' (L_t')^{-1} = I`.
Thus we can rewrite :math:`v_t^s = L_t^{-1} v_t` or
:math:`L_t v_t^s = v_t`; the latter equation is the form required to
use a linear solver to recover :math:`v_t^s`. Since :math:`L_t` is
lower triangular, we can use a triangular solver (?TRTRS).
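        A minimal NumPy sketch of the same computation for a single period
        (illustrative values only, not taken from the model):
        >>> import numpy as np
        >>> from scipy import linalg
        >>> F = np.array([[2.0, 0.5], [0.5, 1.0]])  # forecast error covariance
        >>> v = np.array([1.0, -1.0])               # forecast error
        >>> L = np.linalg.cholesky(F)               # F = L L'
        >>> v_std = linalg.solve_triangular(L, v, lower=True)  # v^s = L^{-1} v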
"""
if self._standardized_forecasts_error is None:
if self.k_endog == 1:
self._standardized_forecasts_error = (
self.forecasts_error /
self.forecasts_error_cov[0, 0, :]**0.5)
else:
from scipy import linalg
self._standardized_forecasts_error = np.zeros(
self.forecasts_error.shape, dtype=self.dtype)
for t in range(self.forecasts_error_cov.shape[2]):
if self.nmissing[t] > 0:
self._standardized_forecasts_error[:, t] = np.nan
if self.nmissing[t] < self.k_endog:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
upper, _ = linalg.cho_factor(F[:, :, 0])
self._standardized_forecasts_error[mask, t] = (
linalg.solve_triangular(
upper, self.forecasts_error[mask, t], trans=1))
return self._standardized_forecasts_error
def predict(self, start=None, end=None, dynamic=None, **kwargs):
r"""
In-sample and out-of-sample prediction for state space models generally
Parameters
----------
start : int, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
dynamic : int, optional
Offset relative to `start` at which to begin dynamic prediction.
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
**kwargs
If the prediction range is outside of the sample range, any
of the state space representation matrices that are time-varying
must have updated values provided for the out-of-sample range.
            For example, if `obs_intercept` is a time-varying component and
the prediction range extends 10 periods beyond the end of the
sample, a (`k_endog` x 10) matrix must be provided with the new
intercept values.
Returns
-------
results : PredictionResults
A PredictionResults object.
Notes
-----
All prediction is performed by applying the deterministic part of the
measurement equation using the predicted state variables.
Out-of-sample prediction first applies the Kalman filter to missing
data for the number of periods desired to obtain the predicted states.
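        Examples
        --------
        Illustrative sketch only; it assumes ``res`` is a FilterResults
        instance obtained from filtering a time-invariant model with 100
        in-sample observations:
        >>> pred = res.predict(start=0, end=110, dynamic=90)
        >>> # pred.forecasts has shape (k_endog, 110): 90 static predictions,
        >>> # 10 dynamic predictions and 10 out-of-sample forecasts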
"""
# Cannot predict if we do not have appropriate arrays
if self.memory_no_forecast or self.memory_no_predicted:
raise ValueError('Predict is not possible if memory conservation'
' has been used to avoid storing forecasts or'
' predicted values.')
# Get the start and the end of the entire prediction range
if start is None:
start = 0
elif start < 0:
raise ValueError('Cannot predict values previous to the sample.')
if end is None:
end = self.nobs
        # Prediction and forecasting are performed by iterating the Kalman
        # filter through the entire range [0, end].
# Then, everything is returned corresponding to the range [start, end].
# In order to perform the calculations, the range is separately split
# up into the following categories:
# - static: (in-sample) the Kalman filter is run as usual
# - dynamic: (in-sample) the Kalman filter is run, but on missing data
# - forecast: (out-of-sample) the Kalman filter is run, but on missing
# data
# Short-circuit if end is before start
if end <= start:
raise ValueError('End of prediction must be after start.')
# Get the number of forecasts to make after the end of the sample
nforecast = max(0, end - self.nobs)
# Get the number of dynamic prediction periods
# If `dynamic=True`, then assume that we want to begin dynamic
# prediction at the start of the sample prediction.
if dynamic is True:
dynamic = 0
# If `dynamic=False`, then assume we want no dynamic prediction
if dynamic is False:
dynamic = None
ndynamic = 0
if dynamic is not None:
# Replace the relative dynamic offset with an absolute offset
dynamic = start + dynamic
# Validate the `dynamic` parameter
if dynamic < 0:
raise ValueError('Dynamic prediction cannot begin prior to the'
' first observation in the sample.')
elif dynamic > end:
warn('Dynamic prediction specified to begin after the end of'
' prediction, and so has no effect.', ValueWarning)
dynamic = None
elif dynamic > self.nobs:
warn('Dynamic prediction specified to begin during'
' out-of-sample forecasting period, and so has no'
' effect.', ValueWarning)
dynamic = None
# Get the total size of the desired dynamic forecasting component
# Note: the first `dynamic` periods of prediction are actually
# *not* dynamic, because dynamic prediction begins at observation
# `dynamic`.
if dynamic is not None:
ndynamic = max(0, min(end, self.nobs) - dynamic)
# Get the number of in-sample static predictions
nstatic = min(end, self.nobs) if dynamic is None else dynamic
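        # For example (illustrative values only): with nobs = 100, start = 0,
        # end = 110 and dynamic = 90, this gives nstatic = 90, ndynamic = 10
        # and nforecast = 10, so that nstatic + ndynamic + nforecast == end.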
# Construct the design and observation intercept and covariance
# matrices for start-npadded:end. If not time-varying in the original
# model, then they will be copied over if none are provided in
# `kwargs`. Otherwise additional matrices must be provided in `kwargs`.
representation = {}
for name, shape in self.shapes.items():
if name == 'obs':
continue
representation[name] = getattr(self, name)
# Update the matrices from kwargs for forecasts
warning = ('Model has time-invariant %s matrix, so the %s'
' argument to `predict` has been ignored.')
exception = ('Forecasting for models with time-varying %s matrix'
' requires an updated time-varying matrix for the'
' period to be forecasted.')
if nforecast > 0:
for name, shape in self.shapes.items():
if name == 'obs':
continue
if representation[name].shape[-1] == 1:
if name in kwargs:
warn(warning % (name, name), ValueWarning)
elif name not in kwargs:
raise ValueError(exception % name)
else:
mat = np.asarray(kwargs[name])
if len(shape) == 2:
validate_vector_shape(name, mat.shape,
shape[0], nforecast)
if mat.ndim < 2 or not mat.shape[1] == nforecast:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
else:
validate_matrix_shape(name, mat.shape, shape[0],
shape[1], nforecast)
if mat.ndim < 3 or not mat.shape[2] == nforecast:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
# Update the matrices from kwargs for dynamic prediction in the case
# that `end` is less than `nobs` and `dynamic` is less than `end`. In
# this case, any time-varying matrices in the default `representation`
# will be too long, causing an error to be thrown below in the
# KalmanFilter(...) construction call, because the endog has length
# nstatic + ndynamic + nforecast, whereas the time-varying matrices
# from `representation` have length nobs.
if ndynamic > 0 and end < self.nobs:
for name, shape in self.shapes.items():
if not name == 'obs' and representation[name].shape[-1] > 1:
representation[name] = representation[name][..., :end]
# Construct the predicted state and covariance matrix for each time
# period depending on whether that time period corresponds to
# one-step-ahead prediction, dynamic prediction, or out-of-sample
# forecasting.
# If we only have simple prediction, then we can use the already saved
# Kalman filter output
if ndynamic == 0 and nforecast == 0:
results = self
else:
# Construct the new endogenous array.
endog = np.empty((self.k_endog, ndynamic + nforecast))
endog.fill(np.nan)
endog = np.asfortranarray(np.c_[self.endog[:, :nstatic], endog])
# Setup the new statespace representation
model_kwargs = {
'filter_method': self.filter_method,
'inversion_method': self.inversion_method,
'stability_method': self.stability_method,
'conserve_memory': self.conserve_memory,
'filter_timing': self.filter_timing,
'tolerance': self.tolerance,
'loglikelihood_burn': self.loglikelihood_burn
}
model_kwargs.update(representation)
model = KalmanFilter(
endog, self.k_states, self.k_posdef, **model_kwargs
)
model.initialize_known(
self.initial_state,
self.initial_state_cov
)
model._initialize_filter()
model._initialize_state()
results = self._predict(nstatic, ndynamic, nforecast, model)
return PredictionResults(results, start, end, nstatic, ndynamic,
nforecast)
def _predict(self, nstatic, ndynamic, nforecast, model):
# Note: this doesn't use self, and can either be a static method or
# moved outside the class altogether.
# Get the underlying filter
kfilter = model._kalman_filter
# Save this (which shares memory with the memoryview on which the
# Kalman filter will be operating) so that we can replace actual data
# with predicted data during dynamic forecasting
endog = model._representations[model.prefix]['obs']
for t in range(kfilter.model.nobs):
# Run the Kalman filter for the first `nstatic` periods (for
# which dynamic computation will not be performed)
if t < nstatic:
next(kfilter)
# Perform dynamic prediction
elif t < nstatic + ndynamic:
design_t = 0 if model.design.shape[2] == 1 else t
obs_intercept_t = 0 if model.obs_intercept.shape[1] == 1 else t
# Unconditional value is the intercept (often zeros)
endog[:, t] = model.obs_intercept[:, obs_intercept_t]
# If t > 0, then we can condition the forecast on the state
if t > 0:
# Predict endog[:, t] given `predicted_state` calculated in
# previous iteration (i.e. t-1)
endog[:, t] += np.dot(
model.design[:, :, design_t],
kfilter.predicted_state[:, t]
)
# Advance Kalman filter
next(kfilter)
# Perform any (one-step-ahead) forecasting
else:
next(kfilter)
# Return the predicted state and predicted state covariance matrices
results = FilterResults(model)
results.update_representation(model)
results.update_filter(kfilter)
return results
class PredictionResults(FilterResults):
r"""
Results of in-sample and out-of-sample prediction for state space models
generally
Parameters
----------
results : FilterResults
Output from filtering, corresponding to the prediction desired
start : int
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
nstatic : int
Number of in-sample static predictions (these are always the first
elements of the prediction output).
ndynamic : int
Number of in-sample dynamic predictions (these always follow the static
predictions directly, and are directly followed by the forecasts).
nforecast : int
        Number of out-of-sample forecasts (these always follow the dynamic
predictions directly).
Attributes
----------
npredictions : int
Number of observations in the predicted series; this is not necessarily
the same as the number of observations in the original model from which
prediction was performed.
start : int
Zero-indexed observation number at which to start prediction,
i.e., the first predict will be at `start`; this is relative to the
original model from which prediction was performed.
end : int
Zero-indexed observation number at which to end prediction,
i.e., the last predict will be at `end`; this is relative to the
original model from which prediction was performed.
nstatic : int
Number of in-sample static predictions.
ndynamic : int
Number of in-sample dynamic predictions.
nforecast : int
        Number of out-of-sample forecasts.
endog : array
The observation vector.
design : array
The design matrix, :math:`Z`.
obs_intercept : array
The intercept for the observation equation, :math:`d`.
obs_cov : array
The covariance matrix for the observation equation :math:`H`.
transition : array
The transition matrix, :math:`T`.
state_intercept : array
The intercept for the transition equation, :math:`c`.
selection : array
The selection matrix, :math:`R`.
state_cov : array
The covariance matrix for the state equation :math:`Q`.
filtered_state : array
The filtered state vector at each time period.
filtered_state_cov : array
The filtered state covariance matrix at each time period.
predicted_state : array
The predicted state vector at each time period.
predicted_state_cov : array
The predicted state covariance matrix at each time period.
forecasts : array
The one-step-ahead forecasts of observations at each time period.
forecasts_error : array
The forecast errors at each time period.
forecasts_error_cov : array
The forecast error covariance matrices at each time period.
Notes
-----
The provided ranges must be conformable, meaning that it must be that
`end - start == nstatic + ndynamic + nforecast`.
This class is essentially a view to the FilterResults object, but
returning the appropriate ranges for everything.
"""
representation_attributes = [
        'endog', 'design', 'obs_intercept',
'obs_cov', 'transition', 'state_intercept', 'selection',
'state_cov'
]
filter_attributes = [
'filtered_state', 'filtered_state_cov',
'predicted_state', 'predicted_state_cov',
'forecasts', 'forecasts_error', 'forecasts_error_cov'
]
def __init__(self, results, start, end, nstatic, ndynamic, nforecast):
# Save the filter results object
self.results = results
# Save prediction ranges
        self.npredictions = end - start
self.start = start
self.end = end
self.nstatic = nstatic
self.ndynamic = ndynamic
self.nforecast = nforecast
def __getattr__(self, attr):
"""
Provide access to the representation and filtered output in the
appropriate range (`start` - `end`).
"""
# Prevent infinite recursive lookups
if attr[0] == '_':
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
_attr = '_' + attr
# Cache the attribute
if not hasattr(self, _attr):
if attr == 'endog' or attr in self.filter_attributes:
# Get a copy
value = getattr(self.results, attr).copy()
# Subset to the correct time frame
value = value[..., self.start:self.end]
elif attr in self.representation_attributes:
value = getattr(self.results, attr).copy()
# If a time-invariant matrix, return it. Otherwise, subset to
# the correct period.
if value.shape[-1] == 1:
value = value[..., 0]
else:
value = value[..., self.start:self.end]
else:
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
setattr(self, _attr, value)
return getattr(self, _attr)
|
bsd-3-clause
| 3,883,965,358,522,059,000
| 41.340876
| 79
| 0.594628
| false
| 4.440953
| false
| false
| false
|
rjw57/cubbie
|
migrations/versions/316bb58e84f_add_user_identities.py
|
1
|
1110
|
"""add user_identities
Revision ID: 316bb58e84f
Revises: 38c8ec357e0
Create Date: 2015-03-11 01:40:12.157458
"""
# revision identifiers, used by Alembic.
revision = '316bb58e84f'
down_revision = '38c8ec357e0'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user_identities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('provider', sa.Text(), nullable=False),
sa.Column('provider_user_id', sa.Text(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('idx_user_identities_provider_provider_id', 'user_identities', ['provider', 'provider_user_id'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('idx_user_identities_provider_provider_id', table_name='user_identities')
op.drop_table('user_identities')
### end Alembic commands ###
|
mit
| 7,870,348,524,913,182,000
| 30.714286
| 130
| 0.684685
| false
| 3.313433
| false
| false
| false
|
cwgreene/Nanostructure-Simulator
|
utils/plot_trajectories.py
|
1
|
1140
|
import os
import sys
import re
import pylab
def parse_trajectory_line(line):
trajectory = []
for x,y in re.findall("\(([0-9.]+), ([0-9.]+)\)",line):
trajectory.append((float(x),float(y)))
return trajectory
def generate_trajectories(file):
        # get rid of the first two lines
file.readline()
file.readline()
#parse each line
for line in file:
yield parse_trajectory_line(line)
def open_trajectory_file(n):
for filename in os.listdir("results"):
if re.match(str(n)+"traj",filename):
return open("results/"+filename)
        raise IOError("File not found")
def display_trajectories(n):
input =""
file = open_trajectory_file(n)
trajectory_gen = generate_trajectories(file)
trajectory = trajectory_gen.next()
interactive = True
i = 0
while input != 'q':
first = map(lambda x: x[0],trajectory)
second = map(lambda x: x[1],trajectory)
pylab.plot(first,second)
if interactive:
input = raw_input()
if input == "go":
i += 1
interactive=False
if i %100 == 0:
print i
raw_input()
try:
trajectory=trajectory_gen.next()
except:
print "Done"
break
if __name__=="__main__":
display_trajectories(sys.argv[1])
|
mit
| -5,705,216,141,029,246,000
| 20.923077
| 56
| 0.669298
| false
| 2.900763
| false
| false
| false
|
jonathansick/androcmd
|
scripts/phat_baseline_test.py
|
1
|
3612
|
#!/usr/bin/env python
# encoding: utf-8
"""
Grid computation of dust attenuation for old vs. young stellar populations.
2015-05-12 - Created by Jonathan Sick
"""
import argparse
from androcmd.phatpipeline import PhatCatalog
from androcmd.baselineexp import SolarZPipeline, ThreeZPipeline
def main():
args = parse_args()
if args.pipeline == 'solarz':
# Use the single-Z solar pipeline
Pipeline = SolarZPipeline
elif args.pipeline == 'threez':
# Use the three-metallicity track pipeline
Pipeline = ThreeZPipeline
isoc = dict(isoc_kind='parsec_CAF09_v1.2S',
photsys_version='yang')
pipeline = Pipeline(brick=23,
root_dir=args.model_name,
isoc_args=isoc)
if args.fit is not None:
dataset = PhatCatalog(args.brick)
pipeline.fit(args.fit, [args.fit], dataset)
if args.plot_hess is not None:
from androcmd.baselineexp import plot_fit_hess_grid
dataset = PhatCatalog(args.brick)
plot_fit_hess_grid(args.plot_hess, pipeline, dataset)
if args.plot_diff is not None:
from androcmd.baselineexp import plot_diff_hess_grid
dataset = PhatCatalog(args.brick)
plot_diff_hess_grid(args.plot_diff, pipeline, dataset)
if args.plot_sfh is not None:
from androcmd.baselineexp import sfh_comparison_plot
dataset = PhatCatalog(args.brick)
sfh_comparison_plot(args.plot_sfh, pipeline, dataset)
if args.plot_zsfh is not None:
from androcmd.baselineexp import plot_sfh_metallicity_trends
dataset = PhatCatalog(args.brick)
for fit_key in args.plot_zsfh:
plot_path = "{model}_b{brick:d}_zsfh_{key}".format(
model=args.model_name, brick=args.brick, key=fit_key)
plot_sfh_metallicity_trends(plot_path, pipeline, dataset, fit_key)
if args.chi_table is not None:
from androcmd.baselineexp import tabulate_fit_chi
dataset = PhatCatalog(args.brick)
tabulate_fit_chi(args.chi_table, pipeline, dataset)
if args.plot_isoc is not None:
from androcmd.baselineexp import plot_isocs, plot_isocs_lewis
dataset = PhatCatalog(args.brick)
plot_isocs(args.plot_isoc, pipeline, dataset)
plot_isocs_lewis(args.plot_isoc + '_lewis', pipeline, dataset)
if args.plot_lock is not None:
from androcmd.baselineexp import plot_lockfile
plot_lockfile(args.plot_lock, pipeline)
def parse_args():
parser = argparse.ArgumentParser(
description="Model a brick with differential old/young dust.")
parser.add_argument('model_name')
parser.add_argument('brick', type=int)
parser.add_argument('--fit',
choices=['lewis', 'acs_rgb', 'acs_all',
'oir_all', 'ir_rgb'],
default=None)
parser.add_argument('--pipeline',
choices=['solarz', 'threez'],
default='solarz')
parser.add_argument('--plot-hess', default=None)
parser.add_argument('--plot-diff', default=None)
parser.add_argument('--plot-sfh', default=None)
parser.add_argument('--chi-table', default=None)
parser.add_argument('--plot-zsfh', nargs='*', default=None,
choices=['lewis', 'acs_rgb', 'acs_all',
'oir_all', 'ir_rgb'])
parser.add_argument('--plot-isoc', default=None)
parser.add_argument('--plot-lock', default=None)
return parser.parse_args()
if __name__ == '__main__':
main()
|
mit
| -6,061,255,170,724,512,000
| 35.484848
| 78
| 0.623477
| false
| 3.544652
| false
| false
| false
|
lhellebr/spacewalk
|
backend/server/rhnLib.py
|
1
|
8211
|
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import hashlib
import string
import base64
import posixpath
from spacewalk.common.rhnLib import parseRPMName
from spacewalk.common.rhnLog import log_debug
from spacewalk.common.rhnException import rhnFault
# architecture work
from rhnMapping import check_package_arch
def computeSignature(*fields):
# Init the hash
m = hashlib.new('sha256')
for i in fields:
# use str(i) since some of the fields may be non-string
m.update(str(i))
return base64.encodestring(m.digest()).rstrip()
# 'n_n-n-v.v.v-r_r.r:e.ARCH.rpm' ---> [n,v,r,e,a]
def parseRPMFilename(pkgFilename):
"""
IN: Package Name: xxx-yyy-ver.ver.ver-rel.rel_rel:e.ARCH.rpm (string)
Understood rules:
      o Name can have nearly any char, but end in a - (well separated by).
Any character; may include - as well.
o Version cannot have a -, but ends in one.
o Release should be an actual number, and can't have any -'s.
o Release can include the Epoch, e.g.: 2:4 (4 is the epoch)
      o Epoch: Can include anything except a - and the : separator???
XXX: Is epoch info above correct?
OUT: [n,e,v,r, arch].
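    Example (illustrative only; the epoch field is empty when it is absent
    from the name): parseRPMFilename('bash-4.2.46-31.el7.x86_64.rpm')
    returns the name, version, release and epoch fields in the order
    documented above, with 'x86_64' appended.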
"""
if type(pkgFilename) != type(''):
raise rhnFault(21, str(pkgFilename)) # Invalid arg.
pkgFilename = os.path.basename(pkgFilename)
# Check that this is a package NAME (with arch.rpm) and strip
# that crap off.
pkg = string.split(pkgFilename, '.')
# 'rpm' at end?
if string.lower(pkg[-1]) not in ['rpm', 'deb']:
raise rhnFault(21, 'neither an rpm nor a deb package name: %s' % pkgFilename)
# Valid architecture next?
if check_package_arch(pkg[-2]) is None:
raise rhnFault(21, 'Incompatible architecture found: %s' % pkg[-2])
_arch = pkg[-2]
# Nuke that arch.rpm.
pkg = string.join(pkg[:-2], '.')
ret = list(parseRPMName(pkg))
if ret:
ret.append(_arch)
return ret
# XXX TBD where to place this function - it has to be accessible from several
# places
def normalize_server_arch(arch):
log_debug(4, 'server arch', arch)
if arch is None:
return ''
arch = str(arch)
if '-' in arch:
# Already normalized
return arch
# Fix the arch if need be
suffix = '-redhat-linux'
arch = arch + suffix
return arch
class InvalidAction(Exception):
""" An error class to signal when we can not handle an action """
pass
class EmptyAction(Exception):
""" An error class that signals that we encountered an internal error
trying to handle an action through no fault of the client
"""
pass
class ShadowAction(Exception):
""" An error class for actions that should not get to the client """
pass
def transpose_to_hash(arr, column_names):
""" Handy function to transpose an array from row-based to column-based,
with named columns.
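    For example (illustrative): transpose_to_hash([(1, 'a'), (2, 'b')],
    ['id', 'name']) returns {'id': [1, 2], 'name': ['a', 'b']}.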
"""
result = []
for c in column_names:
result.append([])
colnum = len(column_names)
for r in arr:
if len(r) != colnum:
raise Exception(
"Mismatching number of columns: expected %s, got %s; %s" % (
colnum, len(r), r))
for i in range(len(r)):
result[i].append(r[i])
# Now build the hash labeled with the column names
rh = {}
for i in range(len(column_names)):
rh[column_names[i]] = result[i]
return rh
def get_package_path(nevra, org_id, source=0, prepend="", omit_epoch=None,
package_type='rpm', checksum_type=None, checksum=None):
""" Computes a package path, optionally prepending a prefix
The path will look like
<prefix>/<org_id>/checksum[:3]/n/e:v-r/a/checksum/n-v-r.a.rpm if not omit_epoch
<prefix>/<org_id>/checksum[:3]/n/v-r/a/checksum/n-v-r.a.rpm if omit_epoch
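    For example (illustrative values only): name 'bash', version '4.2',
    release '31', empty epoch, arch 'x86_64', org_id 1 and checksum 'abc123'
    yield /1/abc/bash/4.2-31/x86_64/abc123/bash-4.2-31.x86_64.rpm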
"""
name, epoch, version, release, pkgarch = nevra
# dirarch and pkgarch are special-cased for source rpms
if source:
dirarch = 'SRPMS'
else:
dirarch = pkgarch
if org_id in ['', None]:
org = "NULL"
else:
org = org_id
if not omit_epoch and epoch not in [None, '']:
version = str(epoch) + ':' + version
# normpath sanitizes the path (removing duplicated / and such)
template = os.path.normpath(prepend +
"/%s/%s/%s/%s-%s/%s/%s/%s-%s-%s.%s.%s")
return template % (org, checksum[:3], name, version, release, dirarch, checksum,
name, nevra[2], release, pkgarch, package_type)
# bug #161989
# It seems that our software was written specifically for rpms in far too many
# ways. Here's a little bit of a hack function that will return the package path
# (as in from get_package_path) but without the filename appended.
# This enables us to append an arbitrary file name that is not restricted to the
# form: name-version-release.arch.type
def get_package_path_without_package_name(nevra, org_id, prepend="",
checksum_type=None, checksum=None):
"""return a package path without the package name appended"""
return os.path.dirname(get_package_path(nevra, org_id, prepend=prepend,
checksum_type=checksum_type, checksum=checksum))
class CallableObj:
""" Generic callable object """
def __init__(self, name, func):
self.func = func
self.name = name
def __call__(self, *args, **kwargs):
return self.func(self.name, *args, **kwargs)
def make_evr(nvre, source=False):
""" IN: 'e:name-version-release' or 'name-version-release:e'
OUT: {'name':name, 'version':version, 'release':release, 'epoch':epoch }
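        Example (illustrative): make_evr('3:bash-4.2.46-31') returns
        {'name': 'bash', 'version': '4.2.46', 'release': '31', 'epoch': '3'}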
"""
if ":" in nvre:
nvr, epoch = nvre.rsplit(":", 1)
if "-" in epoch:
nvr, epoch = epoch, nvr
else:
nvr, epoch = nvre, ""
nvr_parts = nvr.rsplit("-", 2)
if len(nvr_parts) != 3:
raise rhnFault(err_code=21, err_text="NVRE is missing name, version, or release.")
result = dict(zip(["name", "version", "release"], nvr_parts))
result["epoch"] = epoch
if source and result["release"].endswith(".src"):
result["release"] = result["release"][:-4]
return result
def _is_secure_path(path):
path = posixpath.normpath(path)
return not (path.startswith('/') or path.startswith('../'))
def get_crash_path(org_id, system_id, crash):
"""For a given org_id, system_id and crash, return relative path to a crash directory."""
path = os.path.join('systems', org_id, system_id, 'crashes', crash)
if _is_secure_path(path):
return path
else:
return None
def get_crashfile_path(org_id, system_id, crash, filename):
"""For a given org_id, system_id, crash and filename, return relative path to a crash file."""
path = os.path.join(get_crash_path(org_id, system_id, crash), filename)
if _is_secure_path(path):
return path
else:
return None
def get_action_path(org_id, system_id, action_id):
"""For a given org_id, system_id, and action_id, return relative path to a store directory."""
path = os.path.join('systems', str(org_id), str(system_id), 'actions', str(action_id))
if _is_secure_path(path):
return path
def get_actionfile_path(org_id, system_id, action_id, filename):
"""For a given org_id, system_id, action_id, and file, return relative path to a file."""
path = os.path.join(get_action_path(org_id, system_id, action_id), str(filename))
if _is_secure_path(path):
return path
|
gpl-2.0
| 8,887,653,560,759,876,000
| 30.580769
| 98
| 0.629765
| false
| 3.543807
| false
| false
| false
|
m-r-hunt/invaders
|
enemies.py
|
1
|
6646
|
# Invaders
# Copyright (C) 2013 Maximilian Hunt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os, random, pygame, projectiles, score_counter
class EnemySprite(pygame.sprite.Sprite):
# Class for one enemy invader.
def __init__(self, image, position, bullet_group):
# image: relative path to an image pygame can load
# position: (x, y) coordinates on screen
# bullet_group: pygame.sprite.Group to put fired bullets in
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(image)
self.position = position
self.rect = self.image.get_rect()
self.rect.center = position
self.bullet_group = bullet_group
def update(self, dv, score, collisions):
# Update this enemy. Should be called once per frame.
# dv: (x, y) vector for movement this frame
# score: a Score to increment on death
# collisions: a dictionary of collisions, possibly containing this object
# Handle any collisions given
if self in collisions:
death = False
for bullet in collisions[self]:
if (bullet.origin != self):
bullet.kill()
death = True
if (death == True):
score.increment()
self.kill()
# Update position
self.position = (self.position[0] + dv[0], self.position[1] + dv[1])
self.rect.center = self.position
def y(self):
# Return height (y coordinate).
return self.position[1]
def fire(self):
# (Possibly) fire a bullet down.
if (random.randrange(100) < 2):
bounds = (0-100, 800+100, 0-100, 600+100)
bullet = projectiles.Bullet(os.path.join("Resources", "Enemy Bullet.png"), self.position, (0, 5), bounds, self)
self.bullet_group.add(bullet)
class EnemyColumn(pygame.sprite.Group):
# Class for one column in a formation of enemies.
# Exists so we can easily fire only the lowest enemy in each column
# Remembers its own x coordinate, everything else happens inside the actual enemies
def __init__(self, x_position):
# x_position: integer x coordinate
pygame.sprite.Group.__init__(self)
self.x_position = x_position
def update(self, dv, score, collisions):
# Update this column. Should be called once per frame.
# dv: (x, y) vector for movement this frame
# score: a Score to pass to contained EnemySprites
# collisions: a dictionary of collisions to pass to contained EnemySprites
# Return (x, y), x of this column and y of lowest contained Sprite.
self.x_position += dv[0]
# Update contained sprites
for i in self.sprites():
i.update(dv, score, collisions)
# Compute biggest y, ask that EnemySprite to fire.
max_y = 0
if (len(self) != 0):
for i in self.sprites():
if (i.y() > max_y):
max_y = i.y()
bottom_enemy = i
bottom_enemy.fire()
return self.x_position, max_y
class EnemyFormation(pygame.sprite.Group):
# Class for a whole formation of enemies.
# Contains both EnemyColumns and EnemySprites
# Magic numbers: Base speed stepped horizontally or vertically each frame.
H_STEP = 2
V_STEP = 10
def __init__(self, topleft, layout, bounds, bullet_group):
pygame.sprite.Group.__init__(self)
self.columns = []
columns, rows = layout
# Generate all the enemies and columns.
for i in range(0, columns):
column_x = topleft[0] + i*64
            enemy_column = EnemyColumn(column_x)
for j in range(0, rows):
new_enemy = EnemySprite(os.path.join("resources", "Enemy.png"), (column_x, topleft[1] + j*64), bullet_group)
enemy_column.add(new_enemy)
self.add(new_enemy)
self.columns.append(enemy_column)
# Direction: +1 for right, -1 for left (i.e. +-ve x direction)
self.current_direction = +1
self.left_bound, self.right_bound, self.bottom_bound = bounds
self.total = columns * rows
def update(self, score, collisions):
# Update this formation. Should be called once per frame.
# score: a Score to pass to contained EnemyColumns
# collisions: a dictionary of collisions to pass to contained EnemyColumns
# Returns (bool, bool). First is True if this formation is still in a good state, False if it needs resetting.
# Second is True if this is because it's now empty, False if it has reached the bottom of the screen.
direction_change = too_low = False
# Compute factor to move faster when we have fewer remaining members.
scale = int(float(self.total)/float(len(self)))
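        # e.g. (illustrative): with 40 enemies in total and 10 still alive,
        # scale = 4, so the formation moves 4 * H_STEP = 8 pixels per frame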
# Update columns
for i in self.columns:
x, y = i.update((scale*self.current_direction*self.H_STEP, 0), score, collisions)
# Remove empty columns
if (len(i.sprites()) == 0):
self.columns.remove(i)
# Notice if we've gone too low
elif (y > self.bottom_bound):
too_low = True
# Remember to change direction when we reach screen edges
elif (x < self.left_bound or x > self.right_bound):
direction_change = True
# Indicate we're empty
if (len(self.columns) == 0):
return False, True
# Indicate we reached the bottom of the screen.
elif too_low:
return False, False
# Drop down and change direction
elif direction_change:
self.current_direction *= -1
for i in self.columns:
i.update((scale*self.current_direction*self.H_STEP, self.V_STEP), score, [])
# If we made it here, everything's fine.
return True, True
|
gpl-2.0
| -4,189,515,760,736,269,300
| 41.06962
| 124
| 0.614354
| false
| 3.941874
| false
| false
| false
|
2Minutes/davos-dev
|
davos/core/utils.py
|
1
|
7692
|
import re
import sys
import os
import os.path as osp
from fnmatch import fnmatch
from pytd.gui.dialogs import promptDialog
from pytd.util.logutils import logMsg
from pytd.util.sysutils import importModule, toStr, inDevMode, getCaller
from pytd.util.fsutils import pathSplitDirs, pathResolve, pathNorm, pathJoin
from pytd.util.fsutils import jsonRead, jsonWrite, isDirStat, parseDirContent
from pytd.util.strutils import padded
_VERS_SPLIT_REXP = re.compile(r'-(v[0-9]+)')
def getConfigModule(sProjectName):
try:
sConfPkg = os.environ.get("DAVOS_CONF_PACKAGE", "davos.config")
sConfigModule = sConfPkg + '.' + sProjectName
modobj = importModule(sConfigModule)
except ImportError:
raise ImportError("No config module named '{}'".format(sConfigModule))
return modobj
def versionFromName(sFileName):
vers = _VERS_SPLIT_REXP.findall(sFileName)
return int(vers[-1].strip('v')) if vers else None
def mkVersionSuffix(v):
if not isinstance(v, int):
raise TypeError("argument must be of type <int>. Got {}.".format(type(v)))
return "".join(('-v', padded(v)))
def findVersionFields(s):
return _VERS_SPLIT_REXP.findall(s)
def promptForComment(**kwargs):
sComment = ""
bOk = False
result = promptDialog(title='Please...',
message='Leave a comment: ',
button=['OK', 'Cancel'],
defaultButton='OK',
cancelButton='Cancel',
dismissString='Cancel',
scrollableField=True,
**kwargs)
if result == 'Cancel':
logMsg("Cancelled !" , warning=True)
elif result == 'OK':
sComment = promptDialog(query=True, text=True)
bOk = True
return sComment, bOk
def projectNameFromPath(p):
sConfPkg = os.environ.get("DAVOS_CONF_PACKAGE", "davos.config")
pkg = importModule(sConfPkg)
sPkgDirPath = os.path.dirname(pkg.__file__)
sDirList = pathSplitDirs(p)
for sFilename in os.listdir(sPkgDirPath):
bIgnored = False
for sPatrn in ("__*", ".*", "*.pyc"):
if fnmatch(sFilename, sPatrn):
bIgnored = True
break
if bIgnored:
continue
sModName = os.path.splitext(sFilename)[0]
m = importModule(sConfPkg + '.' + sModName)
sProjDir = m.project.dir_name
if sProjDir in sDirList:
return sModName
return ""
def splitStep(sTaskName):
return sTaskName.rsplit("|", 1) if ("|" in sTaskName) else ("", sTaskName)
def damasServerPort():
return os.getenv("DAMAS_DEV_PORT", "8443") if inDevMode() else "8443"
def loadPrefs():
global DAVOS_PREFS
try:
p = pathResolve(r"%USERPROFILE%\davos_prefs.json")
DAVOS_PREFS = jsonRead(p)
except EnvironmentError:
DAVOS_PREFS = {}
return DAVOS_PREFS
def savePrefs():
global DAVOS_PREFS
if DAVOS_PREFS:
p = pathResolve(r"%USERPROFILE%\davos_prefs.json")
jsonWrite(p, DAVOS_PREFS)
def setPref(in_sKey, value):
global DAVOS_PREFS
if "|" not in in_sKey:
DAVOS_PREFS[in_sKey] = value
return
sKeyList = in_sKey.split("|")
iLastKey = len(sKeyList) - 1
currPrefs = DAVOS_PREFS
sPrevKey = ""
prevPrefs = None
for i, sKey in enumerate(sKeyList):
if not isinstance(currPrefs, dict):
prevPrefs[sPrevKey] = {}
currPrefs = prevPrefs[sPrevKey]
if i == iLastKey:
currPrefs[sKey] = value
return
if sKey not in currPrefs:
currPrefs[sKey] = {}
prevPrefs = currPrefs
sPrevKey = sKey
currPrefs = currPrefs[sKey]
def getPref(in_sKey, default=None):
global DAVOS_PREFS
if "|" not in in_sKey:
return DAVOS_PREFS.get(in_sKey, default)
sKeyList = in_sKey.split("|")
iLastKey = len(sKeyList) - 1
currPrefs = DAVOS_PREFS
for i, sKey in enumerate(sKeyList):
if not isinstance(currPrefs, dict):
k = "|".join(sKeyList[:(i + 1)])
logMsg("Not a pref dictionary: '{}'.".format(k), warning=True)
return default
if i == iLastKey:
return currPrefs.get(sKey, default)
if sKey in currPrefs:
currPrefs = currPrefs[sKey]
else:
logMsg("No such pref: '{}'.".format(in_sKey), warning=True)
return default
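# Minimal usage sketch for the "|"-separated pref keys handled above
# (key names and values are made up for illustration): setPref() creates
# the nested dicts on demand and getPref() walks them back down.
#
# loadPrefs()
# setPref("assetBrowser|columnWidth|name", 120)
# getPref("assetBrowser|columnWidth|name", default=80)   # -> 120
# getPref("assetBrowser|columnWidth|size", default=80)   # -> 80 (missing key)
# savePrefs()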
_ICON_DIR_PATH = ""
def mkIconPath(sRelPath):
global _ICON_DIR_PATH
if (not _ICON_DIR_PATH) or (not osp.exists(_ICON_DIR_PATH)):
p = sys.modules["davos"].__file__
p = osp.abspath(osp.join(osp.dirname(p), "..", "resources", "icon"))
_ICON_DIR_PATH = p
return pathJoin(_ICON_DIR_PATH, sRelPath)
def writePackContent(sPackDirPath, dirStat=None):
sPackDirPath = pathNorm(sPackDirPath)
if not dirStat:
dirStat = os.stat(sPackDirPath)
sJsonPath = mkPackFilePath(sPackDirPath)
iMtime = 0
if not osp.exists(sJsonPath):
iMtime = dirStat.st_mtime
iAtime = dirStat.st_atime
try:
open(sJsonPath, 'a+b').close() # create json file so it is listed by parseDirContent()
dirContent = parseDirContent(sPackDirPath)
jsonWrite(sJsonPath, dirContent, sort_keys=True)
finally:
if iMtime:
os.utime(sPackDirPath, (iAtime, iMtime))
return dirContent
def readPackContent(sPackDirPath, fail=True):
try:
dirContent = jsonRead(mkPackFilePath(sPackDirPath))
except EnvironmentError as e:
if fail:
raise
logMsg(toStr(e), warning=True)
dirContent = parseDirContent(sPackDirPath)
return dirContent
def mkPackFilePath(sPackDirPath):
return pathJoin(sPackDirPath, "_package.json")
_ISPACK_REXP = re.compile(r".+_pkg[^/\w].+", re.I)
def assertPack(p, dirStat=None):
if not dirStat:
dirStat = os.stat(pathNorm(p))
if isPack(p, fail=True, dirStat=dirStat):
return dirStat
return None
def belowPack(p):
p = pathNorm(p)
if os.environ["IN_SEB_MODE"]:
return True if _belowPack(p) else _belowOldPack(p)
else:
return _belowPack(p)
def isPack(p, fail=False, dirStat=None):
p = pathNorm(p)
if os.environ["IN_SEB_MODE"]:
bPackPath = True if _isPack(p) else _isOldPack(p)
else:
bPackPath = _isPack(p)
if not bPackPath:
if fail:
sMsg = ("Directory NOT a package (should start with 'pkg_' or 'lyr_'): '{}'."
.format(osp.basename(p)))
raise EnvironmentError(sMsg)
else:
return False
if dirStat and not isDirStat(dirStat):
if fail:
raise EnvironmentError("Package path NOT a directory: '{}'".format(p))
else:
return False
return True
def _belowPack(p):
p = osp.dirname(p)
for sDirName in pathSplitDirs(p):
if _isPack(sDirName):
return True
return False
def _isPack(p):
sBaseName = osp.basename(p) if "/" in p else p
if "_" not in sBaseName:
return False
sPrefix = sBaseName.split("_", 1)[0]
if not sPrefix:
return False
return (sPrefix.lower() + "_") in ("pkg_", "lyr_")
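# Naming rule implemented above, shown on invented paths: a directory is a
# package when its basename starts with "pkg_" or "lyr_" (case-insensitive).
#
# _isPack("zomb/asset/pkg_chr_zombie")   # True
# _isPack("zomb/asset/LYR_background")   # True
# _isPack("zomb/asset/chr_zombie")       # False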
def _belowOldPack(p):
p = osp.dirname(p)
if "_pkg/" in p.lower():
return True
if _ISPACK_REXP.match(p):
return True
return False
def _isOldPack(p):
sName = osp.basename(p)
if sName.lower().endswith("_pkg"):
return True
if _ISPACK_REXP.match(sName):
return True
return False
|
gpl-3.0
| -7,329,362,235,480,504,000
| 23.341772
| 94
| 0.597634
| false
| 3.366302
| true
| false
| false
|
solarsail/aerosol-tools
|
clustatlib/clucsv.py
|
1
|
3752
|
import numpy as np
import os
import os.path
class csvbuilder:
def __init__(self, cs):
self.cs = cs
if not os.path.isdir('csv'):
os.mkdir('csv')
def month_type_csv(self, site = None):
        label = 'all' if site is None else site
values, percentages = self.cs.month_type_stat(site)
header = ",".join(["type{},%".format(t) for t in range(1, len(values)+1)])
header = "month," + header
all = []
for i in range(len(values)):
all.append(values[i])
all.append(percentages[i])
mat = np.matrix(all)
mat = mat.transpose().tolist()
content = []
for i in range(12):
content.append("%d,%s" % (i+1, ','.join([str(field) for field in mat[i]])))
content = '\n'.join(content)
with open("csv/month_type_%s.csv" % label, 'w') as outfile:
outfile.write('\n'.join((header, content)))
def year_type_csv(self, start_year, end_year, site = None):
        label = 'all' if site is None else site
values, percentages = self.cs.year_type_stat(start_year, end_year, site)
header = ",".join(["type{},%".format(t) for t in range(1, len(values)+1)])
header = "year," + header
all = []
for i in range(len(values)):
all.append(values[i])
all.append(percentages[i])
mat = np.matrix(all)
mat = mat.transpose().tolist()
content = []
for i in range(start_year, end_year+1):
content.append("%d,%s" % (i, ','.join([str(field) for field in mat[i-start_year]])))
content = '\n'.join(content)
with open("csv/year_type_%s.csv" % label, 'w') as outfile:
outfile.write('\n'.join((header, content)))
def type_csv(self):
header = "type,count,percentage%"
all = self.cs.type_stat()
content = '\n'.join([','.join([str(field) for field in row]) for row in all])
with open("csv/type_count.csv", 'w') as outfile:
outfile.write('\n'.join((header, content)))
def site_type_csv(self):
all, types = self.cs.site_type_stat()
header = ",".join(["type{},%".format(t) for t in range(1, types+1)])
header = "site," + header
content = '\n'.join([','.join([str(field) for field in row]) for row in all])
with open("csv/site_type_count.csv", 'w') as outfile:
outfile.write('\n'.join((header, content)))
def type_stat_csv(self):
header = "type,refr440,refr675,refr870,refr1020,refi440,refi675,refi870,refi1020,volmedianradf,stddevf,volconf,volmedianradc,stddevc,volconc,ssa675,ssa870,ssa1020,asy440,asy675,asy870,sphericity"
list1 = self.cs.type_means()
list2 = self.cs.type_stddev()
l = []
for i in range(len(list1)):
l.append(list1[i])
stddevline = list(list2[i])
stddevline[0] = "stddev"
l.append(stddevline)
content = '\n'.join([','.join([str(field) for field in row]) for row in l])
with open("csv/type_stat.csv", 'w') as outfile:
outfile.write('\n'.join((header, content)))
def distances_csv(self):
clus, dist_mat = self.cs.all_distances()
header = "," + ",".join([str(cid) for cid in clus])
lines = []
first = 1
cur = 0
for clu in clus:
lines.append(str(clu) + ',' * first + ','.join(str(d) for d in dist_mat[cur:cur+len(clus)-first+1]))
cur += len(clus) - first + 1
first += 1
content = '\n'.join(lines)
with open("csv/distance_stat.csv", 'w') as outfile:
outfile.write('\n'.join((header, content)))
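# Shape of the CSV written above, with made-up cluster ids and distances
# (this assumes all_distances() also returns the zero self-distances, so
# each row starts on the diagonal): row i is left-padded with i commas and
# only the upper triangle is filled.
#
# ,1,2,3
# 1,0.0,0.4,0.7
# 2,,0.0,0.3
# 3,,,0.0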
|
gpl-3.0
| -5,397,643,248,379,671,000
| 41.647727
| 203
| 0.539179
| false
| 3.398551
| false
| false
| false
|
hawkeyexp/plugin.video.netflix
|
resources/lib/services/nfsession/session/base.py
|
1
|
2055
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Copyright (C) 2019 Stefano Gottardo - @CastagnaIT
Initialize the netflix session
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import resources.lib.common as common
from resources.lib.database.db_utils import TABLE_SESSION
from resources.lib.globals import G
from resources.lib.utils.logging import LOG
class SessionBase(object):
"""Initialize the netflix session"""
session = None
"""The requests.session object to handle communication to Netflix"""
verify_ssl = True
"""Use SSL verification when performing requests"""
# Functions from derived classes to allow perform particular operations in parent classes
external_func_activate_profile = None # (set by nfsession_op.py)
def __init__(self):
self.verify_ssl = bool(G.ADDON.getSettingBool('ssl_verification'))
self._init_session()
def _init_session(self):
"""Initialize the session to use for all future connections"""
try:
self.session.close()
LOG.info('Session closed')
except AttributeError:
pass
from requests import session
self.session = session()
        self.session.max_redirects = 10  # Too many redirects usually indicates a problem
self.session.headers.update({
'User-Agent': common.get_user_agent(enable_android_mediaflag_fix=True),
'Accept-Encoding': 'gzip, deflate, br',
'Host': 'www.netflix.com'
})
LOG.info('Initialized new session')
@property
def auth_url(self):
"""Access rights to make HTTP requests on an endpoint"""
return G.LOCAL_DB.get_value('auth_url', table=TABLE_SESSION)
@auth_url.setter
def auth_url(self, value):
G.LOCAL_DB.set_value('auth_url', value, TABLE_SESSION)
|
mit
| 8,894,260,233,918,521,000
| 33.830508
| 93
| 0.66618
| false
| 4.021526
| false
| false
| false
|
playerNaN/NaNPyGameEngine
|
engine.py
|
1
|
5921
|
import pygame
import sys
import os
from collections import namedtuple
import time
import resourcemanager
ColorList = namedtuple("ColorList", "black white red green blue")
colors = ColorList((0,0,0),(0xFF,0xFF,0xFF),(0xFF,0,0),(0,0xFF,0),(0,0,0xFF))
PyListener = namedtuple("PyListener", "condition effect")
PyEventListener = namedtuple("PyEventListener","events condition effect")
class Pyengine:
def __init__(self,size):
pygame.init()
self.__size = size
self.__fps = 60
self.__bg = colors.white
self.__fg = colors.black
self.__on_update = []
self.__on_draw = []
self.__keys_down = {}
self.__listeners = []
self.__event_handlers = []
self.__mouse_down = {}
self.__display = None
self.__screen_centerX = size[0]/2
self.__scaleX = 1.0
self.__scaleY = 1.0
self.__screen_centerY = size[1]/2
self.__clock = pygame.time.Clock()
self.__buffer_surface = None
self.__resource_manager = resourcemanager.ResourceManager()
self.__animators = {}
def add_animator(self,name,animator):
self.__animators[name] = animator
def remove_animator(self,name):
del self.__animators[name]
def get_animator(self,name):
return self.__animators[name]
def set_scale_x(self,x):
self.__scaleX = x
def get_scale_x(self):
return self.__scaleX
def set_scale_y(self,y):
self.__scaleY = y
def get_scale_y(self):
return self.__scaleY
def set_scale(self,s):
self.__scaleX = s[0]
self.__scaleY = s[1]
def get_scale(self):
return (self.__scaleX,self.__scaleY)
def set_fg(self,fg):
self.__fg = fg
def get_fg(self):
return self.__fg
def set_bg(self,bg):
self.__bg = bg
def get_bg(self):
return self.__bg
def get_display(self):
        return self.__display
def set_screen_center_x(self,x):
self.__screen_centerX = x
def get_screen_center_x(self):
return self.__screen_centerX
def set_screen_center_y(self,y):
self.__screen_centerY = y
def get_screen_center_y(self):
return self.__screen_centerY
def set_screen_center(self,pos):
self.__screen_centerX = pos[0]
self.__screen_centerY = pos[1]
def get_screen_center(self):
return (self.__screen_centerX,self.__screen_centerY)
def get_buffer_surface(self):
return self.__buffer_surface
def get_resource_manager(self):
return self.__resource_manager
def update_all_animators(self):
ms = self.__clock.get_time()
for i in self.__animators:
self.__animators[i].update(ms)
def draw_all_animators(self):
for i in self.__animators:
self.draw_image(self.__animators[i].get_current_image(),self.__animators[i].get_position())
def handle_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.exit()
elif event.type == pygame.KEYDOWN:
self.__keys_down[event.key] = True
elif event.type == pygame.KEYUP:
self.__keys_down[event.key] = False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                self.__mouse_down[event.button] = True
            elif event.type == pygame.MOUSEBUTTONUP:
                self.__mouse_down[event.button] = False
for handler in self.__event_handlers:
if event.type in handler.events and handler.condition(self,event):
handler.effect(self,event)
def draw_image(self,name,pos):
self.__buffer_surface.blit(self.__resource_manager.get_image(name),pos)
def is_key_down(self,key):
if not key in self.__keys_down:
return False
return self.__keys_down[key]
def is_mouse_button_down(self,button):
if not button in self.__mouse_down:
return False
return self.__mouse_down[button]
def run(self):
screen = pygame.display.set_mode(self.__size)
self.__display = screen
oldTime = time.time()
while True:
spf = 1.0 / self.__fps
self.handle_events()
self.update()
self.draw(screen)
self.__clock.tick(self.__fps)
def exit(self):
pygame.display.quit()
pygame.quit()
sys.exit()
def update(self):
self.update_all_animators()
for l in self.__listeners:
if l.condition(self):
l.effect(self)
def draw(self,display):
self.__buffer_surface = pygame.Surface(display.get_size())
display.fill(colors.red)
self.__buffer_surface.fill(self.__bg)
for od in self.__on_draw:
od(self,self.__buffer_surface)
self.draw_all_animators()
src_size = (self.__size[0]/self.__scaleX,self.__size[1]/self.__scaleY)
top = self.__screen_centerY - src_size[1] / 2
left = self.__screen_centerX - src_size[0] / 2
cropped = pygame.Surface(src_size)
cropped.blit(self.__buffer_surface,(0,0),(left,top,src_size[0],src_size[1]))
cropped = pygame.transform.scale(cropped,self.__size)
display.blit(cropped,(0,0))
pygame.display.update((0,0,self.__size[0],self.__size[1]))
def add_draw_listener(self,f):
self.__on_draw.append(f)
def add_listener(self,condition,effect):
self.__listeners.append(PyListener(condition,effect))
def add_on_update(self,effect):
        self.add_listener(lambda s: True, effect)
def add_event_listener(self,events,condition,effect):
self.__event_handlers.append(PyEventListener(events,condition,effect))
def set_fps(self,fps):
self.__fps = fps
def get_fps(self):
return self.__fps
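# Minimal usage sketch (window size, colors and key binding are arbitrary
# choices, not part of the engine): register a draw listener and an event
# listener, then start the main loop.
#
# engine = Pyengine((640, 480))
# engine.set_bg(colors.black)
# engine.add_draw_listener(
#     lambda eng, surf: pygame.draw.circle(surf, colors.red, (320, 240), 20))
# engine.add_event_listener(
#     [pygame.KEYDOWN],
#     lambda eng, event: event.key == pygame.K_ESCAPE,
#     lambda eng, event: eng.exit())
# engine.run()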
|
unlicense
| -5,850,025,484,649,098,000
| 32.647727
| 103
| 0.575578
| false
| 3.657196
| false
| false
| false
|
muminoff/savollar
|
savollar/pipelines.py
|
1
|
2093
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don"t forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from scrapy.conf import settings
from scrapy import log
from elasticsearch import Elasticsearch
from uuid import uuid1
from savollar.models import SavolModel
class ElasticSearchIndexPipeline(object):
def process_item(self, item, spider):
es = Elasticsearch([
{"host": settings["ELASTICSEARCH_HOST"]},
])
valid = True
for data in item:
if not data:
raise DropItem("Missing %s of item from %s" %(data, item["link"]))
if valid:
es.index(
index=settings["ELASTICSEARCH_INDEX"],
doc_type="info",
id=str(uuid1()),
body=dict(item)
)
log.msg("Item indexed to ElasticSearch database %s:%s" %
(settings["ELASTICSEARCH_HOST"], settings["ELASTICSEARCH_PORT"]),
level=log.DEBUG, spider=spider)
return item
class CassandraExportPipleline(object):
def process_item(self, item, spider):
valid = True
for data in item:
if not data:
raise DropItem("Missing %s of item from %s" %(data, item["link"]))
if valid:
model = SavolModel()
model.title = item["title"]
model.question = item["question"]
model.answer = item["answer"]
model.author = item["author"]
model.permalink = item["permalink"]
model.year = int(item["year"])
model.month = int(item["month"])
model.date = int(item["date"])
model.tags = item["title"].split()
model.save()
log.msg("Item exported to Cassandra database %s/%s" %
(settings["CASSANDRA_HOST"], settings["CASSANDRA_KEYSPACE"]),
level=log.DEBUG, spider=spider)
return item
|
apache-2.0
| 5,381,455,535,540,653,000
| 33.883333
| 85
| 0.565695
| false
| 4.161034
| false
| false
| false
|
shanot/imp
|
modules/rmf/examples/link.py
|
2
|
1236
|
## \example rmf/link.py
# This example is like module/rmf/pdb.py except that instead of creating a
# new hierarchy from the rmf file, it simply links the existing hierarchy
# to the file. This mechanism can be used for loading multiple
# conformations for scoring or other analysis without having to set up
# restraints and things each time.
from __future__ import print_function
import IMP.atom
import IMP.rmf
import RMF
import sys
IMP.setup_from_argv(sys.argv, "link")
m = IMP.Model()
# Create a new IMP.atom.Hierarchy from the contents of the pdb file
h = IMP.atom.read_pdb(IMP.rmf.get_example_path("simple.pdb"), m)
tfn = "link.rmf"
print("File name is", tfn)
# open the file, clearing any existing contents
rh = RMF.create_rmf_file(tfn)
# add the hierarchy to the file
IMP.rmf.add_hierarchies(rh, [h])
# add the current configuration to the file as frame 0
IMP.rmf.save_frame(rh)
# close the file
del rh
# reopen it, don't clear the file when opening it
rh = RMF.open_rmf_file_read_only(tfn)
# link to the existing pdb hierarchy
IMP.rmf.link_hierarchies(rh, [h])
# load the same coordinates in, ok, that is not very exciting
IMP.rmf.load_frame(rh, RMF.FrameID(0))
print("Try running rmf_display or rmf_show on", tfn)
|
gpl-3.0
| 2,193,594,142,939,475,700
| 25.869565
| 74
| 0.7411
| false
| 3
| false
| false
| false
|
karesansui/karesansui
|
bin/restart_network.py
|
1
|
4392
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import sys
import logging
from optparse import OptionParser
from ksscommand import KssCommand, KssCommandException, KssCommandOptException
import __cmd__
try:
import karesansui
from karesansui import __version__
from karesansui.lib.virt.virt import KaresansuiVirtConnection, KaresansuiVirtException
from karesansui.lib.const import NETWORK_IFCONFIG_COMMAND, NETWORK_BRCTL_COMMAND
from karesansui.lib.utils import load_locale
from karesansui.lib.utils import execute_command
except ImportError, e:
print >>sys.stderr, "[Error] some packages not found. - %s" % e
sys.exit(1)
_ = load_locale()
usage = '%prog [options]'
def getopts():
optp = OptionParser(usage=usage, version=__version__)
optp.add_option('-n', '--name', dest='name', help=_('Network name'))
optp.add_option('-f', '--force', dest='force', action="store_true", help=_('Do everything to bring up network'))
return optp.parse_args()
def chkopts(opts):
if not opts.name:
raise KssCommandOptException('ERROR: %s option is required.' % '-n or --name')
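# Typical invocation (the network name "default" is only an example):
#
#   restart_network.py --name default
#   restart_network.py -n default --force   # also tear down the bridge if needed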
class RestartNetwork(KssCommand):
def process(self):
(opts, args) = getopts()
chkopts(opts)
self.up_progress(10)
conn = KaresansuiVirtConnection(readonly=False)
try:
active_networks = conn.list_active_network()
inactive_networks = conn.list_inactive_network()
if not (opts.name in active_networks or opts.name in inactive_networks):
raise KssCommandException('Could not find the specified network. - net=%s' % (opts.name))
self.up_progress(10)
try:
conn.stop_network(opts.name)
except KaresansuiVirtException, e:
                if opts.force is not True:
raise KssCommandException('Could not stop the specified network. - net=%s' % (opts.name))
self.up_progress(20)
try:
conn.start_network(opts.name)
except KaresansuiVirtException, e:
if opts.force is not True:
raise KssCommandException('Could not start the specified network. - net=%s' % (opts.name))
# try to bring down existing bridge
kvn = conn.search_kvn_networks(opts.name)[0]
try:
bridge_name = kvn.get_info()['bridge']['name']
except KeyError:
pass
ret, res = execute_command([NETWORK_IFCONFIG_COMMAND, bridge_name, 'down'])
ret, res = execute_command([NETWORK_BRCTL_COMMAND, 'delbr', bridge_name])
# try again
conn.start_network(opts.name)
self.up_progress(10)
if not (opts.name in conn.list_active_network()):
raise KssCommandException('Failed to start network. - net=%s' % (opts.name))
self.logger.info('Restarted network. - net=%s' % (opts.name))
print >>sys.stdout, _('Restarted network. - net=%s') % (opts.name)
return True
finally:
conn.close()
if __name__ == "__main__":
target = RestartNetwork()
sys.exit(target.run())
|
mit
| 2,224,646,137,374,195,000
| 36.538462
| 116
| 0.645492
| false
| 3.981868
| false
| false
| false
|
qnzhou/ThingiverseCrawler
|
thingiverse_crawler.py
|
1
|
9320
|
#!/usr/bin/env python
import argparse
import datetime
import os
import os.path
import requests
import re
import time
import urllib
import urlparse
from subprocess import check_call
def utc_mktime(utc_tuple):
"""Returns number of seconds elapsed since epoch
    Note that no timezone is taken into consideration.
utc tuple must be: (year, month, day, hour, minute, second)
"""
if len(utc_tuple) == 6:
utc_tuple += (0, 0, 0)
return time.mktime(utc_tuple) - time.mktime((1970, 1, 1, 0, 0, 0, 0, 0, 0))
def datetime_to_timestamp(dt):
"""Converts a datetime object to UTC timestamp"""
return int(utc_mktime(dt.timetuple()))
def parse_thing_ids(text):
pattern = "thing:(\d{5,7})"
matched = re.findall(pattern, text)
return [int(val) for val in matched]
def parse_file_ids(text):
pattern = "download:(\d{5,7})"
matched = re.findall(pattern, text)
return [int(val) for val in matched]
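# Quick illustration of the two scrapers above (the HTML snippet is
# invented); duplicates are kept here and filtered out by the callers.
#
# >>> parse_thing_ids('<a href="/thing:123456">a</a> <a href="/thing:123456">b</a>')
# [123456, 123456]
# >>> parse_file_ids('href="/download:654321"')
# [654321]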
known_licenses = [
("Creative Commons - Attribution",
re.compile("http://creativecommons.org/licenses/by/\d(.\d)?/")),
("Creative Commons - Attribution - Share Alike",
re.compile("http://creativecommons.org/licenses/by-sa/\d(.\d)?/")),
("Creative Commons - Attribution - No Derivatives",
re.compile("http://creativecommons.org/licenses/by-nd/\d(.\d)?/")),
("Creative Commons - Attribution - Non-Commercial",
re.compile("http://creativecommons.org/licenses/by-nc/\d(.\d)?/")),
("Attribution - Non-Commercial - Share Alike",
re.compile("http://creativecommons.org/licenses/by-nc-sa/\d(.\d)?/")),
("Attribution - Non-Commercial - No Derivatives",
re.compile("http://creativecommons.org/licenses/by-nc-nd/\d(.\d)?/")),
("Creative Commons - Public Domain Dedication",
re.compile("http://creativecommons.org/publicdomain/zero/\d(.\d)?/")),
("GNU - GPL",
re.compile("http://creativecommons.org/licenses/GPL/\d(.\d)?/")),
("GNU - LGPL",
re.compile("http://creativecommons.org/licenses/LGPL/\d(.\d)?/")),
("BSD License",
re.compile("http://creativecommons.org/licenses/BSD/")),
("Nokia",
re.compile("http://www.developer.nokia.com/Terms_and_conditions/3d-printing.xhtml")),
("Public Domain",
re.compile("http://creativecommons.org/licenses/publicdomain/")),
]
def parse_license(text):
for name, pattern in known_licenses:
if pattern.search(text):
return name
return "unknown_license"
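# Example of the matching above (the URL is a representative CC link, not
# taken from a real page):
#
# >>> parse_license('<a href="http://creativecommons.org/licenses/by-sa/3.0/">')
# 'Creative Commons - Attribution - Share Alike'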
def crawl_thing_ids(N, end_date=None):
    """ This method extracts N things that were uploaded to thingiverse.com
before end_date. If end_date is None, use today's date.
"""
baseurl = "http://www.thingiverse.com/search/recent/things/page:{}?q=&start_date=&stop_date={}&search_mode=advanced&description=&username=&tags=&license="
end_date = datetime_to_timestamp(end_date)
thing_ids = set()
for i in range(N/12 + 1):
url = baseurl.format(i, end_date)
r = requests.get(url)
assert(r.status_code==200)
thing_ids.update(parse_thing_ids(r.text))
if len(thing_ids) > N:
break
        # Sleep a bit to avoid being mistaken for a DoS attack.
time.sleep(0.5)
return thing_ids
def crawl_things(N, output_dir, term=None, category=None, source=None, organize=False):
#baseurl = "http://www.thingiverse.com/newest/page:{}"
#baseurl = "http://www.thingiverse.com/explore/popular/page:{}"
key = None
if term is None:
        assert source is not None
        url_prefix = "http://www.thingiverse.com/explore/{}/".format(source)
if category is None:
baseurl = url_prefix + "page:{}"
else:
baseurl = url_prefix + urllib.quote_plus(category) + "/page:{}"
key = category
else:
baseurl = "http://www.thingiverse.com/search/page:{}?type=things&q=" + urllib.quote_plus(term)
key = term
thing_ids = set()
file_ids = set()
records = []
num_files = 0
page = 0
previous_path = ''
while True:
url = baseurl.format(page+1)
contents = get_url(url)
page += 1
# If the previous url ends up being the same as the old one, we should stop as there are no more pages
current_path = urlparse.urlparse(contents.url).path
if previous_path == current_path:
return records
else:
previous_path = current_path
for thing_id in parse_thing_ids(contents.text):
if thing_id in thing_ids:
continue
print("thing id: {}".format(thing_id))
thing_ids.add(thing_id)
license, thing_files = get_thing(thing_id)
for file_id in thing_files:
if file_id in file_ids:
continue
file_ids.add(file_id)
print(" file id: {}".format(file_id))
result = download_file(file_id, thing_id, output_dir, organize)
if result is None: continue
filename, link = result
if filename is not None:
records.append((thing_id, file_id, filename, license, link))
if N is not None and len(records) >= N:
return records
            # Sleep a bit to avoid being mistaken for a DoS attack.
time.sleep(0.5)
save_records(records, key)
def get_thing(thing_id):
base_url = "http://www.thingiverse.com/{}:{}"
file_ids = []
url = base_url.format("thing", thing_id)
contents = get_url(url).text
license = parse_license(contents)
return license, parse_file_ids(contents)
def get_url(url, time_out=600):
r = requests.get(url)
sleep_time = 1.0
while r.status_code != 200:
print("sleep {}s".format(sleep_time))
print(url)
time.sleep(sleep_time)
r = requests.get(url)
sleep_time += 2
if (sleep_time > time_out):
# We have sleeped for over 10 minutes, the page probably does
# not exist.
break
if r.status_code != 200:
print("failed to retrieve {}".format(url))
else:
return r
# return r.text
def get_download_link(file_id):
base_url = "https://www.thingiverse.com/{}:{}"
url = base_url.format("download", file_id)
r = requests.head(url)
link = r.headers.get("Location", None)
if link is not None:
__, ext = os.path.splitext(link)
if ext.lower() not in [".stl", ".obj", ".ply", ".off"]:
return None
return link
def download_file(file_id, thing_id, output_dir, organize):
link = get_download_link(file_id)
if link is None:
return None
__, ext = os.path.splitext(link)
output_file = "{}{}".format(file_id, ext.lower())
if organize:
output_file = os.path.join(str(thing_id), output_file)
output_file = os.path.join(output_dir, output_file)
command = "wget -q --tries=20 --waitretry 20 -O {} {}".format(output_file, link)
#check_call(command.split())
return output_file, link
def save_records(records, key=None):
# Enforce kebab case file name
output_name = re.sub('(\w) (\w)', r'\1-\2', key).lower()+"-" if key else ""
output_name += "summary"
with open(output_name+".csv", 'w') as fout:
fout.write("thing_id, file_id, file, license, link\n")
for entry in records:
fout.write(",".join([str(val) for val in entry]) + "\n")
def parse_args():
parser = argparse.ArgumentParser(
description="Crawl data from thingiverse",
epilog="Written by Qingnan Zhou <qnzhou at gmail dot com> Modified by Mike Gleason")
parser.add_argument("--output-dir", "-o", help="output directories",
default=".")
parser.add_argument("--number", "-n", type=int,
help="how many files to crawl", default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument("--search-term", "-s", type=str, default=None,
help="term to search for")
group.add_argument("--category", "-c", type=str, default=None,
help="catergory to search for")
parser.add_argument('--organize', dest='organized', default=False, action='store_true',
help="organize files by their main category")
parser.add_argument("--source", choices=("newest", "featured", "popular",
"verified", "made-things", "derivatives", "customizable",
"random-things", "firehose"), default="featured");
return parser
def main():
parser = parse_args()
args = parser.parse_args()
if args.number is None and (args.search_term is None and args.category is None):
parser.error('Number or Search/Category Term required')
output_dir = args.output_dir
number = args.number
records = crawl_things(
args.number,
output_dir,
args.search_term,
args.category,
args.source,
args.organized)
if args.search_term:
save_records(records, args.search_term)
elif args.category:
save_records(records, args.category)
else:
save_records(records)
if __name__ == "__main__":
main()
|
mit
| -6,989,201,447,378,768,000
| 33.64684
| 158
| 0.593777
| false
| 3.534319
| false
| false
| false
|