| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aguijarro/DataSciencePython
|
DataWrangling/CaseStudy/project/audit.py
|
1
|
2399
|
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
street_type_re = re.compile(r'\S+\.?$', re.IGNORECASE)
city_type_re = re.compile(r'\S+\.?$', re.IGNORECASE)
expected = ["Street", "Avenue", "Boulevard", "Drive",
"Court", "Place", "Square", "Lane", "Road",
"Trail", "Parkway", "Commons"]
def print_sorted_dict(d):
keys = d.keys()
keys = sorted(keys, key=lambda s: s.lower())
for k in keys:
v = d[k]
print "%s: %d" % (k, v)
def audit_street_type(street_types, street_types_count, street_name):
m = street_type_re.search(street_name)
if m:
street_type = m.group()
street_types_count[street_type] += 1
if street_type not in expected:
street_types[street_type].add(street_name)
def is_street_name(elem):
return (elem.tag == "tag") and (elem.attrib['k'] == "addr:street")
def audit(input_file):
osm_file = open(input_file, "r")
street_types = defaultdict(set)
street_types_count = defaultdict(int)
for event, elem in ET.iterparse(osm_file, events=("start",)):
if elem.tag == "node" or elem.tag == "way":
for tag in elem.iter("tag"):
if is_street_name(tag):
audit_street_type(street_types,
street_types_count,
tag.attrib['v'])
osm_file.close()
return street_types, street_types_count
def audit_city_type(city_types, city_types_count, city_name):
m = city_type_re.search(city_name)
if m:
city_type = m.group()
city_types_count[city_type] += 1
if city_type not in expected:
city_types[city_type].add(city_name)
def is_city_name(elem):
return (elem.tag == "tag") and (elem.attrib['k'] == "addr:city")
def audit_city(input_file):
osm_file = open(input_file, "r")
city_types = defaultdict(set)
city_types_count = defaultdict(int)
for event, elem in ET.iterparse(osm_file, events=("start",)):
if elem.tag == "node" or elem.tag == "way":
for tag in elem.iter("tag"):
if is_city_name(tag):
audit_city_type(city_types,
city_types_count,
tag.attrib['v'])
osm_file.close()
return city_types, city_types_count
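# --- Illustrative usage sketch (not part of the original module) ---
# Assuming an OSM extract named "sample.osm" (hypothetical filename), the
# audit() helper returns a mapping of unexpected street types to the street
# names that use them, plus a per-type frequency count.
if __name__ == "__main__":
    street_types, street_counts = audit("sample.osm")
    print_sorted_dict(street_counts)
    for unexpected_type, names in street_types.items():
        print "%s: %s" % (unexpected_type, ", ".join(sorted(names)))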
|
mit
| -636,464,583,109,244,500
| 31
| 70
| 0.557316
| false
| 3.29533
| false
| false
| false
|
ppppn/bib-generator
|
gen_bib.py
|
1
|
1967
|
#! /bin/sh
""":"
exec python3 "$0" ${1+"$@"}
"""
import argparse
import csv
import re
from datetime import datetime
from html_format import HTML_FORMAT
def readStyles(format_csv_fname):
formats = {}
f = open(format_csv_fname, encoding='sjis')
reader = csv.reader(f)
category_header = next(reader)[0]
for format_name, format in reader:
format = re.sub('{', '{0[', format)
formats[format_name] = re.sub('}', ']}', format)
return category_header, formats
def readBibList(biblist_csv_fname):
f = open(biblist_csv_fname, encoding='sjis')
reader = csv.reader(f)
properties = next(reader)
bib_list = []
for bib in reader:
current_bib = {}
i = 0
for i in range(len(properties)):
current_bib[properties[i]] = bib[i]
bib_list.append(current_bib)
return bib_list
def applyStyleToBib(format, bib):
line = format.format(bib)
line = re.sub('///(.*)///', '<I>\\1</I>', line)
return line
def generateHTML(formatfname, biblistfname):
category_header, formats = readStyles(formatfname)
biblist = readBibList(biblistfname)
body = ''
for current_bib in biblist:
selected_format = formats[current_bib.pop(category_header)]
body += applyStyleToBib(selected_format, current_bib) + '<BR/>\n'
outputfile = open('result.html', 'w', encoding='utf-8')
outputfile.write(HTML_FORMAT.format(bib_body=body,
time_stamp=datetime.now()))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('formats_csv_fname', action='store', nargs='?',
const=None, default='formats.csv', type=str)
parser.add_argument('biblist_csv_fname', action='store', nargs='?',
const=None, default='biblist.csv', type=str)
args = parser.parse_args()
generateHTML(args.formats_csv_fname, args.biblist_csv_fname)
if __name__ == '__main__':
main()
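# --- Illustrative sketch (not part of the original script) ---
# readStyles() rewrites '{field}' placeholders into '{0[field]}' so that
# str.format() can index into a single bib dict, and applyStyleToBib()
# turns ///.../// spans into <I>...</I>. A minimal worked example, assuming
# a hypothetical format string and bib entry:
def _demo_apply_style():
    fmt = re.sub('}', ']}', re.sub('{', '{0[', '{author} ({year}). ///{title}///.'))
    bib = {'author': 'Doe, J.', 'year': '2020', 'title': 'A Title'}
    # Returns 'Doe, J. (2020). <I>A Title</I>.'
    return applyStyleToBib(fmt, bib)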
|
mit
| -3,812,726,738,535,261,000
| 31.245902
| 73
| 0.613625
| false
| 3.506239
| false
| false
| false
|
daizhengy/RDS
|
trove/guestagent/datastore/mysql/service.py
|
1
|
43726
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import re
import uuid
from collections import defaultdict
import sqlalchemy
from sqlalchemy import exc
from sqlalchemy import interfaces
from sqlalchemy.sql.expression import text
from trove.common import cfg
from trove.common import utils as utils
from trove.common import exception
from trove.common import instance as rd_instance
from trove.common.exception import PollTimeOut
from trove.guestagent.common import operating_system
from trove.guestagent.common import sql_query
from trove.guestagent.db import models
from trove.guestagent import pkg
from trove.guestagent.datastore import service
from trove.openstack.common import log as logging
from trove.common.i18n import _
ADMIN_USER_NAME = "os_admin"
LOG = logging.getLogger(__name__)
FLUSH = text(sql_query.FLUSH)
ENGINE = None
PREPARING = False
UUID = False
TMP_MYCNF = "/tmp/my.cnf.tmp"
MYSQL_BASE_DIR = "/var/lib/mysql"
CONF = cfg.CONF
MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mysql'
INCLUDE_MARKER_OPERATORS = {
True: ">=",
False: ">"
}
OS_NAME = operating_system.get_os()
MYSQL_CONFIG = {operating_system.REDHAT: "/etc/my.cnf",
operating_system.DEBIAN: "/etc/mysql/my.cnf",
operating_system.SUSE: "/etc/my.cnf"}[OS_NAME]
MYSQL_SERVICE_CANDIDATES = ["mysql", "mysqld", "mysql-server"]
MYSQL_BIN_CANDIDATES = ["/usr/sbin/mysqld", "/usr/libexec/mysqld"]
MYCNF_OVERRIDES = "/etc/mysql/conf.d/overrides.cnf"
MYCNF_OVERRIDES_TMP = "/tmp/overrides.cnf.tmp"
MYCNF_REPLMASTER = "/etc/mysql/conf.d/0replmaster.cnf"
MYCNF_REPLSLAVE = "/etc/mysql/conf.d/1replslave.cnf"
MYCNF_REPLCONFIG_TMP = "/tmp/replication.cnf.tmp"
# Create a package impl
packager = pkg.Package()
def clear_expired_password():
"""
    Some MySQL installations generate a random root password
    and save it in /root/.mysql_secret. This password is
    expired and should be changed by a client that supports expired passwords.
"""
LOG.debug("Removing expired password.")
secret_file = "/root/.mysql_secret"
try:
out, err = utils.execute("cat", secret_file,
run_as_root=True, root_helper="sudo")
except exception.ProcessExecutionError:
LOG.exception(_("/root/.mysql_secret does not exist."))
return
m = re.match('# The random password set for the root user at .*: (.*)',
out)
if m:
try:
out, err = utils.execute("mysqladmin", "-p%s" % m.group(1),
"password", "", run_as_root=True,
root_helper="sudo")
except exception.ProcessExecutionError:
LOG.exception(_("Cannot change mysql password."))
return
utils.execute("rm", "-f", secret_file, run_as_root=True,
root_helper="sudo")
LOG.debug("Expired password removed.")
def get_auth_password():
pwd, err = utils.execute_with_timeout(
"sudo",
"awk",
"/password\\t=/{print $3; exit}",
MYSQL_CONFIG)
if err:
LOG.error(err)
raise RuntimeError("Problem reading my.cnf! : %s" % err)
return pwd.strip()
def get_engine():
"""Create the default engine with the updated admin user."""
#TODO(rnirmal):Based on permissions issues being resolved we may revert
#url = URL(drivername='mysql', host='localhost',
# query={'read_default_file': '/etc/mysql/my.cnf'})
global ENGINE
if ENGINE:
return ENGINE
pwd = get_auth_password()
ENGINE = sqlalchemy.create_engine("mysql://%s:%s@localhost:3306" %
(ADMIN_USER_NAME, pwd.strip()),
pool_recycle=7200,
echo=CONF.sql_query_logging,
listeners=[KeepAliveConnection()])
return ENGINE
def load_mysqld_options():
#find mysqld bin
for bin in MYSQL_BIN_CANDIDATES:
if os.path.isfile(bin):
mysqld_bin = bin
break
else:
return {}
try:
out, err = utils.execute(mysqld_bin, "--print-defaults",
run_as_root=True, root_helper="sudo")
arglist = re.split("\n", out)[1].split()
args = defaultdict(list)
for item in arglist:
if "=" in item:
key, value = item.split("=", 1)
args[key.lstrip("--")].append(value)
else:
args[item.lstrip("--")].append(None)
return args
except exception.ProcessExecutionError:
return {}
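# --- Illustrative note (not part of the original module) ---
# load_mysqld_options() parses the second line of `mysqld --print-defaults`
# output, e.g. "--user=mysql --datadir=/var/lib/mysql --skip-external-locking",
# into a defaultdict of lists keyed by option name with leading dashes
# stripped: {'user': ['mysql'], 'datadir': ['/var/lib/mysql'],
# 'skip-external-locking': [None]}. Options given more than once accumulate
# several values under the same key.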
class MySqlAppStatus(service.BaseDbStatus):
@classmethod
def get(cls):
if not cls._instance:
cls._instance = MySqlAppStatus()
return cls._instance
def _get_actual_db_status(self):
try:
out, err = utils.execute_with_timeout(
"/usr/bin/mysqladmin",
"ping", run_as_root=True, root_helper="sudo",
log_output_on_error=True)
LOG.info(_("MySQL Service Status is RUNNING."))
return rd_instance.ServiceStatuses.RUNNING
except exception.ProcessExecutionError:
LOG.exception(_("Failed to get database status."))
try:
out, err = utils.execute_with_timeout("/bin/ps", "-C",
"mysqld", "h")
pid = out.split()[0]
# TODO(rnirmal): Need to create new statuses for instances
# where the mysql service is up, but unresponsive
LOG.info(_('MySQL Service Status %(pid)s is BLOCKED.') %
{'pid': pid})
return rd_instance.ServiceStatuses.BLOCKED
except exception.ProcessExecutionError:
LOG.exception(_("Process execution failed."))
mysql_args = load_mysqld_options()
pid_file = mysql_args.get('pid_file',
['/var/run/mysqld/mysqld.pid'])[0]
if os.path.exists(pid_file):
LOG.info(_("MySQL Service Status is CRASHED."))
return rd_instance.ServiceStatuses.CRASHED
else:
LOG.info(_("MySQL Service Status is SHUTDOWN."))
return rd_instance.ServiceStatuses.SHUTDOWN
class LocalSqlClient(object):
"""A sqlalchemy wrapper to manage transactions."""
def __init__(self, engine, use_flush=True):
self.engine = engine
self.use_flush = use_flush
def __enter__(self):
self.conn = self.engine.connect()
self.trans = self.conn.begin()
return self.conn
def __exit__(self, type, value, traceback):
if self.trans:
if type is not None: # An error occurred
self.trans.rollback()
else:
if self.use_flush:
self.conn.execute(FLUSH)
self.trans.commit()
self.conn.close()
def execute(self, t, **kwargs):
try:
return self.conn.execute(t, kwargs)
except Exception:
self.trans.rollback()
self.trans = None
raise
class MySqlAdmin(object):
"""Handles administrative tasks on the MySQL database."""
def _associate_dbs(self, user):
"""Internal. Given a MySQLUser, populate its databases attribute."""
LOG.debug("Associating dbs to user %s at %s." %
(user.name, user.host))
with LocalSqlClient(get_engine()) as client:
q = sql_query.Query()
q.columns = ["grantee", "table_schema"]
q.tables = ["information_schema.SCHEMA_PRIVILEGES"]
q.group = ["grantee", "table_schema"]
q.where = ["privilege_type != 'USAGE'"]
t = text(str(q))
db_result = client.execute(t)
for db in db_result:
LOG.debug("\t db: %s." % db)
if db['grantee'] == "'%s'@'%s'" % (user.name, user.host):
mysql_db = models.MySQLDatabase()
mysql_db.name = db['table_schema']
user.databases.append(mysql_db.serialize())
def change_passwords(self, users):
"""Change the passwords of one or more existing users."""
LOG.debug("Changing the password of some users.")
with LocalSqlClient(get_engine()) as client:
for item in users:
LOG.debug("Changing password for user %s." % item)
user_dict = {'_name': item['name'],
'_host': item['host'],
'_password': item['password']}
user = models.MySQLUser()
user.deserialize(user_dict)
LOG.debug("\tDeserialized: %s." % user.__dict__)
uu = sql_query.UpdateUser(user.name, host=user.host,
clear=user.password)
t = text(str(uu))
client.execute(t)
def update_attributes(self, username, hostname, user_attrs):
"""Change the attributes of an existing user."""
LOG.debug("Changing user attributes for user %s." % username)
user = self._get_user(username, hostname)
db_access = set()
grantee = set()
with LocalSqlClient(get_engine()) as client:
q = sql_query.Query()
q.columns = ["grantee", "table_schema"]
q.tables = ["information_schema.SCHEMA_PRIVILEGES"]
q.group = ["grantee", "table_schema"]
q.where = ["privilege_type != 'USAGE'"]
t = text(str(q))
db_result = client.execute(t)
for db in db_result:
grantee.add(db['grantee'])
if db['grantee'] == "'%s'@'%s'" % (user.name, user.host):
db_name = db['table_schema']
db_access.add(db_name)
with LocalSqlClient(get_engine()) as client:
uu = sql_query.UpdateUser(user.name, host=user.host,
clear=user_attrs.get('password'),
new_user=user_attrs.get('name'),
new_host=user_attrs.get('host'))
t = text(str(uu))
client.execute(t)
uname = user_attrs.get('name') or username
host = user_attrs.get('host') or hostname
find_user = "'%s'@'%s'" % (uname, host)
if find_user not in grantee:
self.grant_access(uname, host, db_access)
def create_database(self, databases):
"""Create the list of specified databases."""
with LocalSqlClient(get_engine()) as client:
for item in databases:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(item)
cd = sql_query.CreateDatabase(mydb.name,
mydb.character_set,
mydb.collate)
t = text(str(cd))
client.execute(t)
def create_user(self, users):
"""Create users and grant them privileges for the
specified databases.
"""
with LocalSqlClient(get_engine()) as client:
for item in users:
user = models.MySQLUser()
user.deserialize(item)
# TODO(cp16net):Should users be allowed to create users
# 'os_admin' or 'debian-sys-maint'
g = sql_query.Grant(user=user.name, host=user.host,
clear=user.password)
t = text(str(g))
client.execute(t)
for database in user.databases:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(database)
g = sql_query.Grant(permissions='ALL', database=mydb.name,
user=user.name, host=user.host,
clear=user.password)
t = text(str(g))
client.execute(t)
def delete_database(self, database):
"""Delete the specified database."""
with LocalSqlClient(get_engine()) as client:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(database)
dd = sql_query.DropDatabase(mydb.name)
t = text(str(dd))
client.execute(t)
def delete_user(self, user):
"""Delete the specified user."""
mysql_user = models.MySQLUser()
mysql_user.deserialize(user)
self.delete_user_by_name(mysql_user.name, mysql_user.host)
def delete_user_by_name(self, name, host='%'):
with LocalSqlClient(get_engine()) as client:
du = sql_query.DropUser(name, host=host)
t = text(str(du))
LOG.debug("delete_user_by_name: %s", t)
client.execute(t)
def get_user(self, username, hostname):
user = self._get_user(username, hostname)
if not user:
return None
return user.serialize()
def _get_user(self, username, hostname):
"""Return a single user matching the criteria."""
user = models.MySQLUser()
try:
user.name = username # Could possibly throw a BadRequest here.
except exception.ValueError as ve:
LOG.exception(_("Error Getting user information"))
raise exception.BadRequest(_("Username %(user)s is not valid"
": %(reason)s") %
{'user': username, 'reason': ve.message}
)
with LocalSqlClient(get_engine()) as client:
q = sql_query.Query()
q.columns = ['User', 'Host', 'Password']
q.tables = ['mysql.user']
q.where = ["Host != 'localhost'",
"User = '%s'" % username,
"Host = '%s'" % hostname]
q.order = ['User', 'Host']
t = text(str(q))
result = client.execute(t).fetchall()
LOG.debug("Getting user information %s." % result)
if len(result) != 1:
return None
found_user = result[0]
user.password = found_user['Password']
user.host = found_user['Host']
self._associate_dbs(user)
return user
def grant_access(self, username, hostname, databases):
"""Grant a user permission to use a given database."""
user = self._get_user(username, hostname)
mydb = models.ValidatedMySQLDatabase()
with LocalSqlClient(get_engine()) as client:
for database in databases:
try:
mydb.name = database
except ValueError:
LOG.exception(_("Error granting access"))
raise exception.BadRequest(_(
"Grant access to %s is not allowed") % database)
g = sql_query.Grant(permissions='ALL', database=mydb.name,
user=user.name, host=user.host,
hashed=user.password)
t = text(str(g))
client.execute(t)
def is_root_enabled(self):
"""Return True if root access is enabled; False otherwise."""
return MySqlRootAccess.is_root_enabled()
def enable_root(self, root_password=None):
"""Enable the root user global access and/or
reset the root password.
"""
return MySqlRootAccess.enable_root(root_password)
def list_databases(self, limit=None, marker=None, include_marker=False):
"""List databases the user created on this mysql instance."""
LOG.debug("---Listing Databases---")
ignored_database_names = "'%s'" % "', '".join(CONF.ignore_dbs)
LOG.debug("The following database names are on ignore list and will "
"be omitted from the listing: %s" % ignored_database_names)
databases = []
with LocalSqlClient(get_engine()) as client:
# If you have an external volume mounted at /var/lib/mysql
# the lost+found directory will show up in mysql as a database
# which will create errors if you try to do any database ops
# on it. So we remove it here if it exists.
q = sql_query.Query()
q.columns = [
'schema_name as name',
'default_character_set_name as charset',
'default_collation_name as collation',
]
q.tables = ['information_schema.schemata']
q.where = ["schema_name NOT IN (" + ignored_database_names + ")"]
q.order = ['schema_name ASC']
if limit:
q.limit = limit + 1
if marker:
q.where.append("schema_name %s '%s'" %
(INCLUDE_MARKER_OPERATORS[include_marker],
marker))
t = text(str(q))
database_names = client.execute(t)
next_marker = None
LOG.debug("database_names = %r." % database_names)
for count, database in enumerate(database_names):
if count >= limit:
break
LOG.debug("database = %s." % str(database))
mysql_db = models.MySQLDatabase()
mysql_db.name = database[0]
next_marker = mysql_db.name
mysql_db.character_set = database[1]
mysql_db.collate = database[2]
databases.append(mysql_db.serialize())
LOG.debug("databases = " + str(databases))
if database_names.rowcount <= limit:
next_marker = None
return databases, next_marker
def list_users(self, limit=None, marker=None, include_marker=False):
"""List users that have access to the database."""
'''
SELECT
User,
Host,
Marker
FROM
(SELECT
User,
Host,
CONCAT(User, '@', Host) as Marker
FROM mysql.user
ORDER BY 1, 2) as innerquery
WHERE
Marker > :marker
ORDER BY
Marker
LIMIT :limit;
'''
LOG.debug("---Listing Users---")
users = []
with LocalSqlClient(get_engine()) as client:
mysql_user = models.MySQLUser()
iq = sql_query.Query() # Inner query.
iq.columns = ['User', 'Host', "CONCAT(User, '@', Host) as Marker"]
iq.tables = ['mysql.user']
iq.order = ['User', 'Host']
innerquery = str(iq).rstrip(';')
oq = sql_query.Query() # Outer query.
oq.columns = ['User', 'Host', 'Marker']
oq.tables = ['(%s) as innerquery' % innerquery]
oq.where = ["Host != 'localhost'"]
oq.order = ['Marker']
if marker:
oq.where.append("Marker %s '%s'" %
(INCLUDE_MARKER_OPERATORS[include_marker],
marker))
if limit:
oq.limit = limit + 1
t = text(str(oq))
result = client.execute(t)
next_marker = None
LOG.debug("result = " + str(result))
for count, row in enumerate(result):
if count >= limit:
break
LOG.debug("user = " + str(row))
mysql_user = models.MySQLUser()
mysql_user.name = row['User']
mysql_user.host = row['Host']
self._associate_dbs(mysql_user)
next_marker = row['Marker']
users.append(mysql_user.serialize())
if result.rowcount <= limit:
next_marker = None
LOG.debug("users = " + str(users))
return users, next_marker
def revoke_access(self, username, hostname, database):
"""Revoke a user's permission to use a given database."""
user = self._get_user(username, hostname)
with LocalSqlClient(get_engine()) as client:
r = sql_query.Revoke(database=database,
user=user.name,
host=user.host)
t = text(str(r))
client.execute(t)
def list_access(self, username, hostname):
"""Show all the databases to which the user has more than
USAGE granted.
"""
user = self._get_user(username, hostname)
return user.databases
class KeepAliveConnection(interfaces.PoolListener):
"""
A connection pool listener that ensures live connections are returned
from the connection pool at checkout. This alleviates the problem of
MySQL connections timing out.
"""
def checkout(self, dbapi_con, con_record, con_proxy):
"""Event triggered when a connection is checked out from the pool."""
try:
try:
dbapi_con.ping(False)
except TypeError:
dbapi_con.ping()
except dbapi_con.OperationalError as ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
raise exc.DisconnectionError()
else:
raise
class MySqlApp(object):
"""Prepares DBaaS on a Guest container."""
TIME_OUT = 1000
def __init__(self, status):
"""By default login with root no password for initial setup."""
self.state_change_wait_time = CONF.state_change_wait_time
self.status = status
def _create_admin_user(self, client, password):
"""
Create a os_admin user with a random password
with all privileges similar to the root user.
"""
localhost = "localhost"
g = sql_query.Grant(permissions='ALL', user=ADMIN_USER_NAME,
host=localhost, grant_option=True, clear=password)
t = text(str(g))
client.execute(t)
@staticmethod
def _generate_root_password(client):
"""Generate and set a random root password and forget about it."""
localhost = "localhost"
uu = sql_query.UpdateUser("root", host=localhost,
clear=utils.generate_random_password())
t = text(str(uu))
client.execute(t)
def install_if_needed(self, packages):
"""Prepare the guest machine with a secure
mysql server installation.
"""
LOG.info(_("Preparing Guest as MySQL Server."))
if not packager.pkg_is_installed(packages):
LOG.debug("Installing MySQL server.")
self._clear_mysql_config()
# set blank password on pkg configuration stage
pkg_opts = {'root_password': '',
'root_password_again': ''}
packager.pkg_install(packages, pkg_opts, self.TIME_OUT)
self._create_mysql_confd_dir()
LOG.info(_("Finished installing MySQL server."))
self.start_mysql()
def complete_install_or_restart(self):
self.status.end_install_or_restart()
def secure(self, config_contents, overrides):
LOG.info(_("Generating admin password."))
admin_password = utils.generate_random_password()
clear_expired_password()
engine = sqlalchemy.create_engine("mysql://root:@localhost:3306",
echo=True)
with LocalSqlClient(engine) as client:
self._remove_anonymous_user(client)
self._create_admin_user(client, admin_password)
self.stop_db()
self._write_mycnf(admin_password, config_contents, overrides)
self.start_mysql()
LOG.debug("MySQL secure complete.")
def secure_root(self, secure_remote_root=True):
with LocalSqlClient(get_engine()) as client:
LOG.info(_("Preserving root access from restore."))
self._generate_root_password(client)
if secure_remote_root:
self._remove_remote_root_access(client)
def _clear_mysql_config(self):
"""Clear old configs, which can be incompatible with new version."""
LOG.debug("Clearing old MySQL config.")
random_uuid = str(uuid.uuid4())
configs = ["/etc/my.cnf", "/etc/mysql/conf.d", "/etc/mysql/my.cnf"]
for config in configs:
command = "mv %s %s_%s" % (config, config, random_uuid)
try:
utils.execute_with_timeout(command, shell=True,
root_helper="sudo")
LOG.debug("%s saved to %s_%s." %
(config, config, random_uuid))
except exception.ProcessExecutionError:
pass
def _create_mysql_confd_dir(self):
conf_dir = "/etc/mysql/conf.d"
LOG.debug("Creating %s." % conf_dir)
command = "sudo mkdir -p %s" % conf_dir
utils.execute_with_timeout(command, shell=True)
def _enable_mysql_on_boot(self):
LOG.debug("Enabling MySQL on boot.")
try:
mysql_service = operating_system.service_discovery(
MYSQL_SERVICE_CANDIDATES)
utils.execute_with_timeout(mysql_service['cmd_enable'], shell=True)
except KeyError:
LOG.exception(_("Error enabling MySQL start on boot."))
raise RuntimeError("Service is not discovered.")
def _disable_mysql_on_boot(self):
try:
mysql_service = operating_system.service_discovery(
MYSQL_SERVICE_CANDIDATES)
utils.execute_with_timeout(mysql_service['cmd_disable'],
shell=True)
except KeyError:
LOG.exception(_("Error disabling MySQL start on boot."))
raise RuntimeError("Service is not discovered.")
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
LOG.info(_("Stopping MySQL."))
if do_not_start_on_reboot:
self._disable_mysql_on_boot()
try:
mysql_service = operating_system.service_discovery(
MYSQL_SERVICE_CANDIDATES)
utils.execute_with_timeout(mysql_service['cmd_stop'], shell=True)
except KeyError:
LOG.exception(_("Error stopping MySQL."))
raise RuntimeError("Service is not discovered.")
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
LOG.error(_("Could not stop MySQL."))
self.status.end_install_or_restart()
raise RuntimeError("Could not stop MySQL!")
def _remove_anonymous_user(self, client):
t = text(sql_query.REMOVE_ANON)
client.execute(t)
def _remove_remote_root_access(self, client):
t = text(sql_query.REMOVE_ROOT)
client.execute(t)
def restart(self):
try:
self.status.begin_restart()
self.stop_db()
self.start_mysql()
finally:
self.status.end_install_or_restart()
def update_overrides(self, override_values):
"""
This function will update the MySQL overrides.cnf file
if there is content to write.
:param override_values:
:return:
"""
if override_values:
LOG.debug("Writing new overrides.cnf config file.")
self._write_config_overrides(override_values)
def apply_overrides(self, overrides):
LOG.debug("Applying overrides to MySQL.")
with LocalSqlClient(get_engine()) as client:
LOG.debug("Updating override values in running MySQL.")
for k, v in overrides.iteritems():
q = sql_query.SetServerVariable(key=k, value=v)
t = text(str(q))
try:
client.execute(t)
except exc.OperationalError:
output = {'key': k, 'value': v}
LOG.exception(_("Unable to set %(key)s with value "
"%(value)s.") % output)
def make_read_only(self, read_only):
with LocalSqlClient(get_engine()) as client:
q = "set global read_only = %s" % read_only
client.execute(text(str(q)))
def _write_temp_mycnf_with_admin_account(self, original_file_path,
temp_file_path, password):
mycnf_file = open(original_file_path, 'r')
tmp_file = open(temp_file_path, 'w')
for line in mycnf_file:
tmp_file.write(line)
if "[client]" in line:
tmp_file.write("user\t\t= %s\n" % ADMIN_USER_NAME)
tmp_file.write("password\t= %s\n" % password)
mycnf_file.close()
tmp_file.close()
def wipe_ib_logfiles(self):
"""Destroys the iblogfiles.
If for some reason the selected log size in the conf changes from the
current size of the files MySQL will fail to start, so we delete the
files to be safe.
"""
LOG.info(_("Wiping ib_logfiles."))
for index in range(2):
try:
# On restarts, sometimes these are wiped. So it can be a race
# to have MySQL start up before it's restarted and these have
                # to be deleted. That's why it's ok if they aren't found and
# that is why we use the "-f" option to "rm".
(utils.
execute_with_timeout("sudo", "rm", "-f", "%s/ib_logfile%d"
% (MYSQL_BASE_DIR, index)))
except exception.ProcessExecutionError:
LOG.exception("Could not delete logfile.")
raise
def _write_mycnf(self, admin_password, config_contents, overrides=None):
"""
Install the set of mysql my.cnf templates.
Update the os_admin user and password to the my.cnf
file for direct login from localhost.
"""
LOG.info(_("Writing my.cnf templates."))
if admin_password is None:
admin_password = get_auth_password()
try:
with open(TMP_MYCNF, 'w') as t:
t.write(config_contents)
utils.execute_with_timeout("sudo", "mv", TMP_MYCNF,
MYSQL_CONFIG)
self._write_temp_mycnf_with_admin_account(MYSQL_CONFIG,
TMP_MYCNF,
admin_password)
utils.execute_with_timeout("sudo", "mv", TMP_MYCNF,
MYSQL_CONFIG)
except Exception:
os.unlink(TMP_MYCNF)
raise
self.wipe_ib_logfiles()
# write configuration file overrides
if overrides:
self._write_config_overrides(overrides)
def _write_config_overrides(self, overrideValues):
LOG.info(_("Writing new temp overrides.cnf file."))
with open(MYCNF_OVERRIDES_TMP, 'w') as overrides:
overrides.write(overrideValues)
LOG.info(_("Moving overrides.cnf into correct location."))
utils.execute_with_timeout("sudo", "mv", MYCNF_OVERRIDES_TMP,
MYCNF_OVERRIDES)
LOG.info(_("Setting permissions on overrides.cnf."))
utils.execute_with_timeout("sudo", "chmod", "0644",
MYCNF_OVERRIDES)
def remove_overrides(self):
LOG.info(_("Removing overrides configuration file."))
if os.path.exists(MYCNF_OVERRIDES):
utils.execute_with_timeout("sudo", "rm", MYCNF_OVERRIDES)
def _write_replication_overrides(self, overrideValues, cnf_file):
LOG.info(_("Writing replication.cnf file."))
with open(MYCNF_REPLCONFIG_TMP, 'w') as overrides:
overrides.write(overrideValues)
LOG.debug("Moving temp replication.cnf into correct location.")
utils.execute_with_timeout("sudo", "mv", MYCNF_REPLCONFIG_TMP,
cnf_file)
LOG.debug("Setting permissions on replication.cnf.")
utils.execute_with_timeout("sudo", "chmod", "0644", cnf_file)
def _remove_replication_overrides(self, cnf_file):
LOG.info(_("Removing replication configuration file."))
if os.path.exists(cnf_file):
utils.execute_with_timeout("sudo", "rm", cnf_file)
def exists_replication_source_overrides(self):
return os.path.exists(MYCNF_REPLMASTER)
def write_replication_source_overrides(self, overrideValues):
self._write_replication_overrides(overrideValues, MYCNF_REPLMASTER)
def write_replication_replica_overrides(self, overrideValues):
self._write_replication_overrides(overrideValues, MYCNF_REPLSLAVE)
def remove_replication_source_overrides(self):
self._remove_replication_overrides(MYCNF_REPLMASTER)
def remove_replication_replica_overrides(self):
self._remove_replication_overrides(MYCNF_REPLSLAVE)
def grant_replication_privilege(self, replication_user):
LOG.info(_("Granting Replication Slave privilege."))
LOG.debug("grant_replication_privilege: %s" % replication_user)
with LocalSqlClient(get_engine()) as client:
g = sql_query.Grant(permissions=['REPLICATION SLAVE'],
user=replication_user['name'],
clear=replication_user['password'])
t = text(str(g))
client.execute(t)
def get_port(self):
with LocalSqlClient(get_engine()) as client:
result = client.execute('SELECT @@port').first()
return result[0]
def get_binlog_position(self):
with LocalSqlClient(get_engine()) as client:
result = client.execute('SHOW MASTER STATUS').first()
binlog_position = {
'log_file': result['File'],
'position': result['Position']
}
return binlog_position
def execute_on_client(self, sql_statement):
LOG.debug("Executing SQL: %s" % sql_statement)
with LocalSqlClient(get_engine()) as client:
return client.execute(sql_statement)
def start_slave(self):
LOG.info(_("Starting slave replication."))
with LocalSqlClient(get_engine()) as client:
client.execute('START SLAVE')
self._wait_for_slave_status("ON", client, 60)
def stop_slave(self, for_failover):
replication_user = None
LOG.info(_("Stopping slave replication."))
with LocalSqlClient(get_engine()) as client:
result = client.execute('SHOW SLAVE STATUS')
replication_user = result.first()['Master_User']
client.execute('STOP SLAVE')
client.execute('RESET SLAVE ALL')
self._wait_for_slave_status("OFF", client, 30)
if not for_failover:
client.execute('DROP USER ' + replication_user)
return {
'replication_user': replication_user
}
def stop_master(self):
LOG.info(_("Stopping replication master."))
with LocalSqlClient(get_engine()) as client:
client.execute('RESET MASTER')
def _wait_for_slave_status(self, status, client, max_time):
def verify_slave_status():
actual_status = client.execute(
"SHOW GLOBAL STATUS like 'slave_running'").first()[1]
return actual_status.upper() == status.upper()
LOG.debug("Waiting for SLAVE_RUNNING to change to %s.", status)
try:
utils.poll_until(verify_slave_status, sleep_time=3,
time_out=max_time)
LOG.info(_("Replication is now %s.") % status.lower())
except PollTimeOut:
raise RuntimeError(
_("Replication is not %(status)s after %(max)d seconds.") % {
'status': status.lower(), 'max': max_time})
def start_mysql(self, update_db=False):
LOG.info(_("Starting MySQL."))
# This is the site of all the trouble in the restart tests.
# Essentially what happens is that mysql start fails, but does not
# die. It is then impossible to kill the original, so
self._enable_mysql_on_boot()
try:
mysql_service = operating_system.service_discovery(
MYSQL_SERVICE_CANDIDATES)
utils.execute_with_timeout(mysql_service['cmd_start'], shell=True)
except KeyError:
raise RuntimeError("Service is not discovered.")
except exception.ProcessExecutionError:
# it seems mysql (percona, at least) might come back with [Fail]
# but actually come up ok. we're looking into the timing issue on
# parallel, but for now, we'd like to give it one more chance to
# come up. so regardless of the execute_with_timeout() response,
# we'll assume mysql comes up and check it's status for a while.
pass
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.RUNNING,
self.state_change_wait_time, update_db):
LOG.error(_("Start up of MySQL failed."))
# If it won't start, but won't die either, kill it by hand so we
            # don't let a rogue process wander around.
try:
utils.execute_with_timeout("sudo", "pkill", "-9", "mysql")
except exception.ProcessExecutionError:
LOG.exception(_("Error killing stalled MySQL start command."))
# There's nothing more we can do...
self.status.end_install_or_restart()
raise RuntimeError("Could not start MySQL!")
def start_db_with_conf_changes(self, config_contents):
LOG.info(_("Starting MySQL with conf changes."))
LOG.debug("Inside the guest - Status is_running = (%s)."
% self.status.is_running)
if self.status.is_running:
LOG.error(_("Cannot execute start_db_with_conf_changes because "
"MySQL state == %s.") % self.status)
raise RuntimeError("MySQL not stopped.")
LOG.info(_("Resetting configuration."))
self._write_mycnf(None, config_contents)
self.start_mysql(True)
def reset_configuration(self, configuration):
config_contents = configuration['config_contents']
LOG.info(_("Resetting configuration."))
self._write_mycnf(None, config_contents)
    # DEPRECATED: Maintain for API Compatibility
def get_txn_count(self):
LOG.info(_("Retrieving latest txn id."))
txn_count = 0
with LocalSqlClient(get_engine()) as client:
result = client.execute('SELECT @@global.gtid_executed').first()
for uuid_set in result[0].split(','):
for interval in uuid_set.split(':')[1:]:
if '-' in interval:
iparts = interval.split('-')
txn_count += int(iparts[1]) - int(iparts[0])
else:
txn_count += 1
return txn_count
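    # --- Illustrative note (not part of the original module) ---
    # Example of the count above, assuming @@global.gtid_executed returned
    # "A-UUID:1-5,B-UUID:1-3:7": the first set contributes 5 - 1 = 4, the
    # second contributes 3 - 1 = 2 for the interval plus 1 for the lone '7',
    # so txn_count ends up as 7.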
def _get_slave_status(self):
with LocalSqlClient(get_engine()) as client:
return client.execute('SHOW SLAVE STATUS').first()
def _get_master_UUID(self):
slave_status = self._get_slave_status()
return slave_status and slave_status['Master_UUID'] or None
def _get_gtid_executed(self):
with LocalSqlClient(get_engine()) as client:
return client.execute('SELECT @@global.gtid_executed').first()[0]
def get_last_txn(self):
master_UUID = self._get_master_UUID()
last_txn_id = '0'
gtid_executed = self._get_gtid_executed()
for gtid_set in gtid_executed.split(','):
uuid_set = gtid_set.split(':')
if uuid_set[0] == master_UUID:
last_txn_id = uuid_set[-1].split('-')[-1]
break
return master_UUID, int(last_txn_id)
def get_latest_txn_id(self):
LOG.info(_("Retrieving latest txn id."))
return self._get_gtid_executed()
def wait_for_txn(self, txn):
LOG.info(_("Waiting on txn '%s'.") % txn)
with LocalSqlClient(get_engine()) as client:
client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')"
% txn)
class MySqlRootAccess(object):
@classmethod
def is_root_enabled(cls):
"""Return True if root access is enabled; False otherwise."""
with LocalSqlClient(get_engine()) as client:
t = text(sql_query.ROOT_ENABLED)
result = client.execute(t)
LOG.debug("Found %s with remote root access." % result.rowcount)
return result.rowcount != 0
@classmethod
def enable_root(cls, root_password=None):
"""Enable the root user global access and/or
reset the root password.
"""
user = models.RootUser()
user.name = "root"
user.host = "%"
user.password = root_password or utils.generate_random_password()
with LocalSqlClient(get_engine()) as client:
print(client)
try:
cu = sql_query.CreateUser(user.name, host=user.host)
t = text(str(cu))
client.execute(t, **cu.keyArgs)
except exc.OperationalError as err:
# Ignore, user is already created, just reset the password
# TODO(rnirmal): More fine grained error checking later on
LOG.debug(err)
with LocalSqlClient(get_engine()) as client:
print(client)
uu = sql_query.UpdateUser(user.name, host=user.host,
clear=user.password)
t = text(str(uu))
client.execute(t)
LOG.debug("CONF.root_grant: %s CONF.root_grant_option: %s." %
(CONF.root_grant, CONF.root_grant_option))
g = sql_query.Grant(permissions=CONF.root_grant,
user=user.name,
host=user.host,
grant_option=CONF.root_grant_option,
clear=user.password)
t = text(str(g))
client.execute(t)
return user.serialize()
|
apache-2.0
| 5,361,696,649,788,965,000
| 39.263352
| 79
| 0.55196
| false
| 4.247717
| true
| false
| false
|
sigma-random/pwndbg
|
pwndbg/commands/nearpc.py
|
1
|
2744
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from capstone import *
import pwndbg.arguments
import pwndbg.chain
import pwndbg.color
import pwndbg.commands
import pwndbg.disasm
import pwndbg.disasm.color
import pwndbg.functions
import pwndbg.ida
import pwndbg.regs
import pwndbg.strings
import pwndbg.symbol
import pwndbg.ui
import pwndbg.vmmap
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def nearpc(pc=None, lines=None, to_string=False):
"""
Disassemble near a specified address.
"""
# Fix the case where we only have one argument, and
# it's a small value.
if lines is None and (pc is None or int(pc) < 0x100):
lines = pc
pc = None
if pc is None:
pc = pwndbg.regs.pc
if lines is None:
lines = 5
pc = int(pc)
lines = int(lines)
result = []
instructions = pwndbg.disasm.near(pc, lines)
# In case $pc is in a new map we don't know about,
# this will trigger an exploratory search.
pwndbg.vmmap.find(pc)
# Find all of the symbols for the addresses
symbols = []
for i in instructions:
symbol = pwndbg.symbol.get(i.address)
if symbol:
symbol = '<%s> ' % symbol
symbols.append(symbol)
# Find the longest symbol name so we can adjust
if symbols:
longest_sym = max(map(len, symbols))
else:
longest_sym = ''
# Pad them all out
for i,s in enumerate(symbols):
symbols[i] = s.ljust(longest_sym)
prev = None
# Print out each instruction
for i,s in zip(instructions, symbols):
asm = pwndbg.disasm.color.instruction(i)
prefix = ' =>' if i.address == pc else ' '
pre = pwndbg.ida.Anterior(i.address)
if pre:
result.append(pwndbg.color.bold(pre))
line = ' '.join((prefix, "%#x" % i.address, s or '', asm))
# If there was a branch before this instruction which was not
# contiguous, put in some ellipses.
if prev and prev.address + prev.size != i.address:
result.append('...')
# Otherwise if it's a branch and it *is* contiguous, just put
        # an empty line.
elif prev and any(g in prev.groups for g in (CS_GRP_CALL, CS_GRP_JUMP, CS_GRP_RET)):
result.append('')
result.append(line)
# For call instructions, attempt to resolve the target and
# determine the number of arguments.
for arg, value in pwndbg.arguments.get(i):
code = False if arg.type == 'char' else True
pretty = pwndbg.chain.format(value, code=code)
result.append('%8s%-10s %s' % ('',arg.name+':', pretty))
prev = i
if not to_string:
print('\n'.join(result))
return result
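# --- Illustrative note (not part of the original module) ---
# Inside a pwndbg-enabled gdb session the command can be used from the
# prompt or from Python; assuming a valid current $pc, these are
# equivalent ways to show 10 instructions around it:
#   pwndbg> nearpc 10      (a small first argument is taken as the line count)
#   pwndbg> nearpc $pc 10
#   nearpc(lines=10)       (from Python; also returns the rendered lines)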
|
mit
| 3,652,252,772,521,421,000
| 25.901961
| 92
| 0.603863
| false
| 3.522465
| false
| false
| false
|
eLRuLL/scrapy
|
tests/mockserver.py
|
1
|
7989
|
import json
import os
import random
import sys
from subprocess import Popen, PIPE
from urllib.parse import urlencode
from OpenSSL import SSL
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.web.static import File
from twisted.web.test.test_webclient import PayloadResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.resource import EncodingResourceWrapper
from twisted.web.util import redirectTo
from twisted.internet import reactor, ssl
from twisted.internet.task import deferLater
from scrapy.utils.python import to_bytes, to_unicode
from scrapy.utils.ssl import SSL_OP_NO_TLSv1_3
def getarg(request, name, default=None, type=None):
if name in request.args:
value = request.args[name][0]
if type is not None:
value = type(value)
return value
else:
return default
class LeafResource(Resource):
isLeaf = True
def deferRequest(self, request, delay, f, *a, **kw):
def _cancelrequest(_):
# silence CancelledError
d.addErrback(lambda _: None)
d.cancel()
d = deferLater(reactor, delay, f, *a, **kw)
request.notifyFinish().addErrback(_cancelrequest)
return d
class Follow(LeafResource):
def render(self, request):
total = getarg(request, b"total", 100, type=int)
show = getarg(request, b"show", 1, type=int)
order = getarg(request, b"order", b"desc")
maxlatency = getarg(request, b"maxlatency", 0, type=float)
n = getarg(request, b"n", total, type=int)
if order == b"rand":
nlist = [random.randint(1, total) for _ in range(show)]
else: # order == "desc"
nlist = range(n, max(n - show, 0), -1)
lag = random.random() * maxlatency
self.deferRequest(request, lag, self.renderRequest, request, nlist)
return NOT_DONE_YET
def renderRequest(self, request, nlist):
s = """<html> <head></head> <body>"""
args = request.args.copy()
for nl in nlist:
args[b"n"] = [to_bytes(str(nl))]
argstr = urlencode(args, doseq=True)
s += "<a href='/follow?%s'>follow %d</a><br>" % (argstr, nl)
s += """</body>"""
request.write(to_bytes(s))
request.finish()
class Delay(LeafResource):
def render_GET(self, request):
n = getarg(request, b"n", 1, type=float)
b = getarg(request, b"b", 1, type=int)
if b:
# send headers now and delay body
request.write('')
self.deferRequest(request, n, self._delayedRender, request, n)
return NOT_DONE_YET
def _delayedRender(self, request, n):
request.write(to_bytes("Response delayed for %0.3f seconds\n" % n))
request.finish()
class Status(LeafResource):
def render_GET(self, request):
n = getarg(request, b"n", 200, type=int)
request.setResponseCode(n)
return b""
class Raw(LeafResource):
def render_GET(self, request):
request.startedWriting = 1
self.deferRequest(request, 0, self._delayedRender, request)
return NOT_DONE_YET
render_POST = render_GET
def _delayedRender(self, request):
raw = getarg(request, b'raw', b'HTTP 1.1 200 OK\n')
request.startedWriting = 1
request.write(raw)
request.channel.transport.loseConnection()
request.finish()
class Echo(LeafResource):
def render_GET(self, request):
output = {
'headers': dict(
(to_unicode(k), [to_unicode(v) for v in vs])
for k, vs in request.requestHeaders.getAllRawHeaders()),
'body': to_unicode(request.content.read()),
}
return to_bytes(json.dumps(output))
render_POST = render_GET
class RedirectTo(LeafResource):
def render(self, request):
goto = getarg(request, b'goto', b'/')
# we force the body content, otherwise Twisted redirectTo()
# returns HTML with <meta http-equiv="refresh"
redirectTo(goto, request)
return b'redirecting...'
class Partial(LeafResource):
def render_GET(self, request):
request.setHeader(b"Content-Length", b"1024")
self.deferRequest(request, 0, self._delayedRender, request)
return NOT_DONE_YET
def _delayedRender(self, request):
request.write(b"partial content\n")
request.finish()
class Drop(Partial):
def _delayedRender(self, request):
abort = getarg(request, b"abort", 0, type=int)
request.write(b"this connection will be dropped\n")
tr = request.channel.transport
try:
if abort and hasattr(tr, 'abortConnection'):
tr.abortConnection()
else:
tr.loseConnection()
finally:
request.finish()
class ArbitraryLengthPayloadResource(LeafResource):
def render(self, request):
return request.content.read()
class Root(Resource):
def __init__(self):
Resource.__init__(self)
self.putChild(b"status", Status())
self.putChild(b"follow", Follow())
self.putChild(b"delay", Delay())
self.putChild(b"partial", Partial())
self.putChild(b"drop", Drop())
self.putChild(b"raw", Raw())
self.putChild(b"echo", Echo())
self.putChild(b"payload", PayloadResource())
self.putChild(b"xpayload", EncodingResourceWrapper(PayloadResource(), [GzipEncoderFactory()]))
self.putChild(b"alpayload", ArbitraryLengthPayloadResource())
try:
from tests import tests_datadir
self.putChild(b"files", File(os.path.join(tests_datadir, 'test_site/files/')))
except Exception:
pass
self.putChild(b"redirect-to", RedirectTo())
def getChild(self, name, request):
return self
def render(self, request):
return b'Scrapy mock HTTP server\n'
class MockServer():
def __enter__(self):
from scrapy.utils.test import get_testenv
self.proc = Popen([sys.executable, '-u', '-m', 'tests.mockserver'],
stdout=PIPE, env=get_testenv())
http_address = self.proc.stdout.readline().strip().decode('ascii')
https_address = self.proc.stdout.readline().strip().decode('ascii')
self.http_address = http_address
self.https_address = https_address
return self
def __exit__(self, exc_type, exc_value, traceback):
self.proc.kill()
self.proc.communicate()
def url(self, path, is_secure=False):
host = self.http_address.replace('0.0.0.0', '127.0.0.1')
if is_secure:
host = self.https_address
return host + path
def ssl_context_factory(keyfile='keys/localhost.key', certfile='keys/localhost.crt', cipher_string=None):
factory = ssl.DefaultOpenSSLContextFactory(
os.path.join(os.path.dirname(__file__), keyfile),
os.path.join(os.path.dirname(__file__), certfile),
)
if cipher_string:
ctx = factory.getContext()
# disabling TLS1.2+ because it unconditionally enables some strong ciphers
ctx.set_options(SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_TLSv1_2 | SSL_OP_NO_TLSv1_3)
ctx.set_cipher_list(to_bytes(cipher_string))
return factory
if __name__ == "__main__":
root = Root()
factory = Site(root)
httpPort = reactor.listenTCP(0, factory)
contextFactory = ssl_context_factory()
httpsPort = reactor.listenSSL(0, factory, contextFactory)
def print_listening():
httpHost = httpPort.getHost()
httpsHost = httpsPort.getHost()
httpAddress = 'http://%s:%d' % (httpHost.host, httpHost.port)
httpsAddress = 'https://%s:%d' % (httpsHost.host, httpsHost.port)
print(httpAddress)
print(httpsAddress)
reactor.callWhenRunning(print_listening)
reactor.run()
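# --- Illustrative usage sketch (not part of the original module) ---
# From a test, the wrapper is typically used as a context manager; the
# endpoint names follow the putChild() calls in Root above:
#
#   with MockServer() as mockserver:
#       url = mockserver.url("/status?n=503")
#       # issue an HTTP request against `url` and assert on the 503 reply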
|
bsd-3-clause
| 8,276,427,149,703,918,000
| 30.207031
| 105
| 0.619477
| false
| 3.740169
| true
| false
| false
|
dstufft/fastly-py
|
tests/test_core.py
|
1
|
1153
|
# Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from fastly.auth import KeyAuth, SessionAuth
from fastly.core import Fastly
def test_fastly_key():
api = Fastly("1234")
assert isinstance(api._session.auth, KeyAuth)
assert api._session.auth.key == "1234"
def test_fastly_session():
api = Fastly("test@example.com", "password")
assert isinstance(api._session.auth, SessionAuth)
assert api._session.auth.user == "test@example.com"
assert api._session.auth.password == "password"
assert api._session.auth.session is api._session
|
apache-2.0
| -7,275,991,855,456,668,000
| 33.939394
| 74
| 0.740676
| false
| 3.830565
| false
| false
| false
|
zuun77/givemegoogletshirts
|
codejam/2019/1B/q1.py
|
1
|
1278
|
import collections
def solve(case, P, Q, people):
ver, hor = {}, {}
s, n, e, w = 0, 0, 0, 0
for p in people:
x, y, d = p
if d == 'S':
if y in ver: ver[y] = (ver[y][0]+1, ver[y][1])
else: ver[y] = (1, 0)
s += 1
elif d == 'N':
if y in ver: ver[y] = (ver[y][0], ver[y][1]+1)
else: ver[y] = (0, 1)
n += 1
elif d == 'W':
if x in hor: hor[x] = (hor[x][0]+1, hor[x][1])
else: hor[x] = (1, 0)
e += 1
else:
if x in hor: hor[x] = (hor[x][0], hor[x][1]+1)
else: hor[x] = (0, 1)
w += 1
X, Y = getMaxCord(w, hor), getMaxCord(s, ver)
print("Case #{}: {} {}".format(case, X, Y))
def getMaxCord(n, dic):
X, maxV = 0, n
wcnt, ecnt = n, 0
for i in range(100001):
if i in dic: wcnt -= dic[i][0]
cnt = wcnt + ecnt
if cnt > maxV:
X = i
maxV = cnt
if i in dic: ecnt += dic[i][1]
return X
for case in range(1, eval(input()) + 1):
P, Q = map(int, input().split())
people = []
for i in range(P):
p = input().split()
people.append((int(p[0]), int(p[1]), p[2]))
solve(case, P, Q, people)
|
apache-2.0
| -4,067,940,558,325,939,000
| 25.625
| 58
| 0.400626
| false
| 2.690526
| false
| false
| false
|
jimbotonic/df_nlp
|
step1/prepare_data2.py
|
1
|
2591
|
#
# This file is part of DF.
#
# DF is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation;
# either version 3 of the License, or (at your option) any
# later version.
#
# DF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public
# License along with DF; see the file COPYING. If not
# see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2014-2019 Jimmy Dubuisson <jimmy.dubuisson@gmail.com>
#
from __future__ import division
from utils import *
from lemmatizer import *
from igraph import Graph
from numpy import dot
import cPickle as pickle
if __name__ == '__main__':
rgx = '\w+'
#punct = '\',.!?'
min_length = 3
# min number of occurences
min_occ = 3
# max frequency (between 0 and 1)
max_freq = 1
# min number of tokens
min_size = 100
# max number of tokens
max_size = 1000
# folder path
data_dir = sys.argv[1]
pickle_dir1 = sys.argv[2]
pickle_dir2 = sys.argv[3]
# collocation metrics instance to be used
#cmetrics = CollocationMetrics(CollocationMetrics.decreasing_exp,(1,1),CollocationMetrics.do_nothing,())
cmetrics = CollocationMetrics(CollocationMetrics.decreasing_exp,(1,1),CollocationMetrics.information,())
# batch replace arrays
vold = ['</?blog>','</?Blog>','</?post>','<date>.*</date>','nbsp','urlLink']
vnew = ['','','','','','']
fnames = FileUtils.get_files_list(data_dir)
counter = 1
max_count = 2000
success_count = 0
for p in fnames:
if success_count == max_count:
break
print counter, '- Tokenizing: ', p
counter += 1
txt = FileUtils.read_text_file(data_dir + '/' + p)
txt = FileUtils.batch_replace(txt,vold,vnew)
doc = DocStats(txt, rgx, min_length, min_occ, max_freq, cmetrics)
print '# tokens: ', len(doc.token_set)
if len(doc.token_set) >= min_size and len(doc.token_set) <= max_size:
mat = doc.get_collocation_mat()
print '# rows: ', mat.dim
print '# nnz entries: ', mat.vmat.nnz
if mat:
success_count += 1
pickle.dump(doc.token_stats, open(pickle_dir1 + '/' + p.replace('.xml','') + ".p", "wb"), pickle.HIGHEST_PROTOCOL)
pickle.dump(mat, open(pickle_dir2 + '/' + p.replace('.xml','') + ".p", "wb"), pickle.HIGHEST_PROTOCOL)
print '---'
|
gpl-2.0
| 8,706,524,835,819,565,000
| 33.092105
| 118
| 0.650714
| false
| 3.159756
| false
| false
| false
|
renesugar/arrow
|
python/pyarrow/pandas_compat.py
|
1
|
37193
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import ast
import json
import operator
import re
import warnings
import numpy as np
import six
import pyarrow as pa
from pyarrow.lib import _pandas_api
from pyarrow.compat import (builtin_pickle, # noqa
PY2, zip_longest, Sequence, u_utf8)
_logical_type_map = {}
def get_logical_type_map():
global _logical_type_map
if not _logical_type_map:
_logical_type_map.update({
pa.lib.Type_NA: 'empty',
pa.lib.Type_BOOL: 'bool',
pa.lib.Type_INT8: 'int8',
pa.lib.Type_INT16: 'int16',
pa.lib.Type_INT32: 'int32',
pa.lib.Type_INT64: 'int64',
pa.lib.Type_UINT8: 'uint8',
pa.lib.Type_UINT16: 'uint16',
pa.lib.Type_UINT32: 'uint32',
pa.lib.Type_UINT64: 'uint64',
pa.lib.Type_HALF_FLOAT: 'float16',
pa.lib.Type_FLOAT: 'float32',
pa.lib.Type_DOUBLE: 'float64',
pa.lib.Type_DATE32: 'date',
pa.lib.Type_DATE64: 'date',
pa.lib.Type_TIME32: 'time',
pa.lib.Type_TIME64: 'time',
pa.lib.Type_BINARY: 'bytes',
pa.lib.Type_FIXED_SIZE_BINARY: 'bytes',
pa.lib.Type_STRING: 'unicode',
})
return _logical_type_map
def get_logical_type(arrow_type):
logical_type_map = get_logical_type_map()
try:
return logical_type_map[arrow_type.id]
except KeyError:
if isinstance(arrow_type, pa.lib.DictionaryType):
return 'categorical'
elif isinstance(arrow_type, pa.lib.ListType):
return 'list[{}]'.format(get_logical_type(arrow_type.value_type))
elif isinstance(arrow_type, pa.lib.TimestampType):
return 'datetimetz' if arrow_type.tz is not None else 'datetime'
elif isinstance(arrow_type, pa.lib.Decimal128Type):
return 'decimal'
return 'object'
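# --- Illustrative note (not part of the original module) ---
# For example, get_logical_type(pa.int64()) returns 'int64',
# get_logical_type(pa.timestamp('ns', tz='UTC')) returns 'datetimetz', and
# get_logical_type(pa.list_(pa.string())) returns 'list[unicode]'.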
_numpy_logical_type_map = {
np.bool_: 'bool',
np.int8: 'int8',
np.int16: 'int16',
np.int32: 'int32',
np.int64: 'int64',
np.uint8: 'uint8',
np.uint16: 'uint16',
np.uint32: 'uint32',
np.uint64: 'uint64',
np.float32: 'float32',
np.float64: 'float64',
'datetime64[D]': 'date',
np.unicode_: 'string' if not PY2 else 'unicode',
np.bytes_: 'bytes' if not PY2 else 'string',
}
def get_logical_type_from_numpy(pandas_collection):
try:
return _numpy_logical_type_map[pandas_collection.dtype.type]
except KeyError:
if hasattr(pandas_collection.dtype, 'tz'):
return 'datetimetz'
# See https://github.com/pandas-dev/pandas/issues/24739
if str(pandas_collection.dtype) == 'datetime64[ns]':
return 'datetime64[ns]'
result = _pandas_api.infer_dtype(pandas_collection)
if result == 'string':
return 'bytes' if PY2 else 'unicode'
return result
def get_extension_dtype_info(column):
dtype = column.dtype
if str(dtype) == 'category':
cats = getattr(column, 'cat', column)
assert cats is not None
metadata = {
'num_categories': len(cats.categories),
'ordered': cats.ordered,
}
physical_dtype = str(cats.codes.dtype)
elif hasattr(dtype, 'tz'):
metadata = {'timezone': pa.lib.tzinfo_to_string(dtype.tz)}
physical_dtype = 'datetime64[ns]'
else:
metadata = None
physical_dtype = str(dtype)
return physical_dtype, metadata
def get_column_metadata(column, name, arrow_type, field_name):
"""Construct the metadata for a given column
Parameters
----------
column : pandas.Series or pandas.Index
name : str
arrow_type : pyarrow.DataType
field_name : str
Equivalent to `name` when `column` is a `Series`, otherwise if `column`
is a pandas Index then `field_name` will not be the same as `name`.
This is the name of the field in the arrow Table's schema.
Returns
-------
dict
"""
logical_type = get_logical_type(arrow_type)
string_dtype, extra_metadata = get_extension_dtype_info(column)
if logical_type == 'decimal':
extra_metadata = {
'precision': arrow_type.precision,
'scale': arrow_type.scale,
}
string_dtype = 'object'
if name is not None and not isinstance(name, six.string_types):
raise TypeError(
'Column name must be a string. Got column {} of type {}'.format(
name, type(name).__name__
)
)
assert field_name is None or isinstance(field_name, six.string_types), \
str(type(field_name))
return {
'name': name,
'field_name': 'None' if field_name is None else field_name,
'pandas_type': logical_type,
'numpy_type': string_dtype,
'metadata': extra_metadata,
}
def construct_metadata(df, column_names, index_levels, index_descriptors,
preserve_index, types):
"""Returns a dictionary containing enough metadata to reconstruct a pandas
DataFrame as an Arrow Table, including index columns.
Parameters
----------
df : pandas.DataFrame
index_levels : List[pd.Index]
index_descriptors : List[Dict]
preserve_index : bool
types : List[pyarrow.DataType]
Returns
-------
dict
"""
num_serialized_index_levels = len([descr for descr in index_descriptors
if not isinstance(descr, dict)])
# Use ntypes instead of Python shorthand notation [:-len(x)] as [:-0]
# behaves differently to what we want.
ntypes = len(types)
df_types = types[:ntypes - num_serialized_index_levels]
index_types = types[ntypes - num_serialized_index_levels:]
column_metadata = []
for col_name, sanitized_name, arrow_type in zip(df.columns, column_names,
df_types):
metadata = get_column_metadata(df[col_name], name=sanitized_name,
arrow_type=arrow_type,
field_name=sanitized_name)
column_metadata.append(metadata)
index_column_metadata = []
if preserve_index is not False:
for level, arrow_type, descriptor in zip(index_levels, index_types,
index_descriptors):
if isinstance(descriptor, dict):
# The index is represented in a non-serialized fashion,
# e.g. RangeIndex
continue
metadata = get_column_metadata(level, name=level.name,
arrow_type=arrow_type,
field_name=descriptor)
index_column_metadata.append(metadata)
column_indexes = []
levels = getattr(df.columns, 'levels', [df.columns])
names = getattr(df.columns, 'names', [df.columns.name])
for level, name in zip(levels, names):
metadata = _get_simple_index_descriptor(level, name)
column_indexes.append(metadata)
else:
index_descriptors = index_column_metadata = column_indexes = []
return {
b'pandas': json.dumps({
'index_columns': index_descriptors,
'column_indexes': column_indexes,
'columns': column_metadata + index_column_metadata,
'creator': {
'library': 'pyarrow',
'version': pa.__version__
},
'pandas_version': _pandas_api.version
}).encode('utf8')
}
def _get_simple_index_descriptor(level, name):
string_dtype, extra_metadata = get_extension_dtype_info(level)
pandas_type = get_logical_type_from_numpy(level)
if 'mixed' in pandas_type:
warnings.warn(
"The DataFrame has column names of mixed type. They will be "
"converted to strings and not roundtrip correctly.",
UserWarning, stacklevel=4)
if pandas_type == 'unicode':
assert not extra_metadata
extra_metadata = {'encoding': 'UTF-8'}
return {
'name': name,
'field_name': name,
'pandas_type': pandas_type,
'numpy_type': string_dtype,
'metadata': extra_metadata,
}
def _column_name_to_strings(name):
"""Convert a column name (or level) to either a string or a recursive
collection of strings.
Parameters
----------
name : str or tuple
Returns
-------
value : str or tuple
Examples
--------
>>> name = 'foo'
>>> _column_name_to_strings(name)
'foo'
>>> name = ('foo', 'bar')
>>> _column_name_to_strings(name)
('foo', 'bar')
>>> import pandas as pd
>>> name = (1, pd.Timestamp('2017-02-01 00:00:00'))
>>> _column_name_to_strings(name)
('1', '2017-02-01 00:00:00')
"""
if isinstance(name, six.string_types):
return name
elif isinstance(name, six.binary_type):
# XXX: should we assume that bytes in Python 3 are UTF-8?
return name.decode('utf8')
elif isinstance(name, tuple):
return str(tuple(map(_column_name_to_strings, name)))
elif isinstance(name, Sequence):
raise TypeError("Unsupported type for MultiIndex level")
elif name is None:
return None
return str(name)
def _index_level_name(index, i, column_names):
"""Return the name of an index level or a default name if `index.name` is
None or is already a column name.
Parameters
----------
index : pandas.Index
i : int
Returns
-------
name : str
"""
if index.name is not None and index.name not in column_names:
return index.name
else:
return '__index_level_{:d}__'.format(i)
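# Illustrative sketch: an unnamed index at position 0, or one whose name
# collides with an existing column name, is given the generated name
# '__index_level_0__' by the fallback branch above.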
def _get_columns_to_convert(df, schema, preserve_index, columns):
columns = _resolve_columns_of_interest(df, schema, columns)
if not df.columns.is_unique:
raise ValueError(
'Duplicate column names found: {}'.format(list(df.columns))
)
if schema is not None:
return _get_columns_to_convert_given_schema(df, schema, preserve_index)
column_names = []
index_levels = (
_get_index_level_values(df.index) if preserve_index is not False
else []
)
columns_to_convert = []
convert_fields = []
for name in columns:
col = df[name]
name = _column_name_to_strings(name)
if _pandas_api.is_sparse(col):
raise TypeError(
"Sparse pandas data (column {}) not supported.".format(name))
columns_to_convert.append(col)
convert_fields.append(None)
column_names.append(name)
index_descriptors = []
index_column_names = []
for i, index_level in enumerate(index_levels):
name = _index_level_name(index_level, i, column_names)
if (isinstance(index_level, _pandas_api.pd.RangeIndex)
and preserve_index is None):
descr = _get_range_index_descriptor(index_level)
else:
columns_to_convert.append(index_level)
convert_fields.append(None)
descr = name
index_column_names.append(name)
index_descriptors.append(descr)
all_names = column_names + index_column_names
# all_names : all of the columns in the resulting table including the data
# columns and serialized index columns
# column_names : the names of the data columns
# index_column_names : the names of the serialized index columns
# index_descriptors : descriptions of each index to be used for
# reconstruction
# index_levels : the extracted index level values
# columns_to_convert : assembled raw data (both data columns and indexes)
# to be converted to Arrow format
# columns_fields : specified column to use for coercion / casting
# during serialization, if a Schema was provided
return (all_names, column_names, index_column_names, index_descriptors,
index_levels, columns_to_convert, convert_fields)
def _get_columns_to_convert_given_schema(df, schema, preserve_index):
"""
Specialized version of _get_columns_to_convert in case a Schema is
specified.
In that case, the Schema is used as the single point of truth for the
table structure (types, which columns are included, order of columns, ...).
"""
column_names = []
columns_to_convert = []
convert_fields = []
index_descriptors = []
index_column_names = []
index_levels = []
for name in schema.names:
try:
col = df[name]
is_index = False
except KeyError:
if preserve_index is not False and name in df.index.names:
col = df.index.get_level_values(name)
if (preserve_index is None and
isinstance(col, _pandas_api.pd.RangeIndex)):
raise ValueError(
"name '{}' is present in the schema, but it is a "
"RangeIndex which will not be converted as a column "
"in the Table, but saved as metadata-only not in "
"columns. Specify 'preserve_index=True' to force it "
"being added as a column, or remove it from the "
"specified schema".format(name))
is_index = True
else:
raise KeyError(
"name '{}' present in the specified schema is not found "
"in the columns or index".format(name))
name = _column_name_to_strings(name)
if _pandas_api.is_sparse(col):
raise TypeError(
"Sparse pandas data (column {}) not supported.".format(name))
field = schema.field(name)
columns_to_convert.append(col)
convert_fields.append(field)
column_names.append(name)
if is_index:
index_column_names.append(name)
index_descriptors.append(name)
index_levels.append(col)
all_names = column_names + index_column_names
return (all_names, column_names, index_column_names, index_descriptors,
index_levels, columns_to_convert, convert_fields)
def _get_range_index_descriptor(level):
# public start/stop/step attributes added in pandas 0.25.0
return {
'kind': 'range',
'name': level.name,
'start': _pandas_api.get_rangeindex_attribute(level, 'start'),
'stop': _pandas_api.get_rangeindex_attribute(level, 'stop'),
'step': _pandas_api.get_rangeindex_attribute(level, 'step')
}
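# Illustrative sketch (values assumed): an unnamed pd.RangeIndex(stop=3)
# would be described as {'kind': 'range', 'name': None, 'start': 0,
# 'stop': 3, 'step': 1} and stored purely as metadata rather than as a
# serialized index column.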
def _get_index_level_values(index):
n = len(getattr(index, 'levels', [index]))
return [index.get_level_values(i) for i in range(n)]
def _resolve_columns_of_interest(df, schema, columns):
if schema is not None and columns is not None:
raise ValueError('Schema and columns arguments are mutually '
'exclusive, pass only one of them')
elif schema is not None:
columns = schema.names
elif columns is not None:
columns = [c for c in columns if c in df.columns]
else:
columns = df.columns
return columns
def dataframe_to_types(df, preserve_index, columns=None):
(all_names,
column_names,
_,
index_descriptors,
index_columns,
columns_to_convert,
_) = _get_columns_to_convert(df, None, preserve_index, columns)
types = []
# If pandas knows type, skip conversion
for c in columns_to_convert:
values = c.values
if _pandas_api.is_categorical(values):
type_ = pa.array(c, from_pandas=True).type
else:
values, type_ = get_datetimetz_type(values, c.dtype, None)
type_ = pa.lib._ndarray_to_arrow_type(values, type_)
if type_ is None:
type_ = pa.array(c, from_pandas=True).type
types.append(type_)
metadata = construct_metadata(df, column_names, index_columns,
index_descriptors, preserve_index, types)
return all_names, types, metadata
def dataframe_to_arrays(df, schema, preserve_index, nthreads=1, columns=None,
safe=True):
(all_names,
column_names,
index_column_names,
index_descriptors,
index_columns,
columns_to_convert,
convert_fields) = _get_columns_to_convert(df, schema, preserve_index,
columns)
# NOTE(wesm): If nthreads=None, then we use a heuristic to decide whether
# using a thread pool is worth it. Currently the heuristic is whether the
# nrows > 100 * ncols.
if nthreads is None:
nrows, ncols = len(df), len(df.columns)
if nrows > ncols * 100:
nthreads = pa.cpu_count()
else:
nthreads = 1
def convert_column(col, field):
if field is None:
field_nullable = True
type_ = None
else:
field_nullable = field.nullable
type_ = field.type
try:
result = pa.array(col, type=type_, from_pandas=True, safe=safe)
except (pa.ArrowInvalid,
pa.ArrowNotImplementedError,
pa.ArrowTypeError) as e:
e.args += ("Conversion failed for column {0!s} with type {1!s}"
.format(col.name, col.dtype),)
raise e
if not field_nullable and result.null_count > 0:
raise ValueError("Field {} was non-nullable but pandas column "
"had {} null values".format(str(field),
result.null_count))
return result
if nthreads == 1:
arrays = [convert_column(c, f)
for c, f in zip(columns_to_convert, convert_fields)]
else:
from concurrent import futures
with futures.ThreadPoolExecutor(nthreads) as executor:
arrays = list(executor.map(convert_column, columns_to_convert,
convert_fields))
types = [x.type for x in arrays]
if schema is None:
fields = []
for name, type_ in zip(all_names, types):
name = name if name is not None else 'None'
fields.append(pa.field(name, type_))
schema = pa.schema(fields)
metadata = construct_metadata(df, column_names, index_columns,
index_descriptors, preserve_index,
types)
schema = schema.with_metadata(metadata)
return arrays, schema
def get_datetimetz_type(values, dtype, type_):
if values.dtype.type != np.datetime64:
return values, type_
if _pandas_api.is_datetimetz(dtype) and type_ is None:
# If no user type passed, construct a tz-aware timestamp type
tz = dtype.tz
unit = dtype.unit
type_ = pa.timestamp(unit, tz)
elif type_ is None:
# Trust the NumPy dtype
type_ = pa.from_numpy_dtype(values.dtype)
return values, type_
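# Illustrative sketch (timezone assumed): a column of dtype
# 'datetime64[ns, UTC]' with no user-supplied type is mapped to
# pa.timestamp('ns', 'UTC') by the tz-aware branch above, while a plain
# 'datetime64[ns]' column falls through to pa.from_numpy_dtype().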
# ----------------------------------------------------------------------
# Converting pandas.DataFrame to a dict containing only NumPy arrays or other
# objects friendly to pyarrow.serialize
def dataframe_to_serialized_dict(frame):
import pandas.core.internals as _int
block_manager = frame._data
blocks = []
axes = [ax for ax in block_manager.axes]
for block in block_manager.blocks:
values = block.values
block_data = {}
if isinstance(block, _int.DatetimeTZBlock):
block_data['timezone'] = pa.lib.tzinfo_to_string(values.tz)
if hasattr(values, 'values'):
values = values.values
elif isinstance(block, _int.CategoricalBlock):
block_data.update(dictionary=values.categories,
ordered=values.ordered)
values = values.codes
block_data.update(
placement=block.mgr_locs.as_array,
block=values
)
# If we are dealing with an object array, pickle it instead. Note that
# we do not use isinstance here because _int.CategoricalBlock is a
# subclass of _int.ObjectBlock.
if type(block) == _int.ObjectBlock:
block_data['object'] = None
block_data['block'] = builtin_pickle.dumps(
values, protocol=builtin_pickle.HIGHEST_PROTOCOL)
blocks.append(block_data)
return {
'blocks': blocks,
'axes': axes
}
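# Illustrative sketch of the returned structure (block contents elided):
# {'blocks': [{'placement': <ndarray of column positions>,
#              'block': <values, codes or pickled payload>,
#              ...plus optional 'timezone'/'dictionary'/'ordered'/'object'...}],
#  'axes': [<column Index>, <row Index>]}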
def serialized_dict_to_dataframe(data):
import pandas.core.internals as _int
reconstructed_blocks = [_reconstruct_block(block)
for block in data['blocks']]
block_mgr = _int.BlockManager(reconstructed_blocks, data['axes'])
return _pandas_api.data_frame(block_mgr)
def _reconstruct_block(item):
import pandas.core.internals as _int
# Construct the individual blocks converting dictionary types to pandas
# categorical types and Timestamps-with-timezones types to the proper
# pandas Blocks
block_arr = item.get('block', None)
placement = item['placement']
if 'dictionary' in item:
cat = _pandas_api.categorical_type.from_codes(
block_arr, categories=item['dictionary'],
ordered=item['ordered'])
block = _int.make_block(cat, placement=placement,
klass=_int.CategoricalBlock)
elif 'timezone' in item:
dtype = make_datetimetz(item['timezone'])
block = _int.make_block(block_arr, placement=placement,
klass=_int.DatetimeTZBlock,
dtype=dtype)
elif 'object' in item:
block = _int.make_block(builtin_pickle.loads(block_arr),
placement=placement, klass=_int.ObjectBlock)
elif 'py_array' in item:
arr = item['py_array']
# TODO have mechanism to know a method to create a
# pandas ExtensionArray given the pyarrow type
# Now hardcode here to create a pandas IntegerArray for the example
arr = arr.chunk(0)
buflist = arr.buffers()
data = np.frombuffer(buflist[-1], dtype=arr.type.to_pandas_dtype())[
arr.offset:arr.offset + len(arr)]
bitmask = buflist[0]
if bitmask is not None:
mask = pa.BooleanArray.from_buffers(
pa.bool_(), len(arr), [None, bitmask])
mask = np.asarray(mask)
else:
mask = np.ones(len(arr), dtype=bool)
block_arr = _pandas_api.pd.arrays.IntegerArray(
data.copy(), ~mask, copy=False)
# create ExtensionBlock
block = _int.make_block(block_arr, placement=placement,
klass=_int.ExtensionBlock)
else:
block = _int.make_block(block_arr, placement=placement)
return block
def make_datetimetz(tz):
tz = pa.lib.string_to_tzinfo(tz)
return _pandas_api.datetimetz_type('ns', tz=tz)
# ----------------------------------------------------------------------
# Converting pyarrow.Table efficiently to pandas.DataFrame
def table_to_blockmanager(options, table, categories=None,
extension_columns=None, ignore_metadata=False):
from pandas.core.internals import BlockManager
all_columns = []
column_indexes = []
pandas_metadata = table.schema.pandas_metadata
if not ignore_metadata and pandas_metadata is not None:
all_columns = pandas_metadata['columns']
column_indexes = pandas_metadata.get('column_indexes', [])
index_descriptors = pandas_metadata['index_columns']
table = _add_any_metadata(table, pandas_metadata)
table, index = _reconstruct_index(table, index_descriptors,
all_columns)
else:
index = _pandas_api.pd.RangeIndex(table.num_rows)
_check_data_column_metadata_consistency(all_columns)
blocks = _table_to_blocks(options, table, categories, extension_columns)
columns = _deserialize_column_index(table, all_columns, column_indexes)
axes = [columns, index]
return BlockManager(blocks, axes)
def _check_data_column_metadata_consistency(all_columns):
# It can never be the case in a released version of pyarrow that
# c['name'] is None *and* 'field_name' is not a key in the column metadata,
# because the change to allow c['name'] to be None and the change to add
# 'field_name' are in the same release (0.8.0)
assert all(
(c['name'] is None and 'field_name' in c) or c['name'] is not None
for c in all_columns
)
def _deserialize_column_index(block_table, all_columns, column_indexes):
column_strings = [u_utf8(x) for x in block_table.column_names]
if all_columns:
columns_name_dict = {
c.get('field_name', _column_name_to_strings(c['name'])): c['name']
for c in all_columns
}
columns_values = [
columns_name_dict.get(name, name) for name in column_strings
]
else:
columns_values = column_strings
# If we're passed multiple column indexes then evaluate with
# ast.literal_eval, since the column index values show up as a list of
# tuples
to_pair = ast.literal_eval if len(column_indexes) > 1 else lambda x: (x,)
# Create the column index
# Construct the base index
if not columns_values:
columns = _pandas_api.pd.Index(columns_values)
else:
columns = _pandas_api.pd.MultiIndex.from_tuples(
list(map(to_pair, columns_values)),
names=[col_index['name'] for col_index in column_indexes] or None,
)
# if we're reconstructing the index
if len(column_indexes) > 0:
columns = _reconstruct_columns_from_metadata(columns, column_indexes)
# ARROW-1751: flatten a single level column MultiIndex for pandas 0.21.0
columns = _flatten_single_level_multiindex(columns)
return columns
def _reconstruct_index(table, index_descriptors, all_columns):
# 0. 'field_name' is the name of the column in the arrow Table
# 1. 'name' is the user-facing name of the column, that is, it came from
# pandas
# 2. 'field_name' and 'name' differ for index columns
# 3. We fall back on c['name'] for backwards compatibility
field_name_to_metadata = {
c.get('field_name', c['name']): c
for c in all_columns
}
# Build up a list of index columns and names while removing those columns
# from the original table
index_arrays = []
index_names = []
result_table = table
for descr in index_descriptors:
if isinstance(descr, six.string_types):
result_table, index_level, index_name = _extract_index_level(
table, result_table, descr, field_name_to_metadata)
if index_level is None:
# ARROW-1883: the serialized index column was not found
continue
elif descr['kind'] == 'range':
index_name = descr['name']
index_level = _pandas_api.pd.RangeIndex(descr['start'],
descr['stop'],
step=descr['step'],
name=index_name)
if len(index_level) != len(table):
# Possibly the result of munged metadata
continue
else:
raise ValueError("Unrecognized index kind: {0}"
.format(descr['kind']))
index_arrays.append(index_level)
index_names.append(index_name)
pd = _pandas_api.pd
# Reconstruct the row index
if len(index_arrays) > 1:
index = pd.MultiIndex.from_arrays(index_arrays, names=index_names)
elif len(index_arrays) == 1:
index = index_arrays[0]
if not isinstance(index, pd.Index):
# Box anything that wasn't boxed above
index = pd.Index(index, name=index_names[0])
else:
index = pd.RangeIndex(table.num_rows)
return result_table, index
def _extract_index_level(table, result_table, field_name,
field_name_to_metadata):
logical_name = field_name_to_metadata[field_name]['name']
index_name = _backwards_compatible_index_name(field_name, logical_name)
i = table.schema.get_field_index(field_name)
if i == -1:
# The serialized index column was removed by the user
return table, None, None
pd = _pandas_api.pd
col = table.column(i)
values = col.to_pandas().values
if hasattr(values, 'flags') and not values.flags.writeable:
# ARROW-1054: in pandas 0.19.2, factorize will reject
# non-writeable arrays when calling MultiIndex.from_arrays
values = values.copy()
if isinstance(col.type, pa.lib.TimestampType):
index_level = (pd.Series(values).dt.tz_localize('utc')
.dt.tz_convert(col.type.tz))
else:
index_level = pd.Series(values, dtype=values.dtype)
result_table = result_table.remove_column(
result_table.schema.get_field_index(field_name)
)
return result_table, index_level, index_name
def _backwards_compatible_index_name(raw_name, logical_name):
"""Compute the name of an index column that is compatible with older
versions of :mod:`pyarrow`.
Parameters
----------
raw_name : str
logical_name : str
Returns
-------
result : str
Notes
-----
* Part of :func:`~pyarrow.pandas_compat.table_to_blockmanager`
"""
# Part of table_to_blockmanager
if raw_name == logical_name and _is_generated_index_name(raw_name):
return None
else:
return logical_name
def _is_generated_index_name(name):
pattern = r'^__index_level_\d+__$'
return re.match(pattern, name) is not None
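# Illustrative sketch: _is_generated_index_name('__index_level_0__') is True,
# while a user-supplied name such as 'timestamp' does not match the pattern.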
_pandas_logical_type_map = {
'date': 'datetime64[D]',
'datetime': 'datetime64[ns]',
'unicode': np.unicode_,
'bytes': np.bytes_,
'string': np.str_,
'empty': np.object_,
}
def _pandas_type_to_numpy_type(pandas_type):
"""Get the numpy dtype that corresponds to a pandas type.
Parameters
----------
pandas_type : str
The result of a call to pandas.lib.infer_dtype.
Returns
-------
dtype : np.dtype
The dtype that corresponds to `pandas_type`.
"""
try:
return _pandas_logical_type_map[pandas_type]
except KeyError:
if 'mixed' in pandas_type:
# catching 'mixed', 'mixed-integer' and 'mixed-integer-float'
return np.object_
return np.dtype(pandas_type)
def _get_multiindex_codes(mi):
# compat for pandas < 0.24 (MI labels renamed to codes).
if isinstance(mi, _pandas_api.pd.MultiIndex):
return mi.codes if hasattr(mi, 'codes') else mi.labels
else:
return None
def _reconstruct_columns_from_metadata(columns, column_indexes):
"""Construct a pandas MultiIndex from `columns` and column index metadata
in `column_indexes`.
Parameters
----------
columns : List[pd.Index]
The columns coming from a pyarrow.Table
column_indexes : List[Dict[str, str]]
The column index metadata deserialized from the JSON schema metadata
in a :class:`~pyarrow.Table`.
Returns
-------
result : MultiIndex
The index reconstructed using `column_indexes` metadata with levels of
the correct type.
Notes
-----
* Part of :func:`~pyarrow.pandas_compat.table_to_blockmanager`
"""
pd = _pandas_api.pd
# Get levels and labels, and provide sane defaults if the index has a
# single level to avoid if/else spaghetti.
levels = getattr(columns, 'levels', None) or [columns]
labels = _get_multiindex_codes(columns) or [
pd.RangeIndex(len(level)) for level in levels
]
# Convert each level to the dtype provided in the metadata
levels_dtypes = [
(level, col_index.get('pandas_type', str(level.dtype)))
for level, col_index in zip_longest(
levels, column_indexes, fillvalue={}
)
]
new_levels = []
encoder = operator.methodcaller('encode', 'UTF-8')
for level, pandas_dtype in levels_dtypes:
dtype = _pandas_type_to_numpy_type(pandas_dtype)
# Since our metadata is UTF-8 encoded, Python turns things that were
# bytes into unicode strings when json.loads-ing them. We need to
# convert them back to bytes to preserve metadata.
if dtype == np.bytes_:
level = level.map(encoder)
elif level.dtype != dtype:
level = level.astype(dtype)
new_levels.append(level)
return pd.MultiIndex(new_levels, labels, names=columns.names)
def _table_to_blocks(options, block_table, categories, extension_columns):
# Part of table_to_blockmanager
# Convert an arrow table to Block from the internal pandas API
result = pa.lib.table_to_blocks(options, block_table, categories,
extension_columns)
# Defined above
return [_reconstruct_block(item) for item in result]
def _flatten_single_level_multiindex(index):
pd = _pandas_api.pd
if isinstance(index, pd.MultiIndex) and index.nlevels == 1:
levels, = index.levels
labels, = _get_multiindex_codes(index)
# Cheaply check that we do not somehow have duplicate column names
if not index.is_unique:
raise ValueError('Found non-unique column index')
return pd.Index([levels[_label] if _label != -1 else None
for _label in labels],
name=index.names[0])
return index
def _add_any_metadata(table, pandas_metadata):
modified_columns = {}
modified_fields = {}
schema = table.schema
index_columns = pandas_metadata['index_columns']
# only take index columns into account if they are an actual table column
index_columns = [idx_col for idx_col in index_columns
if isinstance(idx_col, six.string_types)]
n_index_levels = len(index_columns)
n_columns = len(pandas_metadata['columns']) - n_index_levels
# Add time zones
for i, col_meta in enumerate(pandas_metadata['columns']):
raw_name = col_meta.get('field_name')
if not raw_name:
# deal with metadata written with arrow < 0.8 or fastparquet
raw_name = col_meta['name']
if i >= n_columns:
# index columns
raw_name = index_columns[i - n_columns]
if raw_name is None:
raw_name = 'None'
idx = schema.get_field_index(raw_name)
if idx != -1:
if col_meta['pandas_type'] == 'datetimetz':
col = table[idx]
converted = col.to_pandas()
tz = col_meta['metadata']['timezone']
tz_aware_type = pa.timestamp('ns', tz=tz)
with_metadata = pa.Array.from_pandas(converted,
type=tz_aware_type)
modified_fields[idx] = pa.field(schema[idx].name,
tz_aware_type)
modified_columns[idx] = with_metadata
if len(modified_columns) > 0:
columns = []
fields = []
for i in range(len(table.schema)):
if i in modified_columns:
columns.append(modified_columns[i])
fields.append(modified_fields[i])
else:
columns.append(table[i])
fields.append(table.schema[i])
return pa.Table.from_arrays(columns, schema=pa.schema(fields))
else:
return table
# ----------------------------------------------------------------------
# Helper functions used in lib
def make_tz_aware(series, tz):
"""
Make a datetime64 Series timezone-aware for the given tz
"""
tz = pa.lib.string_to_tzinfo(tz)
series = (series.dt.tz_localize('utc')
.dt.tz_convert(tz))
return series
|
apache-2.0
| -6,731,303,660,057,483,000
| 33.247698
| 79
| 0.591912
| false
| 4.007435
| false
| false
| false
|
nymoral/euler
|
p18.py
|
1
|
2156
|
"""
By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.
3 ->
7 4 ->
2 4 6
8 5 9 3
That is, 3 + 7 + 4 + 9 = 23.
Find the maximum total from top to bottom of the triangle below:
75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23
NOTE: As there are only 16384 routes, it is possible to solve this problem by trying every route.
However, Problem 67, is the same challenge with a triangle containing one-hundred rows; it cannot be solved by brute force, and requires a clever method! ;o)
"""
def biggest_sum(triangle):
triangle = triangle.split("\n")
triangle = [t for t in triangle if t != ""]
triangle = [[int(x) for x in t.split()] for t in triangle]
# Flip the triangle upside down and expand each node thus:
# node in lowest level (0 in upside-down one) becomes (node)
    # node (j) in other levels (i) becomes (node + max(level[i + 1][j], level[i + 1][j+1])), where we index the original triangle.
# The biggest path sum will be at the top of the original triangle (bottom of the upside-down one)
triangle = triangle[::-1]
for rid, row in enumerate(triangle):
if rid != 0:
for nid, node in enumerate(row):
row[nid] = node + max(triangle[rid - 1][nid], triangle[rid - 1][nid + 1])
#print(row)
return triangle[-1][0]
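# Quick check against the small example in the docstring: the four-row
# triangle "3 / 7 4 / 2 4 6 / 8 5 9 3" should yield 23 (the 3 + 7 + 4 + 9 path).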
if __name__ == "__main__":
triangle = """
75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23
"""
print(biggest_sum(triangle))
|
mit
| 1,246,611,544,375,990,300
| 31.179104
| 157
| 0.650742
| false
| 3.189349
| false
| false
| false
|
BV-DR/foamBazar
|
pythonScripts/gmshScript/geo.py
|
1
|
14381
|
import numpy as np
import copy
from .misc import *
from .point import Point
from .line import Line
from .surface import Surface
from .volume import Volume
"""
class to handle gmsh geo-file(s)
"""
class extdb(dict):
'''
    Extrude database: allows dict keys to be accessed conveniently as attributes
'''
def __getattr__(self, attr):
return self[attr]
class geo(object):
def __init__(self):
'''
GMSH requires users to provide unique ID(s) for point(s), line(s), etc.
and we need to keep track of these ID(s) manually
'''
self.__dict__[Point._ID_NAME] = 0
self.__dict__[Line._ID_NAME] = 0
self.__dict__[Surface._ID_NAME] = 0
self.__dict__[Volume._ID_NAME] = 0
self.__dict__[Point._DB_NAME] = dict()
self.__dict__[Line._DB_NAME] = dict()
self.__dict__[Surface._DB_NAME] = dict()
self.__dict__[Volume._DB_NAME] = dict()
self._EXTRUDE_ID = 0
self._PHYS_IDS = [] # array of physical group id(s)
self._CODE = [
'/* This script was generated using fsMesher.gmshScript */',
'Geometry.OldNewReg=0;'
]
return
# for printing to terminal
def __repr__(self):
quickinfo = "geo(p:"+str(len(getDB(self,Point)))
if self.hasDB(Line):
quickinfo += ",l:" + str(len(getDB(self,Line)))
if self.hasDB(Surface):
quickinfo += ",s:" + str(len(getDB(self,Surface)))
return quickinfo + ")"
def printDB(self):
if not self.hasDB(Point):
print 'no data'
return
self._print_db(getDB(self,Point), prefix='p')
print 'next p:', getIDX(self,Point) + 1
self._print_db(getDB(self,Line), prefix='l')
print 'next l:', getIDX(self,Line) + 1
self._print_db(getDB(self,Surface), prefix='s')
print 'next s:', getIDX(self,Surface) + 1
self._print_db(getDB(self,Volume), prefix='v')
print 'next v:', getIDX(self,Volume) + 1
print
self.printScript()
return
def _print_db(self, db, prefix=''):
idx = sorted(db, key=db.get)
for i in idx:
print prefix + str(db[i]), ':', i
return
def printScript(self):
tmp = self._CODE
for i in tmp:
print i
return
def add(self, obj):
'''
Add a geometrical object to the code ... the actual code is generated in
obj.code(self) where the arg. self is needed for a proper check of id(s)
'''
obj_code = obj.code(self)
if obj_code:
self._CODE.append(obj_code)
self._db_insert(obj)
return
def addPoint(self, x, y, z, lc=None):
p = Point(x,y,z,lc)
self.add(p)
return p
def addLine(self, p0, p1):
l = Line(self,p0,p1)
self.add(l)
return l
def extrude(self, obj, dx, dy, dz, layers=1, opts=None):
'''
Extrude "point|line|surface" along translation axis
'''
# we need the object in a list format
objList = obj if isinstance(obj, list) else [obj]
if len(objList) == 0 or objList[0] is None: return
assert isinstance(dx, (int,long,float))
assert isinstance(dy, (int,long,float))
assert isinstance(dz, (int,long,float))
assert isinstance(layers, (str,int,list,np.ndarray))
#The layers are defined using two arrays i.e. Layers {{nElem[]},{nCut[]}}
        #The first array nElem[]={1,1,1,(n elements),1,1,1} defines the number of elements created between each cut.
        #The second array nCut[]={0.1,0.2,(n cuts),...,1} defines the normalized cut locations, where the last cut must be at 100%, i.e. 1
layers_str='1'
if isinstance(layers, (int, long)):
layers_str=str(layers)
elif isinstance(layers, str):
# user(s) need to provide a valid format here
# e.g: '#n' or '{n,n,n,n}, {float,float,float,1}'
layers_str=layers
elif isinstance(layers, (np.ndarray,list)):
layerList = copy.deepcopy(layers)
layerList.sort()
maxVal = max(layerList) # for normalization
# assume each cut has 1 element, and use only cut locations to control the extrude
nElem_str = ','.join(str(1) for i in layerList)
cut_str = ','.join(Point._FLOAT_TO_STR.format(float(i)/maxVal) for i in layerList)
layers_str = '{' + nElem_str + '},{' + cut_str + '}'
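            # Worked example (input assumed): layers=[1, 2, 4] gives
            # nElem_str '1,1,1' and normalized cuts 0.25, 0.5 and 1.0
            # (formatted by Point._FLOAT_TO_STR), i.e. one element per cut.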
#
# Scan the object list and determine the type
# All element must be of the same type i.e. either Point|Line|Surface
objtype = objList[0].__class__
for i in objList:
if not isinstance(i, objtype):
raise RuntimeError("extrude: all extruded obj must be of the same type")
#
if isinstance(objList[0], Point):
return self._extrude_points(objList, [dx,dy,dz], layers_str, opts=opts)
elif isinstance(objList[0], Line):
return self._extrude_lines(objList, [dx,dy,dz], layers_str, opts=opts)
elif isinstance(objList[0], Surface):
return self._extrude_surfaces(objList, [dx,dy,dz], layers_str, opts=opts)
else:
raise RuntimeError('The object to be extruded must be of type Point|Line|Surface')
return
def hasDB(self,obj):
return bool(getDB(self,obj))
def incIDX(self,obj,n):
self.__dict__[obj._ID_NAME] += n
return
def get(self, obj, idx):
db=getDB(self,obj)
allIdx=db.values()
if not abs(idx) in allIdx: return None
return obj.fromkey(db.keys()[allIdx.index(abs(idx))])
def _create_idx_str(self, objList):
idx = []
for obj in objList:
if not obj.key() in getDB(self,obj):
raise RuntimeError('id not found: ' + str(obj))
idx.append(getDB(self,obj)[obj.key()])
return ','.join(str(i) for i in idx)
def _db_insert(self, obj):
found,idx = exist(self,obj)
self.incIDX(obj,1) # gmsh always keeps incrementing the id by 1 !!!
if not found:
getDB(self,obj)[obj.key()] = getIDX(self,obj)
return True # insert successful
else:
return False # no need to insert, the obj already exists
def _extrude_points(self, pointList, axis, layers, opts=None):
'''
line[] = Extrude{dx, dy, dz} { Point{#ID}; Layers{{1,..(nElem)..,1},{0.1,..(nCut)..,1}}; };
For each point extruded, 1 new point and 1 new line are created
'''
out = extdb({
'newPoints': [],
'newLines': []
})
ok_to_extrude=False
for i in pointList:
newpoint = Point(np.asarray(axis) + i.pos)
if self._db_insert(newpoint): ok_to_extrude=True
newline = Line(self,i,newpoint)
if self._db_insert(newline): ok_to_extrude=True
out['newPoints'].append(newpoint)
out['newLines'].append(newline)
if ok_to_extrude:
idx_str = self._create_idx_str(pointList)
axis_str = ','.join(Point._FLOAT_TO_STR.format(i) for i in axis)
self._EXTRUDE_ID += 1
self._CODE.append(
'ex%d[] = Extrude {%s} { Point{%s}; Layers{%s}; };' %
(self._EXTRUDE_ID, axis_str, idx_str, layers)
)
return out
def _extrude_lines(self, lineList, axis, layers, opts=None):
'''
surface[] = Extrude{dx, dy, dz} { Line{#ID}; Layers{{1,..(nElem)..,1},{0.1,..(nCut)..,1}}; };
For each line extruded, 2 new points, 3 new lines and 1 surface are created
'''
out = extdb({
'newPoints': [],
'newLines': [],
'newSurfaces': []
})
axis_as_nparray = np.asarray(axis)
ok_to_extrude=False
for i in lineList:
# 2 old point(s),
oldPoint0 = self.get(Point, i.pid[0])
oldPoint1 = self.get(Point, i.pid[1])
# 2 new points
newpoint0 = Point(axis_as_nparray + oldPoint0.pos)
newpoint1 = Point(axis_as_nparray + oldPoint1.pos)
# create 3 new lines
if self._db_insert(newpoint0): ok_to_extrude=True
if self._db_insert(newpoint1): ok_to_extrude=True
newline1 = Line(self,newpoint0,newpoint1)
if self._db_insert(newline1): ok_to_extrude=True
#
self.incIDX(Point,2) # stupid gmsh
newline2 = Line(self,oldPoint0,newpoint0)
if self._db_insert(newline2): ok_to_extrude=True
#
self.incIDX(Point,2) # stupid gmsh
newline3 = Line(self,oldPoint1,newpoint1)
if self._db_insert(newline3): ok_to_extrude=True
# create 1 new surface
newsurf = Surface(self,[i,newline3,newline1,newline2])
if self._db_insert(newsurf): ok_to_extrude=True
out['newPoints'].append(newpoint0)
out['newPoints'].append(newpoint1)
out['newLines'].append(newline1)
out['newLines'].append(newline2)
out['newLines'].append(newline3)
out['newSurfaces'].append(newsurf)
if ok_to_extrude:
idx_str = self._create_idx_str(lineList)
axis_str = ','.join(Point._FLOAT_TO_STR.format(i) for i in axis)
opts_str = opts if opts is not None else 'Recombine;'
self._EXTRUDE_ID += 1
self._CODE.append(
'ex%d[] = Extrude {%s} { Line{%s}; Layers{%s}; %s};' %
(self._EXTRUDE_ID, axis_str, idx_str, layers, opts_str)
)
return out
def _extrude_surfaces(self, surfList, axis, layers, opts=None):
'''
volume[] = Extrude{dx, dy, dz} { Surface{#ID}; Layers{{1,..(nElem)..,1},{0.1,..(nCut)..,1}}; };
If the surface has n lines, we will create
n new points,
2*n new lines,
n+1 new surfaces,
and 1 volume
'''
out = extdb({
'newPoints': [],
'newLines': [],
'newSurfaces': [],
'newVolumes': [],
})
axis_as_nparray = np.asarray(axis)
ok_to_extrude=False
newp=out['newPoints']
newl=out['newLines']
news=out['newSurfaces']
newv=out['newVolumes']
for s in surfList:
# extract ordered surface points
sp=[]
for i in s.lid:
l=self.get(Line, i)
if (i<0):
sp.append(self.get(Point,l.pid[1]))
else:
sp.append(self.get(Point, l.pid[0]))
n = len(sp) # the total number of point(s) on this surface
# create line(s) parallel to old lines
# treat 1st line (stupid gmsh), 2 newp, 1 newl
newp.append(Point(axis_as_nparray + sp[0].pos))
if self._db_insert(newp[-1]): ok_to_extrude=True
newp.append(Point(axis_as_nparray + sp[1].pos))
if self._db_insert(newp[-1]): ok_to_extrude=True
newl.append(Line(self,newp[-2],newp[-1]))
if self._db_insert(newl[-1]): ok_to_extrude=True
# treat internal line(s), 1 newp, 1 newl for each internal line
for i in sp[2:]:
newp.append(Point(axis_as_nparray + i.pos))
self.incIDX(Point,3) # stupid gmsh
if self._db_insert(newp[-1]): ok_to_extrude=True
newl.append(Line(self,newp[-2],newp[-1]))
if self._db_insert(newl[-1]): ok_to_extrude=True
#
# Important Note to myself:
# Do not change self.incIDX(Point,???) before this line
#
# treat last line, no newp, 1 newl
self.incIDX(Point,18) # stupid gmsh
newl.append(Line(self,newp[-1],newp[-n]))
if self._db_insert(newl[-1]): ok_to_extrude=True
# create lines in the extruded direction, n newl
# the first two lines are treated differently (stupid gmsh)
self.incIDX(Line,1) # stupid gmsh
newl.append(Line(self, sp[0], newp[-n]))
if self._db_insert(newl[-1]): ok_to_extrude=True
newl.append(Line(self, sp[1], newp[-n+1]))
if self._db_insert(newl[-1]): ok_to_extrude=True
for i in range(2,n):
self.incIDX(Point,6) # stupid gmsh
self.incIDX(Line,2) # stupid gmsh
newl.append(Line(self, sp[i], newp[-n+i]))
if self._db_insert(newl[-1]): ok_to_extrude=True
#
# Important Note to myself:
# Do not change self.incIDX(Line,???) before this line
#
# create n+1 new surfaces
self.incIDX(Line,3) # stupid gmsh
self.incIDX(Surface,1) # stupid gmsh
for i in range(0,n-1):
news.append(Surface(self,[s.lid[i],newl[-2*n+i],newl[-n+i],newl[-n+i+1]]))
if self._db_insert(news[-1]): ok_to_extrude=True
news.append(Surface(self,[s.lid[-1],newl[-n-1],newl[-n],newl[-1]]))
if self._db_insert(news[-1]): ok_to_extrude=True
lList=[] # last surface
for i in range(0,n): lList.append(newl[-2*n+i])
news.append(Surface(self,lList))
if self._db_insert(news[-1]): ok_to_extrude=True
# create 1 volume
newv.append(Volume(self, [s,news[-1]] + news[-n-1:-1]))
if self._db_insert(newv[-1]): ok_to_extrude=True
if ok_to_extrude:
idx_str = self._create_idx_str(surfList)
axis_str = ','.join(Point._FLOAT_TO_STR.format(i) for i in axis)
opts_str = opts if opts is not None else 'Recombine;'
self._EXTRUDE_ID += 1
self._CODE.append(
'ex%d[] = Extrude {%s} { Surface{%s}; Layers{%s}; %s};' %
(self._EXTRUDE_ID, axis_str, idx_str, layers, opts_str)
)
return out
|
gpl-3.0
| -6,198,291,772,516,877,000
| 39.509859
| 137
| 0.527849
| false
| 3.428129
| false
| false
| false
|
AutorestCI/azure-sdk-for-python
|
azure-cognitiveservices-vision-contentmoderator/azure/cognitiveservices/vision/contentmoderator/models/classification.py
|
1
|
1545
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Classification(Model):
"""The classification details of the text.
:param adult_score: The adult score.
:type adult_score: float
:param racy_score: The racy score.
:type racy_score: float
:param offensive_score: The offensive score.
:type offensive_score: float
:param review_recommended: The review recommended flag.
:type review_recommended: bool
"""
_attribute_map = {
'adult_score': {'key': 'AdultScore', 'type': 'float'},
'racy_score': {'key': 'RacyScore', 'type': 'float'},
'offensive_score': {'key': 'OffensiveScore', 'type': 'float'},
'review_recommended': {'key': 'ReviewRecommended', 'type': 'bool'},
}
def __init__(self, adult_score=None, racy_score=None, offensive_score=None, review_recommended=None):
super(Classification, self).__init__()
self.adult_score = adult_score
self.racy_score = racy_score
self.offensive_score = offensive_score
self.review_recommended = review_recommended
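# Minimal usage sketch (scores assumed): Classification(adult_score=0.01,
# racy_score=0.02, offensive_score=0.0, review_recommended=False) populates
# the optional keyword arguments defined above.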
|
mit
| -4,682,587,792,800,070,000
| 37.625
| 105
| 0.606472
| false
| 3.941327
| false
| false
| false
|
Asparagirl/ArchiveBot
|
pipeline/archivebot/seesaw/wpullargs_test.py
|
1
|
2884
|
from os import environ as env
import unittest
from .wpull import WpullArgs
from seesaw.item import Item
# taken form pipeline/pipeline.py
if 'WARC_MAX_SIZE' in env:
WARC_MAX_SIZE = env['WARC_MAX_SIZE']
else:
WARC_MAX_SIZE = '5368709120'
def joined(args):
return str.join(' ', args)
class TestWpullArgs(unittest.TestCase):
def setUp(self):
self.item = {
'cookie_jar': '/foobar/cookies.txt',
'ident': 'abc123',
'item_dir': '/foobar',
'url': 'http://www.example.com',
'warc_file_base': '/foobar/warc'
}
self.args = WpullArgs(default_user_agent='Default/1',
wpull_exe='/bin/wpull',
youtube_dl_exe='/usr/bin/youtube-dl',
phantomjs_exe='/usr/bin/phantomjs',
finished_warcs_dir='/lost+found/',
warc_max_size=WARC_MAX_SIZE
)
def test_user_agent_can_be_set(self):
self.item['user_agent'] = 'Frobinator/20.1'
self.assertIn('-U Frobinator/20.1', joined(self.args.realize(self.item)))
def test_youtube_dl_activation(self):
self.item['youtube_dl'] = True
self.assertIn('--youtube-dl', joined(self.args.realize(self.item)))
def test_uses_default_user_agent(self):
self.assertIn('-U Default/1', joined(self.args.realize(self.item)))
def test_recursive_fetch_settings(self):
self.item['recursive'] = True
self.item['depth'] = 'inf'
cmdline = joined(self.args.realize(self.item))
self.assertIn('--recursive', cmdline)
self.assertIn('--level inf', cmdline)
def test_nonrecursive_fetch_settings(self):
self.item['recursive'] = False
cmdline = joined(self.args.realize(self.item))
self.assertNotIn('--recursive', cmdline)
self.assertNotIn('--level inf', cmdline)
def test_recursive_fetch_enables_linked_pages_and_requisites(self):
self.item['recursive'] = True
self.item['depth'] = 'inf'
cmdline = joined(self.args.realize(self.item))
self.assertIn('--span-hosts-allow page-requisites,linked-pages',
cmdline)
def test_recursive_fetch_with_no_offsite_links_enables_requisites(self):
self.item['recursive'] = True
self.item['depth'] = 'inf'
self.item['no_offsite_links'] = True
cmdline = joined(self.args.realize(self.item))
self.assertIn('--span-hosts-allow page-requisites', cmdline)
self.assertNotIn('linked-pages', cmdline)
def test_nonrecursive_fetch_enables_requisites(self):
self.item['recursive'] = False
cmdline = joined(self.args.realize(self.item))
self.assertIn('--span-hosts-allow page-requisites', cmdline)
self.assertNotIn('linked-pages', cmdline)
# vim:ts=4:sw=4:et:tw=78
|
mit
| -364,133,565,755,845,800
| 30.692308
| 81
| 0.606449
| false
| 3.569307
| true
| false
| false
|
pfig/CmdrKeen
|
setup.py
|
1
|
1221
|
from setuptools import setup, find_packages
def long_description_from_readme():
with open('README.rst') as readme:
return readme.read()
setup(
name="CommanderKeen",
version="0.1",
packages=find_packages(),
scripts=['scripts/keen.py'],
author="Pedro Figueiredo",
author_email="pfig@me.com",
description="Commander Keen is a Slack bot with long term memory",
long_description=long_description_from_readme(),
license="MIT",
keywords="slack bot chat",
url="https://pfig.github.io/CmdrKeen/",
data_files=[('config', ['cfg/keen.json'])],
setup_requires=['pytest-runner'],
install_requires=[
'slackclient>=0.16',
'websocket-client>=0.35',
'requests>=2.9.1',
'python-daemon>=2.1.1'
],
tests_require=['pytest'],
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Topic :: Communications :: Chat'
]
)
|
mit
| 3,875,417,983,791,774,000
| 30.307692
| 70
| 0.610975
| false
| 3.791925
| false
| false
| false
|
Haikson/virtenviro
|
virtenviro/content/templatetags/page_tags.py
|
1
|
4472
|
# ~*~ coding: utf-8 ~*~
__author__ = 'Kamo Petrosyan'
from django import template
from django.db.models import Q
from virtenviro.content.models import Snippet, Page, AdditionalField, Menu
from django.template import loader, Context
from virtenviro.utils import *
register = template.Library()
@register.assignment_tag
def additional_field(page, field_name):
try:
additional_field = AdditionalField.objects.get(name=field_name)
field = page.fieldvalue_set.filter(additional_field=additional_field)
if field.count() > 0:
return field[0]
except AdditionalField.DoesNotExist:
return None
@register.simple_tag(takes_context=True)
def render_snippet(context, snippet_name):
    try:
        snippet = Snippet.objects.get(name=snippet_name)
    except Snippet.DoesNotExist:
        # Avoid an AttributeError below if the snippet does not exist
        return ''
    if snippet.render:
        t = loader.get_template_from_string(snippet.code)
        res = t.render(Context(context))
        return res
    return snippet.code
@register.simple_tag(takes_context=True)
def render_content(context, content):
t = loader.get_template_from_string(content)
return t.render(Context(context))
@register.simple_tag(takes_context=True)
def render_field(context, page, field_name):
try:
additional_field = AdditionalField.objects.get(name=field_name)
except AdditionalField.DoesNotExist:
return ''
    fields = page.fieldvalue_set.filter(additional_field=additional_field)
    if not fields:
        return ''
    # Use the first matching value, mirroring additional_field() above
    field = fields[0]
    if additional_field.render:
        t = loader.get_template_from_string(field.value)
        return t.render(Context(context))
    else:
        return field.value
@register.assignment_tag(takes_context=True)
def get_pages(context, *args, **kwargs):
parent_id = kwargs.get('parent', 0)
if parent_id == 0:
queryset = Page.objects.filter(parent__isnull=True)
else:
if isinstance(parent_id, int):
try:
parent_node = Page.objects.get(id=parent_id)
except Page.DoesNotExist:
return None
elif isinstance(parent_id, str) or isinstance(parent_id, unicode):
try:
parent_node = Page.objects.get(slug=parent_id)
except Page.DoesNotExist:
return None
level = kwargs.get('level', 1) + 1
queryset = Page.objects.filter(
level__lte=level,
tree_id=parent_node.tree_id,
lft__gte=parent_node.lft,
rght__lte=parent_node.rght)
if not kwargs.get('include_parents', False):
queryset = queryset.exclude(level__lte=parent_node.level)
if kwargs.get('author', False):
queryset = queryset.filter(author=kwargs['author'])
queryset = queryset.order_by(kwargs.get('order', 'id'))
if context['request'].GET.has_key('page'):
rpage = context['request'].GET['page']
else:
rpage = 1
if kwargs.get('limit', False):
queryset = paginate(queryset, rpage, int(kwargs['limit']))
return queryset
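# Hypothetical template usage of the assignment tag above (slug and limit
# assumed): {% get_pages parent='news' level=2 limit=10 as pages %}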
@register.assignment_tag(takes_context=True)
def get_content_ml(context, page, lang):
content = page.get_content(language=lang)
return content
@register.assignment_tag
def leaf_pages(root=None, root_id=None, count=0, rnd=False):
if root is None:
if root_id is None:
return []
else:
try:
root = Page.objects.get(pk=root_id)
except Page.DoesNotExist:
return []
nodes = []
m_nodes = root.get_descendants(include_self=False).order_by('-pub_datetime', '-pk')
if rnd:
m_nodes = m_nodes.order_by('?')
if count == 0:
count = m_nodes.count()
for m_node in m_nodes:
if m_node.is_leaf_node():
nodes.append(m_node)
count -= 1
if count == 0:
break
return nodes
@register.assignment_tag
def page_breadcrumb(page):
breadcrumb = [page]
while page.parent:
page = page.parent
breadcrumb.append(page)
breadcrumb.reverse()
return breadcrumb
@register.assignment_tag
def get_page_by_id(page_id):
try:
return Page.objects.get(pk=page_id)
except Page.DoesNotExist:
return None
@register.assignment_tag
def get_menu(sys_name):
try:
menu = Menu.objects.get(sys_name=sys_name)
except Menu.DoesNotExist:
return None
return menu.pagemenurelationship_set.all().order_by('ordering')
|
apache-2.0
| -318,434,964,313,289,300
| 28.813333
| 87
| 0.632156
| false
| 3.68369
| false
| false
| false
|
MehdiSfr/tensor-flow
|
tensorflow/python/ops/candidate_sampling_ops.py
|
1
|
18205
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_candidate_sampling_ops
from tensorflow.python.ops import math_ops
def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
range_max, seed=None, name=None):
"""Samples a set of classes using a uniform base distribution.
This operation randomly samples a tensor of sampled classes
(`sampled_candidates`) from the range of integers `[0, range_max]`.
The elements of `sampled_candidates` are drawn without replacement
(if `unique=True`) or with replacement (if `unique=False`) from
the base distribution.
The base distribution for this operation is the uniform distribution
over the range of integers `[0, range_max]`.
In addition, this operation returns tensors `true_expected_count`
and `sampled_expected_count` representing the number of times each
of the target classes (`true_classes`) and the sampled
classes (`sampled_candidates`) is expected to occur in an average
tensor of sampled classes. These values correspond to `Q(y|x)`
defined in [this
document](http://www.tensorflow.org/extras/candidate_sampling.pdf).
If `unique=True`, then these are post-rejection probabilities and we
compute them approximately.
Args:
true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of classes to randomly sample per batch.
unique: A `bool`. Determines whether all sampled classes in a batch are
unique.
range_max: An `int`. The number of possible classes.
seed: An `int`. An operation-specific seed. Default is 0.
name: A name for the operation (optional).
Returns:
sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
The sampled classes.
true_expected_count: A tensor of type `float`. Same shape as
`true_classes`. The expected counts under the sampling distribution
of each of `true_classes`.
sampled_expected_count: A tensor of type `float`. Same shape as
`sampled_candidates`. The expected counts under the sampling distribution
of each of `sampled_candidates`.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_candidate_sampling_ops._uniform_candidate_sampler(
true_classes, num_true, num_sampled, unique, range_max, seed=seed1,
seed2=seed2, name=name)
def log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
range_max, seed=None, name=None):
"""Samples a set of classes using a log-uniform (Zipfian) base distribution.
This operation randomly samples a tensor of sampled classes
(`sampled_candidates`) from the range of integers `[0, range_max]`.
The elements of `sampled_candidates` are drawn without replacement
(if `unique=True`) or with replacement (if `unique=False`) from
the base distribution.
The base distribution for this operation is an approximately log-uniform
or Zipfian distribution:
`P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
This sampler is useful when the target classes approximately follow such
a distribution - for example, if the classes represent words in a lexicon
sorted in decreasing order of frequency. If your classes are not ordered by
decreasing frequency, do not use this op.
In addition, this operation returns tensors `true_expected_count`
and `sampled_expected_count` representing the number of times each
of the target classes (`true_classes`) and the sampled
classes (`sampled_candidates`) is expected to occur in an average
tensor of sampled classes. These values correspond to `Q(y|x)`
defined in [this
document](http://www.tensorflow.org/extras/candidate_sampling.pdf).
If `unique=True`, then these are post-rejection probabilities and we
compute them approximately.
Args:
true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of classes to randomly sample per batch.
unique: A `bool`. Determines whether all sampled classes in a batch are
unique.
range_max: An `int`. The number of possible classes.
seed: An `int`. An operation-specific seed. Default is 0.
name: A name for the operation (optional).
Returns:
sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
The sampled classes.
true_expected_count: A tensor of type `float`. Same shape as
`true_classes`. The expected counts under the sampling distribution
of each of `true_classes`.
sampled_expected_count: A tensor of type `float`. Same shape as
`sampled_candidates`. The expected counts under the sampling distribution
of each of `sampled_candidates`.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_candidate_sampling_ops._log_uniform_candidate_sampler(
true_classes, num_true, num_sampled, unique, range_max, seed=seed1,
seed2=seed2, name=name)
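# Worked example of the base distribution given in the docstring (range_max
# assumed to be 10): P(0) = (log(2) - log(1)) / log(11) ~= 0.289, so the
# smallest class ids are sampled most often under this Zipfian approximation.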
def learned_unigram_candidate_sampler(true_classes, num_true, num_sampled,
unique, range_max, seed=None, name=None):
"""Samples a set of classes from a distribution learned during training.
This operation randomly samples a tensor of sampled classes
(`sampled_candidates`) from the range of integers `[0, range_max]`.
The elements of `sampled_candidates` are drawn without replacement
(if `unique=True`) or with replacement (if `unique=False`) from
the base distribution.
The base distribution for this operation is constructed on the fly
during training. It is a unigram distribution over the target
classes seen so far during training. Every integer in `[0, range_max]`
begins with a weight of 1, and is incremented by 1 each time it is
seen as a target class. The base distribution is not saved to checkpoints,
so it is reset when the model is reloaded.
In addition, this operation returns tensors `true_expected_count`
and `sampled_expected_count` representing the number of times each
of the target classes (`true_classes`) and the sampled
classes (`sampled_candidates`) is expected to occur in an average
tensor of sampled classes. These values correspond to `Q(y|x)`
defined in [this
document](http://www.tensorflow.org/extras/candidate_sampling.pdf).
If `unique=True`, then these are post-rejection probabilities and we
compute them approximately.
Args:
true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of classes to randomly sample per batch.
unique: A `bool`. Determines whether all sampled classes in a batch are
unique.
range_max: An `int`. The number of possible classes.
seed: An `int`. An operation-specific seed. Default is 0.
name: A name for the operation (optional).
Returns:
sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
The sampled classes.
true_expected_count: A tensor of type `float`. Same shape as
`true_classes`. The expected counts under the sampling distribution
of each of `true_classes`.
sampled_expected_count: A tensor of type `float`. Same shape as
`sampled_candidates`. The expected counts under the sampling distribution
of each of `sampled_candidates`.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_candidate_sampling_ops._learned_unigram_candidate_sampler(
true_classes, num_true, num_sampled, unique, range_max, seed=seed1,
seed2=seed2, name=name)
def fixed_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique,
range_max, vocab_file='', distortion=0.0,
num_reserved_ids=0, num_shards=1, shard=0,
unigrams=[], seed=None, name=None):
"""Samples a set of classes using the provided (fixed) base distribution.
This operation randomly samples a tensor of sampled classes
(`sampled_candidates`) from the range of integers `[0, range_max]`.
The elements of `sampled_candidates` are drawn without replacement
(if `unique=True`) or with replacement (if `unique=False`) from
the base distribution.
The base distribution is read from a file or passed in as an
in-memory array. There is also an option to skew the distribution by
applying a distortion power to the weights.
In addition, this operation returns tensors `true_expected_count`
and `sampled_expected_count` representing the number of times each
of the target classes (`true_classes`) and the sampled
classes (`sampled_candidates`) is expected to occur in an average
tensor of sampled classes. These values correspond to `Q(y|x)`
defined in [this
document](http://www.tensorflow.org/extras/candidate_sampling.pdf).
If `unique=True`, then these are post-rejection probabilities and we
compute them approximately.
Args:
true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of classes to randomly sample per batch.
unique: A `bool`. Determines whether all sampled classes in a batch are
unique.
range_max: An `int`. The number of possible classes.
vocab_file: Each valid line in this file (which should have a CSV-like
format) corresponds to a valid word ID. IDs are in sequential order,
starting from num_reserved_ids. The last entry in each line is expected
to be a value corresponding to the count or relative probability. Exactly
one of `vocab_file` and `unigrams` needs to be passed to this operation.
distortion: The distortion is used to skew the unigram probability
distribution. Each weight is first raised to the distortion's power
before adding to the internal unigram distribution. As a result,
`distortion = 1.0` gives regular unigram sampling (as defined by the vocab
file), and `distortion = 0.0` gives a uniform distribution.
num_reserved_ids: Optionally some reserved IDs can be added in the range
`[0, num_reserved_ids]` by the users. One use case is that a special
unknown word token is used as ID 0. These IDs will have a sampling
probability of 0.
num_shards: A sampler can be used to sample from a subset of the original
range in order to speed up the whole computation through parallelism. This
parameter (together with `shard`) indicates the number of partitions that
are being used in the overall computation.
shard: A sampler can be used to sample from a subset of the original range
in order to speed up the whole computation through parallelism. This
parameter (together with `num_shards`) indicates the particular partition
number of the operation, when partitioning is being used.
unigrams: A list of unigram counts or probabilities, one per ID in
sequential order. Exactly one of `vocab_file` and `unigrams` should be
passed to this operation.
seed: An `int`. An operation-specific seed. Default is 0.
name: A name for the operation (optional).
Returns:
sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
The sampled classes.
true_expected_count: A tensor of type `float`. Same shape as
`true_classes`. The expected counts under the sampling distribution
of each of `true_classes`.
sampled_expected_count: A tensor of type `float`. Same shape as
`sampled_candidates`. The expected counts under the sampling distribution
of each of `sampled_candidates`.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_candidate_sampling_ops._fixed_unigram_candidate_sampler(
true_classes, num_true, num_sampled, unique, range_max,
vocab_file=vocab_file, distortion=distortion,
num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard,
unigrams=unigrams, seed=seed1, seed2=seed2, name=name)
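# Illustrative sketch (assumption, not original code): exactly one of
# `vocab_file` and `unigrams` is supplied.  With an in-memory count list
# (`word_counts` and `labels` are hypothetical names) a call might be:
#   sampled, true_expected, sampled_expected = fixed_unigram_candidate_sampler(
#       true_classes=labels, num_true=1, num_sampled=64, unique=True,
#       range_max=len(word_counts), distortion=0.75, unigrams=word_counts)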
def all_candidate_sampler(true_classes, num_true, num_sampled, unique,
seed=None, name=None):
"""Generate the set of all classes.
Deterministically generates and returns the set of all possible classes.
For testing purposes. There is no need to use this, since you might as
well use full softmax or full logistic regression.
Args:
true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of possible classes.
    unique: A `bool`. Ignored.
seed: An `int`. An operation-specific seed. Default is 0.
name: A name for the operation (optional).
Returns:
sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
This operation deterministically returns the entire range
`[0, num_sampled]`.
true_expected_count: A tensor of type `float`. Same shape as
`true_classes`. The expected counts under the sampling distribution
of each of `true_classes`. All returned values are 1.0.
sampled_expected_count: A tensor of type `float`. Same shape as
`sampled_candidates`. The expected counts under the sampling distribution
of each of `sampled_candidates`. All returned values are 1.0.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_candidate_sampling_ops._all_candidate_sampler(
true_classes, num_true, num_sampled, unique, seed=seed1, seed2=seed2,
name=name)
def compute_accidental_hits(true_classes, sampled_candidates, num_true,
seed=None, name=None):
"""Compute the position ids in `sampled_candidates` matching `true_classes`.
In Candidate Sampling, this operation facilitates virtually removing
sampled classes which happen to match target classes. This is done
in Sampled Softmax and Sampled Logistic.
See our [Candidate Sampling Algorithms
Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf).
We presuppose that the `sampled_candidates` are unique.
We call it an 'accidental hit' when one of the target classes
matches one of the sampled classes. This operation reports
accidental hits as triples `(index, id, weight)`, where `index`
represents the row number in `true_classes`, `id` represents the
position in `sampled_candidates`, and weight is `-FLOAT_MAX`.
The result of this op should be passed through a `sparse_to_dense`
operation, then added to the logits of the sampled classes. This
removes the contradictory effect of accidentally sampling the true
target classes as noise classes for the same example.
Args:
true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
The sampled_candidates output of CandidateSampler.
num_true: An `int`. The number of target classes per training example.
seed: An `int`. An operation-specific seed. Default is 0.
name: A name for the operation (optional).
Returns:
indices: A `Tensor` of type `int32` and shape `[num_accidental_hits]`.
Values indicate rows in `true_classes`.
ids: A `Tensor` of type `int64` and shape `[num_accidental_hits]`.
Values indicate positions in `sampled_candidates`.
weights: A `Tensor` of type `float` and shape `[num_accidental_hits]`.
Each value is `-FLOAT_MAX`.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_candidate_sampling_ops._compute_accidental_hits(
true_classes, sampled_candidates, num_true, seed=seed1, seed2=seed2,
name=name)
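# Hedged sketch of the flow described in the docstring above (hypothetical
# names, not part of the original file): the returned triples are typically
# scattered into the sampled logits so that accidental hits are masked out.
#   indices, ids, weights = compute_accidental_hits(labels, sampled, num_true=1)
#   # A sparse-to-dense step then yields a [batch_size, num_sampled] tensor
#   # holding -FLOAT_MAX at the hit positions, which is added to the logits.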
@ops.RegisterShape("AllCandidateSampler")
@ops.RegisterShape("FixedUnigramCandidateSampler")
@ops.RegisterShape("LearnedUnigramCandidateSampler")
@ops.RegisterShape("LogUniformCandidateSampler")
@ops.RegisterShape("ThreadUnsafeUnigramCandidateSampler")
@ops.RegisterShape("UniformCandidateSampler")
def _CandidateSamplerShape(op):
true_classes_shape = op.inputs[0].get_shape().with_rank(2)
batch_size = true_classes_shape[0]
num_sampled = op.get_attr("num_sampled")
num_true = op.get_attr("num_true")
return [tensor_shape.vector(num_sampled),
tensor_shape.matrix(batch_size, num_true),
tensor_shape.vector(num_sampled)]
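# Example (illustrative): with num_sampled=64, num_true=1 and a true_classes
# input of shape [32, 1], the shapes returned above are [64], [32, 1] and [64].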
@ops.RegisterShape("ComputeAccidentalHits")
def _ComputeAccidentalHitsShape(op):
num_true = op.get_attr("num_true")
# Validate that the input shape matches the attrs, even though it
# does not influence the shape of the output.
true_candidates_shape = op.inputs[0].get_shape().merge_with(
tensor_shape.matrix(None, num_true))
output_shape = tensor_shape.vector(None)
return [output_shape] * 3
|
apache-2.0
| -4,878,222,035,196,797,000
| 46.408854
| 80
| 0.715737
| false
| 4.05276
| false
| false
| false
|
xzhang2016/tfagent
|
setup.py
|
1
|
1318
|
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
def main():
setup(name='tfta',
version='0.0.1',
description='TF Agent',
long_description='TF Agent',
author='Xue Zhang',
author_email='xue.zhang@tufts.edu',
url='https://github.com/xzhang2016/tfagent',
packages=['tfta','enrichment'],
install_requires=['pysb', 'indra', 'pykqml', 'objectpath', 'rdflib',
'functools32', 'requests', 'lxml',
'pandas', 'suds'],
include_package_data=True,
keywords=['systems', 'biology', 'model', 'pathway', 'assembler',
'nlp', 'mechanism', 'biochemistry'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Mathematics',
],
)
if __name__ == '__main__':
main()
|
bsd-2-clause
| 5,773,306,868,928,383,000
| 38.939394
| 78
| 0.525797
| false
| 4.157729
| false
| false
| false
|
mendhak/Kindle-Time-and-Weather
|
server/weather-script.py
|
1
|
6448
|
#!/usr/bin/python
# Kindle Weather Display
# Matthew Petroff (http://www.mpetroff.net/)
# September 2012
#
# Owen Bullock - UK Weather - MetOffice - Aug 2013
# Apr 2014 - amended for Wind option
#
# Mendhak - redone for WeatherUnderground API
import json
import urllib2
from xml.dom import minidom
import datetime
import codecs
import os.path
import time
import sys
import os
#
# Weather Underground API Key - unique to me.
#
wuapikey= os.environ.get('WUNDERGROUND_API_KEY') or "2f1126aef047991e"
template = 'weather-script-preprocess_temps.svg'
#
# Map the Wunderground weather codes to Icons.
# ( See https://www.wunderground.com/weather/api/d/docs?d=resources/icon-sets&MR=1 )
#
mapping = [
[0 , 'skc '], # Clear night skc.svg
[1 , 'skc '], # Sunny day skc.svg
[2 , 'sct '], # Partly cloudy (night) sct.svg
[3 , 'sct '], # Partly cloudy (day) sct.svg
[4 , ' '], # Not used -
[5 , 'fg '], # Mist fg.svg
[6 , 'fg '], # Fog fg.svg
[7 , 'bkn '], # Cloudy bkn.svg
[8 , 'ovc '], # Overcast ovc.svg
[9 , 'hi_shwrs'], # Light rain shower (night) hi_shwrs.svg
[10, 'hi_shwrs'], # Light rain shower (day) hi_shwrs.svg
[11, 'hi_shwrs'], # Drizzle hi_shwrs.svg
[12, 'ra1 '], # Light rain ra1.svg
[13, 'ra '], # Heavy rain shower (night) ra.svg
[14, 'ra '], # Heavy rain shower (day) ra.svg
[15, 'ra '], # Heavy rain ra.svg
[16, 'rasn '], # Sleet shower (night) rasn.svg
[17, 'rasn '], # Sleet shower (day) rasn.svg
[18, 'rasn '], # Sleet rasn.svg
[19, 'ip '], # Hail shower (night) ip.svg
[20, 'ip '], # Hail shower (day) ip.svg
[21, 'ip '], # Hail ip.svg
[22, 'sn '], # Light snow shower (night) sn.svg
[23, 'sn '], # Light snow shower (day) sn.svg
[24, 'sn '], # Light snow sn.svg
    [25, 'sn      '], # Heavy snow shower (night)            sn.svg
[26, 'sn '], # Heavy snow shower (day) sn.svg
[27, 'sn '], # Heavy snow sn.svg
[28, 'tsra '], # Thunder shower (night) tsra.svg
[29, 'tsra '], # Thunder shower (day) tsra.svg
[30, 'tsra '], # Thunder tsra.svg
]
icon_dict={
'chanceflurries':'sn',
'chancerain':'hi_shwrs',
'chancesleet':'rasn',
'chancesnow':'sn',
'chancetstorms':'tsra',
'clear':'skc',
'cloudy':'bkn',
'flurries':'sn',
'fog':'fg',
'hazy':'fg',
'mostlycloudy':'ovc',
'mostlysunny':'skc',
    'partlycloudy':'bkn',
    'partlysunny':'skc',
    'sleet':'rasn',
    'rain':'ra',
    'snow':'sn',
    'sunny':'skc',
    'tstorms':'tsra',
}
#
# Download and parse weather data - location 353773 = Sutton, Surrey
#
weather_json=''
stale=True
if(os.path.isfile(os.getcwd() + "/wunderground.json")):
#Read the contents anyway
with open(os.getcwd() + "/wunderground.json", 'r') as content_file:
weather_json = content_file.read()
stale=time.time() - os.path.getmtime(os.getcwd() + "/wunderground.json") > (12*60*60)
#If old file or file doesn't exist, time to download it
if(stale):
try:
print "Old file, attempting re-download"
url='http://api.wunderground.com/api/' + wuapikey + '/forecast/q/UK/Reigate.json'
weather_json = urllib2.urlopen(url).read()
with open(os.getcwd() + "/wunderground.json", "w") as text_file:
text_file.write(weather_json)
except:
print "FAILED. using previous read"
with open(os.getcwd() + "/wunderground.json", 'r') as content_file:
weather_json = content_file.read()
weatherData = json.loads(weather_json)
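# The Wunderground forecast JSON is assumed (illustration only) to look roughly
# like: {"forecast": {"simpleforecast": {"forecastday": [
#   {"date": {"weekday": "Monday"}, "icon": "rain",
#    "high": {"celsius": "18"}, "low": {"celsius": "9"}}, ...]}}}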
icon_one = weatherData['forecast']['simpleforecast']['forecastday'][0]['icon']
high_one = weatherData['forecast']['simpleforecast']['forecastday'][0]['high']['celsius']
low_one = weatherData['forecast']['simpleforecast']['forecastday'][0]['low']['celsius']
day_one = weatherData['forecast']['simpleforecast']['forecastday'][0]['date']['weekday']
icon_two = weatherData['forecast']['simpleforecast']['forecastday'][1]['icon']
high_two = weatherData['forecast']['simpleforecast']['forecastday'][1]['high']['celsius']
low_two = weatherData['forecast']['simpleforecast']['forecastday'][1]['low']['celsius']
day_two = weatherData['forecast']['simpleforecast']['forecastday'][1]['date']['weekday']
icon_three = weatherData['forecast']['simpleforecast']['forecastday'][2]['icon']
high_three = weatherData['forecast']['simpleforecast']['forecastday'][2]['high']['celsius']
low_three = weatherData['forecast']['simpleforecast']['forecastday'][2]['low']['celsius']
day_three = weatherData['forecast']['simpleforecast']['forecastday'][2]['date']['weekday']
print icon_one,low_one,high_one,day_one
print icon_two,low_two,high_two,day_two
print icon_three,low_three,high_three,day_three
dtnow=datetime.datetime.now().strftime("%d-%b %H:%M")
print "NOW:",dtnow
#
# Preprocess SVG
#
# Open SVG to process
output = codecs.open(template , 'r', encoding='utf-8').read()
# Insert weather icons and temperatures
output = output.replace('ICON_ONE',icon_dict[icon_one])
output = output.replace('ICON_TWO',icon_dict[icon_two])
output = output.replace('ICON_THREE',icon_dict[icon_three])
output = output.replace('TIME_NOW',datetime.datetime.now().strftime("%H:%M"))
output = output.replace('HIGH_ONE',high_one)
output = output.replace('HIGH_TWO',high_two)
output = output.replace('HIGH_THREE',high_three)
output = output.replace('LOW_ONE',low_one)
output = output.replace('LOW_TWO',low_two)
output = output.replace('LOW_THREE',low_three)
# Insert current time
# (thanks Jennifer http://www.shatteredhaven.com/2012/11/1347365-kindle-weather-display.html)
output = output.replace('DATE_VALPLACE',str(dtnow))
readableDate = datetime.datetime.now().strftime("%A %B %d")
output = output.replace('TODAY_DATE', str(readableDate))
output = output.replace('DAY_TWO',day_two)
output = output.replace('DAY_THREE',day_three)
# Write output
codecs.open('weather-script-output.svg', 'w', encoding='utf-8').write(output)
|
mit
| 3,446,430,669,195,974,700
| 34.234973
| 93
| 0.596309
| false
| 2.916327
| false
| false
| false
|
xswxm/MyIoT
|
devices/system.py
|
1
|
1715
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import threading, os
class CPUTemp:
_lock = threading.RLock()
def __init__(self, id, title, feasible = True):
self.id = id
self.title = title
self.feasible = feasible
self.category = 'Value'
def description(self):
message = {}
message['id'] = self.id
message['title'] = self.title
message['category'] = self.category
message['value'] = self.getValue()
message['feasible'] = self.feasible
return message
def getValue(self):
try:
with CPUTemp._lock:
res = os.popen('vcgencmd measure_temp').readline()
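                # Illustrative: 'vcgencmd measure_temp' prints something like
                # "temp=42.8'C", which the line below turns into "42.8 °C".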
return res.replace("temp=", "").replace("'C\n", "") + " °C"
except Exception as e:
return str(e)
class MemUse:
_lock = threading.RLock()
def __init__(self, id, title, feasible = True):
self.id = id
self.title = title
self.feasible = feasible
self.category = 'Value'
def description(self):
message = {}
message['id'] = self.id
message['title'] = self.title
message['category'] = self.category
message['value'] = self.getValue()
message['feasible'] = self.feasible
return message
def getValue(self):
try:
            with MemUse._lock:
mem = os.popen("cat /proc/meminfo | awk '/Mem/ {print $2}'")
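                # Assumption for illustration: the first two lines matching
                # /Mem/ in /proc/meminfo are MemTotal and MemFree, in kB,
                # e.g. "945512" and "623104" on a small board.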
memTotal = int(mem.readline()) / 1000
memFree = int(mem.readline()) / 1000
memUsed = memTotal - memFree
return '{0:d}MB/{1:d}MB'.format(memUsed, memTotal)
except Exception as e:
return str(e)
|
gpl-3.0
| -1,412,676,988,932,031,500
| 31.339623
| 76
| 0.533839
| false
| 3.808889
| false
| false
| false
|
bfirsh/django-mptt
|
setup.py
|
1
|
2361
|
"""
Based entirely on Django's own ``setup.py``.
"""
import os
from distutils.command.install import INSTALL_SCHEMES
from distutils.core import setup
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
mptt_dir = os.path.join(root_dir, 'mptt')
pieces = fullsplit(root_dir)
if pieces[-1] == '':
len_root_dir = len(pieces) - 1
else:
len_root_dir = len(pieces)
for dirpath, dirnames, filenames in os.walk(mptt_dir):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)[len_root_dir:]))
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
setup(
name = 'django-mptt',
description = 'Utilities for implementing Modified Preorder Tree Traversal with your Django Models and working with trees of Model instances',
version = '0.3_pre',
author = 'Jonathan Buchanan',
author_email = 'jonathan.buchanan@gmail.com',
url = 'http://code.google.com/p/django-mptt/',
packages = packages,
data_files = data_files,
classifiers = ['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'],
)
|
mit
| -2,161,140,952,425,725,200
| 35.323077
| 146
| 0.639136
| false
| 3.759554
| false
| false
| false
|
Tanmay28/coala
|
bears/tests/natural_language/AlexBearTest.py
|
1
|
1313
|
import os
import subprocess
import sys
from queue import Queue
sys.path.insert(0, ".")
import unittest
from bears.tests.LocalBearTestHelper import LocalBearTestHelper
from bears.natural_language.AlexBear import AlexBear
from coalib.settings.Section import Section
class AlexBearTest(LocalBearTestHelper):
def setUp(self):
self.section = Section("test section")
self.uut = AlexBear(self.section, Queue())
self.test_file1 = os.path.join(os.path.dirname(__file__),
"test_files",
"alex_test1.md")
self.test_file2 = os.path.join(os.path.dirname(__file__),
"test_files",
"alex_test2.md")
def test_run(self):
# Test a file with no issues
self.assertLinesValid(self.uut, [], self.test_file1)
# Test a file with issues
self.assertLinesInvalid(self.uut, [], self.test_file2)
def skip_test():
try:
subprocess.Popen(['alex', '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return False
except OSError:
return "Alex is not installed."
if __name__ == '__main__':
unittest.main(verbosity=2)
|
agpl-3.0
| 2,532,052,658,769,836,500
| 29.534884
| 65
| 0.568926
| false
| 4.115987
| true
| false
| false
|
hpcloud/CloudAgents
|
agents/file_exists.py
|
1
|
3605
|
#!/usr/bin/env python
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Only required for more convenient local development.
import sys, os
sys.path.append(os.path.dirname(os.path.realpath(__file__))+'/lib')
from cloudagents import CloudAgent
from keystoneclient.v2_0 import client
import novaclient
import swiftclient
from time import mktime
import datetime
import parsedatetime.parsedatetime as pdt
ca = CloudAgent()
ca.required_config = {
"name": "File Existence Checker",
"version": "0.2.0",
"author": "Jeff Kramer",
"url": "http://www.hpcloud.com/",
"help": """This script checks to see if a file exists inside of a swift container. It also has functions to allow for searching for files named on relative dates, daily backups for instance.""",
"config":
[{
"name": "region",
"regexp": "^.{1,50}$",
"title": "Region",
"description": "Short name for the object storage endpoint region to search. IE: region-a.geo-1",
"type": "string",
"required": True,
"resource": "openstack.object-store.endpoints.region"
},{
"name": "container",
"regexp": "^.{1,50}$",
"title": "Container",
"description": "Name of the container to search for the file.",
"type": "string",
"required": True,
"resource": "openstack.object-store.[region].containers"
},{
"name": "date",
"regexp": "^.{1,250}$",
"title": "Date Adjustment",
"description": "Date adjustment. Enables time substitution in object name. IE: 'yesterday'. Dates are compared in UTC.",
"type": "string",
"required": False,
},{
"name": "name",
"regexp": "^.{1,250}$",
"title": "Name",
"description": "Object name to check for in the container. If a date adjustment is set, python datetime time substution is enabled. IE: 'backups/%Y-%m-%d.zip'",
"type": "string",
"required": True
},
]
}
def agent():
ca.log("Starting!")
keystone = client.Client(token=ca.creds['token'], tenant_id=ca.creds['tenantId'],
auth_url=ca.creds['identity_url'])
object_store_catalog = keystone.service_catalog.get_endpoints()['object-store']
region_endpoints = None
for endpoints in object_store_catalog:
if endpoints['region'] == ca.conf['region']:
region_endpoints = endpoints
if not region_endpoints:
ca.log_fail("Failing, region not found in endpoint list.")
exit()
if ca.conf.get('date'):
p = pdt.Calendar()
result = p.parse(ca.conf['date'])
dt = datetime.datetime.fromtimestamp(mktime(result[0]))
path = dt.strftime(ca.conf['name'])
else:
path = ca.conf['name']
try:
headers = swiftclient.head_object(region_endpoints['publicURL'],ca.creds['token'],
ca.conf['container'],path)
if headers['content-length'] >= 0:
ca.log("File exists!")
except swiftclient.client.ClientException, e:
ca.log("File doesn't exist!")
ca.email("File missing: "+ca.conf['container']+"/"+path,'''
The container '%s' appears to be missing the file '%s'.
''' % (ca.conf['container'], path))
ca.run(agent)
|
apache-2.0
| 6,946,789,395,821,561,000
| 30.347826
| 196
| 0.671567
| false
| 3.319521
| false
| false
| false
|
pwittchen/learn-python-the-hard-way
|
exercises/exercise35.py
|
1
|
1993
|
# Exercise 35: Branches and Functions
from sys import exit
def gold_room():
print "This room is full of gold. How much do you take?"
choice = raw_input("> ")
if "0" in choice or "1" in choice:
how_much = int(choice)
else:
dead("Man, learn to type a number.")
if how_much < 50:
print "Nice, you're not greedy, you win!"
exit(0)
else:
dead("You greedy bastard!")
def bear_room():
print "There is a bear here."
print "The bear has a bunch of honey."
print "The fat bear is in front of another door."
print "How are you going to move the bear?"
bear_moved = False
while True:
choice = raw_input("> ")
if choice == "take honey":
dead("The bear looks at you then slaps your face off.")
elif choice == "taunt bear" and not bear_moved:
print "The bear has moved from the door. You can go through it now."
bear_moved = True
elif choice == "taunt bear" and bear_moved:
dead("The bear gets pissed off and chews your leg off.")
elif choice == "open door" and bear_moved:
gold_room()
else:
print "I got no idea what that means."
def cthulhu_room():
print "Here you see the great evil Cthulhu."
print "He, it, whatever stares at you and you go insane."
print "Do you flee for your life or eat your head?"
choice = raw_input("> ")
if "flee" in choice:
start()
elif "head" in choice:
dead("Well that was tasty!")
else:
cthulhu_room()
def dead(why):
print why, "Good job!"
exit(0)
def start():
print "You are in a dark room."
print "There is a door to your right and left."
print "Which one do you take?"
choice = raw_input("> ")
if choice == "left":
bear_room()
elif choice == "right":
cthulhu_room()
else:
dead("You stumble around the room until you starve.")
start()
|
mit
| 956,250,791,782,358,400
| 24.551282
| 80
| 0.578525
| false
| 3.484266
| false
| false
| false
|
willjp/pyqconcurrency
|
qconcurrency/models.py
|
1
|
37871
|
#!/usr/bin/env python
"""
Name : qconcurrency/models.py
Created : Apr 14, 2017
Author : Will Pittman
Contact : willjpittman@gmail.com
________________________________________________________________________________
Description : Generic models, and interfaces for models to be used
in various Qt `View` widgets (ex: QTableView, QListView, QTableView, QComboBox, ...)
________________________________________________________________________________
"""
#builtin
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Iterable, MutableMapping
#external
from Qt import QtGui, QtCore
import six
#internal
__all__ = [
'DictModel',
'DictModelRow',
]
#!TODO: implement the other QStandardItemModel methods (insertRow, ...)
#! taking special care to handle self._data
#! (to keep IPython happy)
#!TODO: test using delattr unnecessary, (and potentially harmful)
#! QStandardItemModel methods (like appendRow, setItem, ...)
#!TODO: validation based on `hierarchy`, preventing nesting below defined
#!TODO: validation of column-names when setting columnvals
class DictModel( QtGui.QStandardItemModel ):
"""
Customized python interface for :py:obj:`QtGui.QStandardItemModel` so that it's
values, and nested tables can be accessed like a python dictionary.
Example:
Simple Example:
.. code-block:: bash
| _id | firstname | lastname | username |
|========================================|
| 101 | luke | skywalker | lukes |
| 102 | leia | skywalker | leias |
|========================================|
.. code-block:: python
model = DictModel( columns=('firstname','lastname','username') )
model.add_row( 101, columnvals = {
'firstname':'luke' ,
'lastname' :'skywalker' ,
'username' :'lukes' ,
}
)
userId = 101
print( model[userId].column('firstname') )
>>> 'luke'
print( model[userId].columnvals() )
>>> {'_id':101, 'firstname':'luke', 'lastname':'skywalker', 'username':'lukes'}
Nested-Table Example:
.. code-block:: bash
|=============|
| _id | class | # level: 'jedi_class'
|=============|
| 101 | sith |
| |===========================================|
| | _id | firstname | lastname | username | # level: 'user'
| |===========================================|
| | 56 | Darth | Vader | anakins |
| | 57 | Darth | Maul | darthm |
| |===========================================|
| |
| 102 | jedi |
| |===========================================|
| | _id | firstname | lastname | username | # level: 'user'
| |===========================================|
| | 58 | Mace | Windu | macew |
| | 59 | Ben | Kenobi | benk |
| |===========================================|
| |
|=============|
.. code-block:: python
model = DictModel(
hierarchy = ('jedi_class','user'),
columns = {
'jedi_class': ('class'),
'user': ('firstname','lastname','username')
},
)
sith_row = model.add_row( 101, {'class':'sith'} )
jedi_row = model.add_row( 102, {'class':'jedi'} )
sith_row.add_child( 56, {'firstname':'Darth', 'lastname':'Vader', 'username':'anakins'} )
sith_row.add_child( 57, {'firstname':'Darth', 'lastname':'Maul', 'username':'darthm'} )
jediclassId = 101
userId = 56
print( model[jediclassId][userId].column('username') )
>>> 'anakins'
print( model[jediclassId].level() )
>>> 'jedi_class'
print( model[jediclassId][userId].level() )
>>> 'user'
:py:obj:`qconcurrency.models.DictModel` column datatypes
.. code-block:: bash
|===============================================|
| _id | columnA | columnB |
|===============================================|
| DictModelRow | QStandardItem | QStandardItem |
| |===============================================|
| | _id | columnA | columnB |
| |===============================================|
| | DictModelRow | QStandardItem | QStandardItem |
| |===============================================|
| |
|===============================================|
"""
def __init__(self, columns, hierarchy=None ):
"""
Args:
columns (list, dict):
                Defines the available columns for the table/tree represented by this :py:obj:`QtGui.QStandardItemModel`
(the `key`, generally referring to the databaseId, is always the first column)
If `hierarchy` argument is set, you have two options:
* This can be a list of column-names, that will be
created in all levels of nested table.
* This can be a dictionary in the form of ``{'level_name':(column,column,column,...), ...}``
that indicates specific-columns for each level of table-nesting.
If `hierarchy` is not set, this must be a list of column-names,
and they will be applicable to any level of table-nesting.
.. code-block:: python
{
'jedi_class': ('class',),
'user': ('firstname','lastname'),
}
hierarchy (dict, optional): ``(ex: ('department_type','department') )``
A list that labels what type of data is stored at each
level of table-nesting in this :py:obj:`qconcurrency.models.DictModel`. Each item
indicates another level of nesting.
.. code-block:: python
hierarchy = ('jedi_class','user'),
"""
QtGui.QStandardItemModel.__init__(self)
# Attributes
self._defaultcolumnvals = {} # all columns for new rows are initialized as ``None``
self._columns = None # either a list of columns, or a dict of hierarchy-keys and their columns
        self._hierarchy = None       # either ``None``, or a list labelling each level of nesting
self._data = {} # unfortunately, if an item has a dict interface
# and has assignments within a context-manager,
# IPython tries to save/restore the values when it
# is destroyed.
#
# This means we need a real-dict (or something
# to fake it) in order to cleanly use
# within IPython. So we are now keeping
# 2x references to the data.
# Validation
# ==========
# If imposing hierarchy restrictions
if hierarchy:
self._hierarchy = hierarchy
if isinstance( columns, MutableMapping ):
if not set(hierarchy).issubset( set(columns.keys()) ):
raise RuntimeError((
'`columns` argument is missing keys represented in`hierarchy` \n'
'columns: %s \n'
'hierarchy: %s \n'
) % (repr(columns.keys()), repr(hierarchy))
)
# so that hierarchy can always be handled the same,
# create `columns` as a dict, if a list was passed
            elif isinstance( columns, Iterable ):
new_columns = {}
for level in hierarchy:
new_columns[ level ] = columns[:]
columns = new_columns
else:
raise RuntimeError(
'When `hierarchy` argument is set, `columns` must be either: \n'
' * a list of columns (applicable to all hierarchy levels) \n'
' * a dict of hierarchy-keys, and the columns associated with them \n'
)
for level in hierarchy:
self._defaultcolumnvals[ level ] = {}
for key in columns[level]:
self._defaultcolumnvals[ level ][ key ] = None
# If not imposing hierarchy restrictions
else:
if isinstance( columns, MutableMapping ):
raise RuntimeError(
'When `hierarchy` argument is *not* set, `columns` should always \n'
'be a list of column-names. This set of columns will be reused by all \n'
'levels of nested tables. '
)
for key in columns:
self._defaultcolumnvals[ key ] = None
self._columns = columns
self._hierarchy = hierarchy
def add_row(self, key, columnvals=None ):
"""
Adds a new (toplevel) row to this DictModel, henceforth referred to by the key `key`.
Args:
key (obj):
Key is the id you will use to refer to this object.
Generally it will be a databaseId. This object must be
hashable.
columnvals (dict, optional):
Optionally, you may provide a dictionary of column-val assignments
(appropriate to this item's table-level). All columns, not
assigned in `columnvals` will be initialized with a value of ''.
Returns:
:py:obj:`qconcurrency.models.DictModelRow`
"""
set_columnvals = self._defaultcolumnvals.copy()
if self._hierarchy:
set_columnvals = set_columnvals[ self._hierarchy[0] ]
if columnvals:
set_columnvals.update( columnvals )
item = DictModelRow( parent=self, key=key, columnvals=set_columnvals)
# NOTE: this step should not be necessary,
# but it seems to be...
self.setItem( self.rowCount()-1, 0, item )
self._data[ str(key) ] = item
return item
def columns(self, level=None ):
"""
Returns the columns for a particular level of nested-table
within this :py:obj:`qconcurrency.models.DictModel`.
Args:
level (obj): ``( ex: 'jedi_class', 0 )``
If a `hierarchy` was assigned to this :py:obj:`qconcurrency.models.DictModel`,
this can be a label from it, or an integer indicating the level-of-nesting.
Otherwise, this will be an integer indicating the level-of-nesting
(and it will be ignored).
Returns:
.. code-block:: python
('id','firstname','lastname','username', ...)
"""
if self._hierarchy:
if level == None:
raise RuntimeError(
'This `qconcurrency.models.DictModel` was created with different columns at '
'different levels. You\'ll need to provide the `level` you are '
'interested in to get the column-list '
)
if level in self._columns:
columns = list(self._columns[ level ][:])
columns.insert( 0, 'id' )
return columns
elif isinstance( level, int ) and isinstance( self._hierarchy, Iterable):
if level <= len(self._hierarchy):
i = 0
for key in self._hierarchy:
if i == level:
columns = list(self._columns[ key ][:])
columns.insert( 0, 'id' )
return columns
i +=1
raise KeyError('unknown level: %s' % level )
else:
columns = list(self._columns[:])
columns.insert( 0, 'id' )
return columns
def column_index(self, level=None, column=None ):
"""
Returns the column-index for a specific columnname
at a specific level.
Args:
level (obj): ``( ex: 'jedi_class', 0 )``
If a `hierarchy` was assigned to this :py:obj:`qconcurrency.models.DictModel`,
this can be a label from it, or an integer indicating the level-of-nesting.
Otherwise, this will be an integer indicating the level-of-nesting
(and it will be ignored).
Returns:
.. code-block:: python
3 # a column-index
"""
if self._hierarchy:
if level == None:
raise RuntimeError(
'This `qconcurrency.models.DictModel` was created with different columns at '
'different levels. You\'ll need to provide the `level` you are '
'interested in to get the column-list '
)
if level in self._columns:
return self._columns[ level ].index( column ) +1
elif isinstance( level, int ) and isinstance( self._hierarchy, Iterable ):
if level <= len(self._hierarchy):
i = 0
for key in self._hierarchy:
if i == level:
return self._columns[ key ].index( column ) +1
i +=1
raise KeyError('unknown level: %s' % level )
else:
return self._columns.index( column ) +1
def default_columnvals(self, level=None ):
"""
Returns the default-columnvals for a particular level of nested-table.
See :py:meth:`qconcurrency.models.DictModelRow.level`
Args:
level (obj):
If a `hierarchy` was assigned to this :py:obj:`qconcurrency.models.DictModel`,
this will be a label from it. Otherwise, this will be an integer
indicating the level-of-nesting (and it will be ignored).
Returns:
.. code-block:: python
{
'firstname': None,
'lastname': None,
...
}
"""
if self._hierarchy:
if level in self._defaultcolumnvals:
return self._defaultcolumnvals[ level ]
elif isinstance( level, int ):
if level <= len(self._defaultcolumnvals):
i = 0
for key in self._defaultcolumnvals:
if i == level:
return self._defaultcolumnvals[ key ]
i +=1
raise KeyError('unknown level: %s' % level )
else:
return self._defaultcolumnvals
def hierarchy(self):
"""
Returns the model's hierarchy tuple
(if one has been assigned in :py:obj:`qconcurrency.models.DictModel.__init__`)
Returns:
.. code-block:: python
('jedi_class', 'user') # if assigned a hierarchy
None # if no hierarchy is assigned
"""
return self._hierarchy
def _get_rowitem(self, key):
"""
Returns the item in the first column of this :py:obj:`QtGui.QStandardItemModel`
for the row with the key indicated by `key`.
Args:
key (obj):
A key assigned to a row within this Model. Generally,
this would be a database-Id.
Returns:
QtGui.QStandardItem
"""
for i in range(self.rowCount()):
if self.item(i,0).text() == str(key):
return self.item(i,0)
raise KeyError(
'no row has the key "%s"' % key
)
def _get_colindex(self, level, column):
"""
Returns the column-index for a column within this :py:obj:`QtGui.QStandardItemModel`
by it's name.
Args:
column (str): ``(ex: 'name' )``
Any item from the :py:meth:`__init__` argument `columns`.
Returns:
.. code-block:: python
4 # integer, representing the 0-based index of this column
# in the table
Raises:
KeyError: if column does not exist in table
"""
if self._hierarchy:
if level == None:
raise RuntimeError(
'This `qconcurrency.models.DictModel` was created with different columns at '
'different levels. You\'ll need to provide the `level` you are '
'interested in to get the column-list '
)
if level in self._columns:
return self._columns[ level ].index( column ) +1
elif isinstance( level, int ) and isinstance( self._hierarchy, Iterable ):
if level <= len(self._hierarchy):
i = 0
for key in self._hierarchy:
if i == level:
return self._columns[ key ].index( column ) +1
i +=1
raise KeyError('unknown level: %s' % level )
else:
return self._columns.index(column) +1
raise KeyError(
'Column "%s" does not exist in this `qconcurrency.models.DictModel` columns: %s' % (
column, str(self._columns)
)
)
def removeRow(self, key):
self._data.pop( str(key) )
# row is gone. that is all we care about
try:
modelitem = self._get_rowitem( key )
return QtGui.QStandardItemModel.removeRow( self, modelitem.row() )
except( KeyError ):
return
def takeRow(self, key):
return self.removeRow( str(key) )
def __getitem__(self, key):
"""
Returns a :py:obj:`qconcurrency.models.DictModelRow` object representing
a row from this :py:obj:`qconcurrency.models.DictModel`.
"""
return self._data[str(key)]
def __delitem__(self, key):
"""
Wraps :py:meth:`removeRow`
"""
self.removeRow( key )
def __contains__(self, item):
"""
Returns True/False if a row with `key` exists in
:py:obj:`QtWidgets.QStandardItemModel`
"""
return str(item) in self._data
def __len__(self):
"""
Wraps `self._data.__len__`
"""
return len(self._data)
def __iter__(self):
"""
Allows iteration over Ids in DictModel.
"""
return iter(self._data)
def has_key(self, k):
"""
Wraps `self._data.has_key`
"""
return self._data.has_key(k)
def keys(self):
"""
Lists `key` value for every row in the
:py:obj:`QtWidgets.QStandardItemModel`
"""
return self._data.keys()
def values(self):
"""
Lists :py:obj:`DictModelRow` objects for every row in the
:py:obj:`QtWidgets.QStandardItemModel`
"""
return self._data.values()
def items(self):
"""
Lists a tuple with the `key` and :py:obj:`DictModelRow`
objects for every row in the :py:obj:`QtWidgets.QStandardItemModel`
"""
return self._data.items()
def clear(self):
"""
Removes all items from :py:obj:`QtGui.QStandardItemModel`
"""
self._data = {}
QtGui.QStandardItemModel.clear(self)
class DictModelRow( QtGui.QStandardItem ):
"""
A DictModelRow is a :py:obj:`QtGui.QStandardItem` that holds
an item's key (usually database-Id) within a :py:obj:`qconcurrency.models.DictModel`.
It is always added to a :py:obj:`qconcurrency.models.DictModel` at the column-index ``0``.
When setting columnvals, they are added to the same parent :py:obj:`qconcurrency.models.DictModel`
or :py:obj:`qconcurrency.models.DictModelRow`, but at different column-indexes.
Example:
.. code-block:: bash
===== ========|
DictModelRow _id | class | # level: 'jedi_class'
| ===== ========|
+---------> 101 | sith |
| |============================================|
| | _id | firstname | lastname | username | # level: 'user'
| |============================================|
+-------------> 56 | Darth | Vader | anakins |
+-------------> 57 | Darth | Maul | darthm |
|============================================|
/\\ /\\ /\\
| | |
QtGui.QStandardItem ----+------------+-------------+
"""
def __init__(self, parent, key, columnvals=None ):
"""
Args:
parent (QtGui.QStandardItem, QtGui.QStandardItemModel ):
Another QStandardItem that has already
been added to the model, or a model itself.
It will be used to access the model's info,
and this widget will be added to it.
key (obj):
A hashable python object that will be
used to represent this object's databaseId.
columnvals (dict, optional):
A dictionary of columns, and assignments to store
in the view.
"""
QtGui.QStandardItem.__init__(self, str(key))
if not isinstance( parent, QtGui.QStandardItemModel ):
if not parent.model():
raise RuntimeError(
'`parent` %s QStandardItem must have already been added to a QStandardItemModel' % repr(parent)
)
self._key = key
self._level = None # if `hierarchy` argument was set in `DictModel`, this will be
# a label indicating the type of information this
# table represents.
#
# otherwise, this will be an incremented integer
# (starting from 0)
# append this item to the parent's list of children
if isinstance( parent, QtGui.QStandardItemModel ):
if parent.hierarchy():
self._level = parent.hierarchy()[0]
else:
self._level = 0
parent.setItem( parent.rowCount(), 0, self )
else:
hierarchy = parent.model().hierarchy()
if hierarchy:
index = hierarchy.index( parent.level() )+1
self._level = hierarchy[ index ]
else:
self._level = parent.level() +1
parent.setChild( parent.rowCount(), 0, self )
self.setText( str(key) )
default_columnvals = self.model().default_columnvals(self._level)
self.set_columnvals( default_columnvals )
if columnvals:
self.set_columnvals( columnvals )
def __getitem__(self, key):
return self._get_child_row(key)
def add_child(self, key, columnvals=None ):
"""
Adds a new row to this DictModel, at a new level of nesting
henceforth referred to by the key `key`.
Example:
.. code-block:: bash
|==============|
| _id | column |
|==============|
| 100 | 'A' | # add_child( 102, {'column':'A1'} )
| |==============|
| | _id | column | # added child: model[100][102]
| |==============|
| | 102 | 'A1' |
| |==============|
| |
| 101 | 'B' |
|==============|
Args:
key (obj):
Key is the id you will use to refer to this object.
Generally it will be a databaseId. This object must be
hashable.
columnvals (dict, optional):
Optionally, you may provide a dictionary of column-val assignments
(appropriate to this item's table-level) as determined by the `columns`
argument to :py:meth:`qconcurrency.models.DictModel.__init__`
Returns:
:py:obj:`qconcurrency.models.DictModelRow`
See Also:
* :py:meth:`qconcurrency.models.DictModelRow.add_row`
* :py:meth:`qconcurrency.models.DictModel.add_row`
"""
item = DictModelRow( parent=self, key=key, columnvals=columnvals )
return item
def add_row(self, key, columnvals=None ):
"""
Adds a new row to this DictModel, at the same level of nesting
henceforth referred to by the key `key`.
Example:
.. code-block:: bash
|==============|
| _id | column |
|==============|
| 100 | 'A' | # add_row( 102, {'column':'C'} )
| 101 | 'B' |
| 102 | 'C' | # added row: model[102]
|==============|
Args:
key (obj):
Key is the id you will use to refer to this object.
Generally it will be a databaseId. This object must be
hashable.
columnvals (dict, optional):
Optionally, you may provide a dictionary of column-val assignments
(appropriate to this item's table-level) as determined by the `columns`
argument to :py:meth:`qconcurrency.models.DictModel.__init__`
Returns:
:py:obj:`qconcurrency.models.DictModelRow`
See Also:
* :py:meth:`qconcurrency.models.DictModelRow.add_row`
* :py:meth:`qconcurrency.models.DictModel.add_row`
"""
if self.parent():
item = DictModelRow( parent=self.parent(), key=key, columnvals=columnvals )
else:
item = DictModelRow( parent=self.model(), key=key, columnvals=columnvals )
return item
def set_columnvals(self, columnvals ):
"""
Set columnvals on a key of this :py:obj:`qconcurrency.models.DictModel`
"""
# validation
if self.model() is None:
raise RuntimeError('Cannot set columnvals until item has been added to a model')
columns = self.model().columns( self._level )
# set columnvals
for i in range(len(columns)):
column = columns[i]
if column in columnvals:
if columnvals[column] == None:
columnvals[column] = ''
if self.parent() is not None:
self.parent().setChild(
self.index().row(), # row
i, # column
QtGui.QStandardItem( str(columnvals[column]) ) # item
)
else:
self.model().setItem(
self.index().row(), # row
i, # column
QtGui.QStandardItem( str(columnvals[column]) ) # item
)
def columnvals(self):
"""
Returns a dictionary of this item's columnvals from the Model.
A column `_id` will be added to the list of columns, which will
be the `key` value of this row.
"""
columnvals = {}
columns = self.model().columns(self._level)
for i in range(len(columns)):
column = columns[i]
# nested-modelitem
if self.parent() is not None:
modelitem = self.parent().child( self.row(), i )
if modelitem is not None:
columnvals[ column ] = modelitem.text()
else:
raise RuntimeError(
'item at level "%s" in column "%s" (%s,%s) is None. Expected QtCore.QStandardItem' % (
self._level, column, self.row(), i)
)
# root-modelitems
else:
modelitem = self.model().item( self.row(), i )
if modelitem is not None:
columnvals[ column ] = modelitem.text()
else:
raise RuntimeError(
'item at level "%s" in column "%s" (%s,%s) is None. Expected QtCore.QStandardItem' % (
self._level, column, self.row(), i)
)
columnvals['_id'] = self._key
return columnvals
def columnval(self, name):
"""
Retrieve a single column-value only.
"""
if name == '_id':
if self.parent() is not None:
return self.parent().child( self.row(), 0 ).text()
else:
return self.model().item( self.row(), 0 ).text()
columns = self.model().columns(self._level)
for i in range(len(columns)):
column = columns[i]
if column == name:
if self.parent() is not None:
modelitem = self.parent().child( self.row(), i )
if modelitem:
return modelitem.text()
else:
modelitem = self.model().item( self.row(), i )
if modelitem:
return modelitem.text()
raise KeyError(
'Unable to find a column named: "%s" in %s' % (name, repr(columns))
)
def columnitem(self, name):
"""
Returns the sibling-widget representing one of the columnvals
"""
sibling_index = self.index().sibling( self.index().row(), self._get_colindex( name ) )
return self.model().itemFromIndex( sibling_index )
def level(self):
"""
Returns either a label (if :py:meth:`qconcurrency.models.DictModel.__init__` was passed a
`hierarchy` argument), or an integer representing the nesting-depth.
Either way, level is used to indicate the level-of-nesting of the table
that this item is in.
"""
return self._level
def delete(self):
"""
Removes this *row* from the model.
"""
if self.parent() is not None:
self.parent().removeRow( self.id() )
else:
self.model().removeRow( self.id() )
def _get_sibling_row(self, key):
"""
Returns a sibling with a different key at the same level.
Example:
.. code-block:: bash
----------------------------------------------------
key | name | path |
----------------------------------------------------
100 | 'mnt' | '/mnt' |
100.1 | 'mntusb' | '/mnt/usb' |
100.1.a | 'mntusbbackup' | '/mnt/usb/backup' |
100.2 | 'mntcd' | '/mnt/cd' |
| | |
200 | 'home' | '/home' |
200.1 | 'will' | '/home/will' |
In the above diagram representing the :py:obj:`QStandardItemModel`,
from `100.1` you would be able retrieve `100.2`.
"""
if self.parent() == None:
for i in range(self.model().rowCount()):
if self.model().item(i,0).text() == str(key):
return self.model().item(i,0)
else:
for i in range(self.parent().rowCount()):
if self.parent().child(i,0).text() == str(key):
return self.parent().child(i,0)
raise KeyError(
'Unable to find key %s in table containing %s' % (key, repr(self))
)
def _get_child_row(self, key):
"""
Returns a child with a particular key.
Example:
.. code-block:: bash
----------------------------------------------------
key | name | path |
----------------------------------------------------
100 | 'mnt' | '/mnt' |
100.1 | 'mntusb' | '/mnt/usb' |
100.1.a | 'mntusbbackup' | '/mnt/usb/backup' |
100.2 | 'mntcd' | '/mnt/cd' |
| | |
200 | 'home' | '/home' |
200.1 | 'will' | '/home/will' |
In the above diagram representing the :py:obj:`QStandardItemModel`,
from `100.1` you would be able retrieve `100.1.a`.
"""
if not self.rowCount():
raise RuntimeError(
'%s has no children. Cannot retrieve child at key %s' % (repr(self), key)
)
for i in range(self.rowCount()):
if self.child(i,0).text() == str(key):
return self.child(i,0)
raise KeyError(
'Cannot find child identified by key "%s" in %s' % (key,repr(self))
)
def _get_colindex(self, column):
return self.model()._get_colindex( self.level(), column )
def keys(self):
"""
        Returns list containing keys for every
child-row that has been added to this :py:obj:`qconcurrency.models.DictModelRow`
"""
keys = []
for i in range(self.rowCount()):
keys.append( self.child(i,0).text() )
return keys
def id(self):
"""
Returns the `key` this row represents.
(It's value depends on the value passed to :py:meth:`qconcurrency.models.DictModelRow.add_row`
or :py:meth:`qconcurrency.models.DictModelRow.add_child` ).
"""
return self._key
if __name__ == '__main__':
from qconcurrency import QApplication
from Qt import QtWidgets
import sys
def test_simple():
with QApplication():
model = DictModel( columns=('a','b','c') )
# add toplevel rows
model.add_row( 100, columnvals={'a':'AAA', 'b':'BBB'} )
model.add_row( 200, columnvals={'a':'ZZZ', 'b':'XXX'} )
print( model[100].columnvals() )
print( model[200].columnvals() )
# add child-rows (and nested children)
model[100].add_child( 10, columnvals={'c':'CCC'} )
model[100][10].add_row( 11 )
model[100][10].add_row( 12 )
model[100][10].add_child( 1 , columnvals={'c':'DDD'} )
print( model[100][10].columnvals() )
print( model[100][10][1].columnvals() )
# add model to tree (so it is visible)
tree = QtWidgets.QTreeView()
tree.setModel( model )
tree.show()
def test_hierarchy_fixedcols():
with QApplication():
model = QtGui.QStandardItemModel()
model = DictModel(
hierarchy = ('jedi_class','user'),
columns = {'jedi_class':['class'], 'user':('username','firstname','lastname')}
)
model.add_row(10, columnvals={'class':'sith'} )
model.add_row(11, columnvals={'class':'jedi'} )
model[10].add_child( 101, columnvals={'username':'anakins', 'firstname':'anakin', 'lastname':'skywalker'} )
model[10].add_child( 102, columnvals={'username':'epalpatine'} )
model[10].add_row( 12, columnvals={'class':'other'} )
jediclassId = 10
userId = 101
print( model[jediclassId][userId].columnvals() )
# add model to tree (so it is visible)
tree = QtWidgets.QTreeView()
tree.setModel( model )
tree.show()
def runtests():
#test_simple()
test_hierarchy_fixedcols()
runtests()
|
bsd-3-clause
| -3,545,572,550,942,199,300
| 34.660075
| 119
| 0.464577
| false
| 4.625748
| false
| false
| false
|
LittleBun/Personal
|
ics632/tutorial_sim_grid/topic2/generate_xml_bintree_and_hostfile.py
|
1
|
5081
|
#!/usr/bin/env python2.7
import sys
import os
import math
# Link parameters
link_latency = "10us"
link_bandwidth = 10
link_bandwidth_unit = "Gbps"
# Convenient math wrappers
def floor(x):
return int(math.floor(x))
def ceil(x):
return int(math.ceil(x))
def pow2(x):
return int(math.pow(2,x))
# XML generation functions
def issueHead():
head = ("<?xml version='1.0'?>\n"
"<!DOCTYPE platform SYSTEM \"http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd\">\n"
"<platform version=\"4\">\n\n")
config_clause = ("<!-- WARNING: This <config></config> clause below\n"
"makes it so that NO COMPUTATION TIME is simulated. This is because\n"
"in this module, for pedagogic purposes, we don't want to muddy the\n"
"(simulation) waters with computational times. As a results, this\n"
"XML platform file may not be suitable for running other\n"
"simulations, unless you remove the <config></config> clause.\n"
"-->\n"
"<config>\n"
"<prop id=\"smpi/simulate-computation\" value=\"0\"></prop>\n"
"<prop id=\"smpi/running-power\" value=\"200000000000\"></prop>\n"
"</config>\n\n")
AS_head = "<AS id=\"AS0\" routing=\"Full\">\n"
return head + config_clause + AS_head
def issueTail():
return "</AS>\n</platform>\n"
def issueLink1(x):
return " <link id=\"link-"+str(x)+"\" latency=\""+str(link_latency)+"\" bandwidth=\""+str(link_bandwidth)+link_bandwidth_unit+"\"/>\n"
def issueLink2(x,y):
return " <link id=\"link-"+str(x)+"-"+str(y)+"\" latency=\""+str(link_latency)+"\" bandwidth=\""+str(link_bandwidth)+link_bandwidth_unit+"\"/>\n"
def issueLink3(x,y,bw):
return " <link id=\"link-"+str(x)+"-"+str(y)+"\" latency=\""+str(link_latency)+"\" bandwidth=\""+str(bw)+link_bandwidth_unit+"\"/>\n"
def issueHost(index):
return " <host id=\"host-"+str(index)+".hawaii.edu\" speed=\"200Gf\"/>\n"
def issueRouteHead(index1, index2):
return " <route src=\"host-"+str(index1)+".hawaii.edu\" dst=\"host-"+str(index2)+".hawaii.edu\">\n"
def issueRouteTail():
return " </route>\n"
def issueRouteLink1(x):
return "\t<link_ctn id=\"link-"+str(x)+"\"/>\n"
def issueRouteLink2(x,y):
return "\t<link_ctn id=\"link-"+str(x)+"-"+str(y)+"\"/>\n"
######################################################################
# Parse command-line arguments
if (len(sys.argv) != 2):
print >> sys.stderr, "Usage:a"+sys.argv[0]+" <num hosts>\n"
print >> sys.stderr, " Will generate a bintree_<num hosts>.xml and hostfile_<num hosts>.txt file\n"
exit(1)
num_hosts = int(sys.argv[1])
###############################################################
# Generate Binary Tree XML file
filename = "./bintree_"+str(num_hosts)+".xml"
fh = open(filename, 'w')
fh.write(issueHead())
# Create all hosts and links
for i in range(0,num_hosts):
fh.write(issueHost(i))
if (i*2+1 < num_hosts):
fh.write(issueLink2(i,i*2+1))
if (i*2+2 < num_hosts):
fh.write(issueLink2(i,i*2+2))
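# Index arithmetic used below (illustrative): host i has children 2*i+1 and
# 2*i+2 and parent floor((i-1)/2); e.g. with num_hosts=7, host 5 sits at
# level floor(log2(1+5)) = 2 and its path to the root is 5 -> 2 -> 0.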
# Create all routes
for i in range(0,num_hosts):
level_i = floor(math.log(1+i,2))
for j in range(i+1,num_hosts):
fh.write(issueRouteHead(j,i))
        # Host j is at the same or lower level than host i
level_j = floor(math.log(1+j,2))
current_host_path_j = j
        # Go up to the same level as that of host i
for l in range(level_j,level_i,-1):
parent_host = floor(float(current_host_path_j-1)/2)
fh.write(issueRouteLink2(min(current_host_path_j,parent_host),max(current_host_path_j,parent_host)))
current_host_path_j = parent_host
# Find the common ancestor
current_host_path_i = i
while (current_host_path_j != current_host_path_i):
fh.write(issueRouteLink2(min(current_host_path_j,floor(float(current_host_path_j-1)/2)), max(current_host_path_j,floor(float(current_host_path_j-1)/2))))
current_host_path_i = floor(float(current_host_path_i-1)/2)
current_host_path_j = floor(float(current_host_path_j-1)/2)
common_ancestor = current_host_path_j
# Go back from i to the common ancestor
current_host_path_i = i
sequence = []
sequence.append(current_host_path_i)
while (current_host_path_i != common_ancestor):
parent_host = floor(float(current_host_path_i-1)/2)
sequence.append(parent_host)
current_host_path_i = parent_host
# Issue links in the common ancestor -> i order
sequence = sequence[::-1]
for k in range(0,len(sequence)-1):
fh.write(issueRouteLink2(min(sequence[k],sequence[k+1]),max(sequence[k],sequence[k+1])))
fh.write(issueRouteTail())
fh.write(issueTail())
fh.close()
print >> sys.stderr, "BinTree XML platform description file created: "+filename
###############################################################
## Generate host file
filename = "./hostfile_"+str(num_hosts)+".txt"
fh = open(filename, 'w')
for i in range(0,num_hosts):
fh.write("host-"+str(i)+".hawaii.edu\n")
fh.close()
print >> sys.stderr, "Hostfile created: "+filename
|
unlicense
| -6,806,824,665,409,877,000
| 34.78169
| 156
| 0.604212
| false
| 2.936994
| true
| false
| false
|
shubhamchaudhary/biggboss
|
biggboss-checker.py
|
1
|
6274
|
#!/usr/bin/env python3
#
# Copyright (c) 2014 Shubham Chaudhary <me@shubhamchaudhary.in>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import os.path
import platform
import random
import re
import sys
import time
if sys.version_info >= (3,):
import urllib.request as urllib2
import urllib.parse as urlparse
import urllib.error as urlerror
else:
    import urllib2
    import urllib2 as urlerror  # urllib2 exposes HTTPError/URLError directly
    import urlparse
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
    return [atoi(c) for c in re.split(r'(\d+)', text)]
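# For example: sorted(['Episode-10', 'Episode-2'], key=natural_keys)
# returns ['Episode-2', 'Episode-10'] instead of the lexicographic order.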
def notify_user(message=None):
''' Notify the user about a particular event with given message
'''
if not message:
message = 'Notification!!!'
#print('-'*len(message))
print('-'*int((len(message)-6)/2), 'NOTIFICATION', '-'*int((len(message)-6)/2))
print(message)
def get_page_data():
''' Get page data as string from server
'''
print('Sending request to servers of Colors . . .')
full_url = 'http://colors.in.com/in/biggboss'
full_url = 'http://colors.in.com/in/biggboss/videos/episodes'
# Send request
try:
#res.geturl(), .url=str, .status=200, .info=200, .msg=OK,
response = urllib2.urlopen(full_url)
except urlerror.HTTPError as exep:
print('The server couldn\'t fulfill the request.',
'Error code: ', exep.code)
except urlerror.URLError as exep:
print('We failed to reach a server.')
print('Reason: ', exep.reason)
else:
# everything is fine
#if verbose:
print('Data received, Decoding . . .')
web_page = str(response.read()) # More pythonic than .decode('utf-8')
return web_page
def get_link(web_page):
''' Get Latest episode link
'''
month = time.strftime('%B')
new_link_pattern = r'http://colors.in.com/in/biggboss/videos/bigg-boss-8-full-episode\d\d-' + month.lower() + r'-\d+\w\w-2014.*?.html'
#print('Checking: ', new_link_pattern)
link_reg = re.findall(new_link_pattern, web_page)
if link_reg:
#print(link_reg.group())
success_set = sorted(set(link_reg), key=natural_keys)
return success_set[-1]
def get_episode_list(web_page, new_episode_pattern=None):
''' Get latest episode list from webpage
'''
if not new_episode_pattern:
### PATTERN used by colors
#<li><a title="Bigg Boss 8, Full Episode-8, 29th September, 2014"
#href="http://colors.in.com/in/biggboss/videos/bigg-boss-8-full-episode8-29th-october-2014-69087-2.html#nav">
#http://colors.in.com/in/biggboss/videos/bigg-boss-8-full-episode23-november-14th-2014-10101036-2.html#nav
#Bigg Boss 8, Full Episode-8, 29th September, 2014</a></li>
#Bigg Boss 8, Full Episode-10, October 1st, 2014</a></li>
new_episode_pattern = time.strftime(r'%B-\d+\w\w').lower()
month = time.strftime('%B')
new_episode_pattern = r'Bigg Boss \d+, Full Episode-\d+, ' + month + r' \d+\w\w, 2014';
#new_episode_pattern = r'Bigg Boss \d+, Full Episode-\d+'
print('Checking for new episode with pattern:', new_episode_pattern)
success = re.findall(new_episode_pattern, web_page)
success_set = sorted(set(success), key=natural_keys)
return success_set
def check_biggboss_episode(new_episode_pattern=None, verbose=False):
''' Check for the latest bigg boss episode
'''
web_page = get_page_data()
if verbose:
print('Page Received:\n', web_page)
success_set = get_episode_list(web_page, new_episode_pattern)
# Parse for success or failure
print('Found:')
for item in success_set:
print('\t', item)
current_date = int(time.strftime('%d'))
current_hour = int(time.strftime('%H'))
current_month = time.strftime('%B')
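    # Treat the episode as new if it is dated today and it is 8 PM or later,
    # or dated yesterday and it is still before 8 PM local time.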
if (current_month.lower() in success_set[-1].lower() and (
(str(current_date) in success_set[-1] and
(current_hour >= 20)) or
(str(current_date-1) in success_set[-1] and
(current_hour >= 0 and current_hour < 20))
)
):
msg = 'Found new episode online'
notify_user(msg)
latest_link = get_link(web_page)
if latest_link:
print('Here\'s the link: ', latest_link)
else:
print('No new episode right now')
def main():
''' Main function - Parse command line arguments
'''
from argparse import ArgumentParser
parser = ArgumentParser(prog='BiggBoss-checker')
parser.add_argument("-p", "--pattern", type=str, dest="pattern",
help="Search for this pattern instead of default")
parser.add_argument("-v", "--verbose", dest="verbosity",
action='store_true', default=False, help='Show verbose output')
args = parser.parse_args()
# Check input
try:
check_biggboss_episode(args.pattern, verbose=args.verbosity)
except:
raise
return 0
if __name__ == '__main__':
try:
main()
if os.name == 'nt' or platform.system() == 'Windows':
input('Press Enter or Close the window to exit !')
except KeyboardInterrupt:
        print('\nClosing gracefully :)', sys.exc_info()[1])
except urlerror.HTTPError:
print('HTTP Error:', sys.exc_info()[1])
except SystemExit:
pass
except:
print('Unexpected Error:', sys.exc_info()[0])
print('Details:', sys.exc_info()[1])
raise
|
gpl-3.0
| 3,544,284,820,136,747,500
| 35.057471
| 138
| 0.626076
| false
| 3.493318
| true
| false
| false
|
jankim/deepnl
|
bin/dl-sentiwords.py
|
1
|
7318
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Learn sentiment-specific word embeddings from tweets.
Author: Giuseppe Attardi
"""
import logging
import numpy as np
import argparse
from ConfigParser import ConfigParser
from itertools import chain
# allow executing from anywhere without installing the package
import sys
import os
import distutils.util
builddir = os.path.dirname(os.path.realpath(__file__)) + '/../build/lib.'
libdir = builddir + distutils.util.get_platform() + '-' + '.'.join(map(str, sys.version_info[:2]))
sys.path.append(libdir)
# local
from deepnl import *
from deepnl.extractors import *
from deepnl.reader import TweetReader
from deepnl.network import Network
from deepnl.sentiwords import SentimentTrainer
# ----------------------------------------------------------------------
# Auxiliary functions
def create_trainer(args, converter):
"""
Creates or loads a neural network according to the specified args.
"""
logger = logging.getLogger("Logger")
if args.load:
logger.info("Loading provided network...")
trainer = SentimentTrainer.load(args.load)
trainer.learning_rate = args.learning_rate
else:
logger.info('Creating new network...')
trainer = SentimentTrainer(converter, args.learning_rate,
args.window/2, args.window/2,
args.hidden, args.ngrams, args.alpha)
trainer.saver = saver(args.output, args.vectors)
logger.info("... with the following parameters:")
logger.info(trainer.nn.description())
return trainer
def saver(model_file, vectors_file):
"""Function for saving model periodically"""
def save(trainer):
# save embeddings also separately
if vectors_file:
trainer.save_vectors(vectors_file)
if model_file:
trainer.save(model_file)
return save
# ----------------------------------------------------------------------
if __name__ == '__main__':
# set the seed for replicability
np.random.seed(42)
defaults = {}
parser = argparse.ArgumentParser(description="Learn word embeddings.")
parser.add_argument('-c', '--config', dest='config_file',
help='Specify config file', metavar='FILE')
# args, remaining_argv = parser.parse_known_args()
# if args.config_file:
# config = ConfigParser.SafeConfigParser()
# config.read([args.config_file])
# defaults = dict(config.items('Defaults'))
# parser.set_defaults(**defaults)
parser.add_argument('-w', '--window', type=int, default=5,
help='Size of the word window (default 5)',
dest='window')
parser.add_argument('-s', '--embeddings-size', type=int, default=50,
help='Number of features per word (default 50)',
dest='embeddings_size')
parser.add_argument('-e', '--epochs', type=int, default=100,
help='Number of training epochs (default 100)',
dest='iterations')
parser.add_argument('-l', '--learning-rate', type=float, default=0.001,
help='Learning rate for network weights (default 0.001)',
dest='learning_rate')
parser.add_argument('-n', '--hidden', type=int, default=200,
help='Number of hidden neurons (default 200)')
parser.add_argument('--ngrams', type=int, default=2,
help='Length of ngrams (default 2)')
parser.add_argument('--alpha', type=float, default=0.5,
help='Relative weight of normal wrt sentiment score (default 0.5)')
parser.add_argument('train', type=str,
help='File with text corpus for training.')
parser.add_argument('-o', '--output', type=str, default=None,
help='File where to save the model')
parser.add_argument('--vocab', type=str, required=True,
help='Vocabulary file, either read or created')
parser.add_argument('--vectors', type=str, required=True,
help='Embeddings file, either read and updated or created')
parser.add_argument('--load', type=str, default=None,
help='Load previously saved model')
parser.add_argument('--threads', type=int, default=1,
help='Number of threads (default 1)')
parser.add_argument('--variant', type=str, default=None,
help='Either "senna" (default), "polyglot" or "word2vec".')
parser.add_argument('-v', '--verbose', help='Verbose mode',
action='store_true')
args = parser.parse_args()
log_format = '%(message)s'
log_level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(format=log_format, level=log_level)
logger = logging.getLogger("Logger")
config = ConfigParser()
if args.config_file:
config.read(args.config_file)
# merge args with config
reader = TweetReader(args.ngrams)
reader.read(args.train)
vocab, bigrams, trigrams = reader.create_vocabulary(reader.sentences,
min_occurrences=2)
if os.path.exists(args.vocab):
# start with the given vocabulary
base_vocab = reader.load_vocabulary(args.vocab)
if os.path.exists(args.vectors):
embeddings = Embeddings(vectors=args.vectors, vocab=base_vocab,
variant=args.variant)
else:
embeddings = Embeddings(args.embeddings_size, vocab=base_vocab,
variant=args.variant)
# add the ngrams from the corpus
embeddings.merge(vocab)
logger.info("Overriding vocabulary in %s" % args.vocab)
embeddings.save_vocabulary(args.vocab)
elif args.variant == 'word2vec' and os.path.exists(args.vectors):
embeddings = Embeddings(vectors=args.vectors,
variant=args.variant)
embeddings.merge(vocab)
else:
embeddings = Embeddings(args.embeddings_size, vocab=vocab,
variant=args.variant)
# Assume bigrams are prefix of trigrams, or else we should put a terminator
# on trie
trie = {}
for b in chain(bigrams, trigrams):
tmp = trie
for w in b:
tmp = tmp.setdefault(embeddings.dict[w], {})
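    # e.g. the bigram ('new', 'york') and the trigram ('new', 'york', 'city')
    # build the nested map {i_new: {i_york: {i_city: {}}}}, where i_w = embeddings.dict[w]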
converter = Converter()
converter.add(embeddings)
trainer = create_trainer(args, converter)
report_intervals = max(args.iterations / 200, 1)
report_intervals = 10000 # DEBUG
logger.info("Starting training")
# a generator expression (can be iterated several times)
# It caches converted sentences, avoiding repeated conversions
converted_sentences = converter.generator(reader.sentences, cache=True)
trainer.train(converted_sentences, reader.polarities, trie,
args.iterations, report_intervals)
logger.info("Overriding vectors to %s" % args.vectors)
embeddings.save_vectors(args.vectors)
if args.output:
logger.info("Saving trained model to %s" % args.output)
trainer.save(args.output)
|
gpl-3.0
| -4,546,892,508,208,062,000
| 37.114583
| 98
| 0.599344
| false
| 4.222735
| true
| false
| false
|
lhilt/scipy
|
runtests.py
|
1
|
17145
|
#!/usr/bin/env python
"""
runtests.py [OPTIONS] [-- ARGS]
Run tests, building the project first.
Examples::
$ python runtests.py
$ python runtests.py -s {SAMPLE_SUBMODULE}
$ python runtests.py -t {SAMPLE_TEST}
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
Run a debugger:
$ gdb --args python runtests.py [...other args...]
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
$ python runtests.py --gcov [...other args...]
$ python runtests.py --lcov-html
"""
#
# This is a generic test runner script for projects using NumPy's test
# framework. Change the following values to adapt to your project:
#
PROJECT_MODULE = "scipy"
PROJECT_ROOT_FILES = ['scipy', 'LICENSE.txt', 'setup.py']
SAMPLE_TEST = "scipy.fftpack.tests.test_real_transforms::TestIDSTIIIInt"
SAMPLE_SUBMODULE = "optimize"
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
# ---------------------------------------------------------------------
if __doc__ is None:
__doc__ = "Run without -OO if you want usage info"
else:
__doc__ = __doc__.format(**globals())
import sys
import os
# In case we are run from the source directory, we don't want to import the
# project from there:
sys.path.pop(0)
from argparse import ArgumentParser, REMAINDER
import shutil
import subprocess
import time
import datetime
try:
from types import ModuleType as new_module
except ImportError: # old Python
from imp import new_module
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("--verbose", "-v", action="count", default=1,
help="more verbosity")
parser.add_argument("--no-build", "-n", action="store_true", default=False,
help="do not build the project (use system installed version)")
parser.add_argument("--build-only", "-b", action="store_true", default=False,
help="just build, do not run any tests")
parser.add_argument("--doctests", action="store_true", default=False,
help="Run doctests in module")
parser.add_argument("--refguide-check", action="store_true", default=False,
help="Run refguide check (do not run regular tests.)")
parser.add_argument("--coverage", action="store_true", default=False,
help=("report coverage of project code. HTML output"
" goes under build/coverage"))
parser.add_argument("--gcov", action="store_true", default=False,
help=("enable C code coverage via gcov (requires GCC)."
" gcov output goes to build/**/*.gc*"))
parser.add_argument("--lcov-html", action="store_true", default=False,
help=("produce HTML for C code coverage information "
"from a previous run with --gcov. "
"HTML output goes to build/lcov/"))
parser.add_argument("--mode", "-m", default="fast",
help="'fast', 'full', or something that could be "
"passed to nosetests -A [default: fast]")
parser.add_argument("--submodule", "-s", default=None,
help="Submodule whose tests to run (cluster,"
" constants, ...)")
parser.add_argument("--pythonpath", "-p", default=None,
help="Paths to prepend to PYTHONPATH")
parser.add_argument("--tests", "-t", action='append',
help="Specify tests to run")
parser.add_argument("--python", action="store_true",
help="Start a Python shell with PYTHONPATH set")
parser.add_argument("--ipython", "-i", action="store_true",
help="Start IPython shell with PYTHONPATH set")
parser.add_argument("--shell", action="store_true",
help="Start Unix shell with PYTHONPATH set")
parser.add_argument("--debug", "-g", action="store_true",
help="Debug build")
parser.add_argument("--parallel", "-j", type=int, default=1,
help="Number of parallel jobs during build (requires "
"NumPy 1.10 or greater).")
parser.add_argument("--show-build-log", action="store_true",
help="Show build output rather than using a log file")
parser.add_argument("--bench", action="store_true",
help="Run benchmark suite instead of test suite")
parser.add_argument("--bench-compare", action="append", metavar="BEFORE",
help=("Compare benchmark results of current HEAD to"
" BEFORE. Use an additional "
"--bench-compare=COMMIT to override HEAD with"
" COMMIT. Note that you need to commit your "
"changes first!"
))
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
help="Arguments to pass to Nose, Python or shell")
parser.add_argument("--pep8", action="store_true", default=False,
help="Perform pep8 check with pycodestyle.")
args = parser.parse_args(argv)
if args.pep8:
# os.system("flake8 scipy --ignore=F403,F841,F401,F811,F405,E121,E122,"
# "E123,E125,E126,E127,E128,E226,E231,E251,E265,E266,E302,"
# "E402,E501,E712,E721,E731,E741,W291,W293,W391,W503,W504"
# "--exclude=scipy/_lib/six.py")
os.system("pycodestyle scipy benchmarks/benchmarks")
sys.exit(0)
if args.bench_compare:
args.bench = True
args.no_build = True # ASV does the building
if args.lcov_html:
# generate C code coverage output
lcov_generate()
sys.exit(0)
if args.pythonpath:
for p in reversed(args.pythonpath.split(os.pathsep)):
sys.path.insert(0, p)
if args.gcov:
gcov_reset_counters()
if args.debug and args.bench:
print("*** Benchmarks should not be run against debug version; "
"remove -g flag ***")
if not args.no_build:
site_dir = build_project(args)
sys.path.insert(0, site_dir)
os.environ['PYTHONPATH'] = site_dir
extra_argv = args.args[:]
if extra_argv and extra_argv[0] == '--':
extra_argv = extra_argv[1:]
if args.python:
if extra_argv:
# Don't use subprocess, since we don't want to include the
# current path in PYTHONPATH.
sys.argv = extra_argv
with open(extra_argv[0], 'r') as f:
script = f.read()
sys.modules['__main__'] = new_module('__main__')
ns = dict(__name__='__main__',
__file__=extra_argv[0])
exec_(script, ns)
sys.exit(0)
else:
import code
code.interact()
sys.exit(0)
if args.ipython:
import IPython
IPython.embed(user_ns={})
sys.exit(0)
if args.shell:
shell = os.environ.get('SHELL', 'sh')
print("Spawning a Unix shell...")
os.execv(shell, [shell] + extra_argv)
sys.exit(1)
if args.coverage:
dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
fn = os.path.join(dst_dir, 'coverage_html.js')
if os.path.isdir(dst_dir) and os.path.isfile(fn):
shutil.rmtree(dst_dir)
extra_argv += ['--cov-report=html:' + dst_dir]
if args.refguide_check:
cmd = [os.path.join(ROOT_DIR, 'tools', 'refguide_check.py'),
'--doctests']
if args.submodule:
cmd += [args.submodule]
os.execv(sys.executable, [sys.executable] + cmd)
sys.exit(0)
if args.bench:
# Run ASV
items = extra_argv
if args.tests:
items += args.tests
if args.submodule:
items += [args.submodule]
bench_args = []
for a in items:
bench_args.extend(['--bench', a])
if not args.bench_compare:
cmd = [os.path.join(ROOT_DIR, 'benchmarks', 'run.py'),
'run', '-n', '-e', '--python=same'] + bench_args
os.execv(sys.executable, [sys.executable] + cmd)
sys.exit(1)
else:
if len(args.bench_compare) == 1:
commit_a = args.bench_compare[0]
commit_b = 'HEAD'
elif len(args.bench_compare) == 2:
commit_a, commit_b = args.bench_compare
else:
p.error("Too many commits to compare benchmarks for")
# Check for uncommitted files
if commit_b == 'HEAD':
r1 = subprocess.call(['git', 'diff-index', '--quiet',
'--cached', 'HEAD'])
r2 = subprocess.call(['git', 'diff-files', '--quiet'])
if r1 != 0 or r2 != 0:
print("*"*80)
print("WARNING: you have uncommitted changes --- "
"these will NOT be benchmarked!")
print("*"*80)
# Fix commit ids (HEAD is local to current repo)
p = subprocess.Popen(['git', 'rev-parse', commit_b],
stdout=subprocess.PIPE)
out, err = p.communicate()
commit_b = out.strip()
p = subprocess.Popen(['git', 'rev-parse', commit_a],
stdout=subprocess.PIPE)
out, err = p.communicate()
commit_a = out.strip()
cmd = [os.path.join(ROOT_DIR, 'benchmarks', 'run.py'),
'continuous', '-e', '-f', '1.05',
commit_a, commit_b] + bench_args
os.execv(sys.executable, [sys.executable] + cmd)
sys.exit(1)
if args.build_only:
sys.exit(0)
else:
__import__(PROJECT_MODULE)
test = sys.modules[PROJECT_MODULE].test
if args.submodule:
tests = [PROJECT_MODULE + "." + args.submodule]
elif args.tests:
tests = args.tests
else:
tests = None
# Run the tests
if not args.no_build:
test_dir = site_dir
else:
test_dir = os.path.join(ROOT_DIR, 'build', 'test')
if not os.path.isdir(test_dir):
os.makedirs(test_dir)
shutil.copyfile(os.path.join(ROOT_DIR, '.coveragerc'),
os.path.join(test_dir, '.coveragerc'))
cwd = os.getcwd()
try:
os.chdir(test_dir)
result = test(args.mode,
verbose=args.verbose,
extra_argv=extra_argv,
doctests=args.doctests,
coverage=args.coverage,
tests=tests,
parallel=args.parallel)
finally:
os.chdir(cwd)
if isinstance(result, bool):
sys.exit(0 if result else 1)
elif result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
def build_project(args):
"""
Build a dev version of the project.
Returns
-------
site_dir
site-packages directory where it was installed
"""
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
print("To build the project, run runtests.py in "
"git checkout or unpacked source")
sys.exit(1)
dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')
env = dict(os.environ)
cmd = [sys.executable, 'setup.py']
# Always use ccache, if installed
env['PATH'] = os.pathsep.join(EXTRA_PATH +
env.get('PATH', '').split(os.pathsep))
if args.debug or args.gcov:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
if args.gcov:
import distutils.sysconfig
cvars = distutils.sysconfig.get_config_vars()
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
env['CC'] = cvars['CC'] + ' --coverage'
env['CXX'] = cvars['CXX'] + ' --coverage'
env['F77'] = 'gfortran --coverage '
env['F90'] = 'gfortran --coverage '
env['LDSHARED'] = cvars['LDSHARED'] + ' --coverage'
env['LDFLAGS'] = " ".join(cvars['LDSHARED'].split()[1:]) +\
' --coverage'
cmd += ['build']
if args.parallel > 1:
cmd += ['-j', str(args.parallel)]
# Install; avoid producing eggs so scipy can be imported from dst_dir.
cmd += ['install', '--prefix=' + dst_dir,
'--single-version-externally-managed',
'--record=' + dst_dir + 'tmp_install_log.txt']
from distutils.sysconfig import get_python_lib
site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)
# easy_install won't install to a path that Python by default cannot see
# and isn't on the PYTHONPATH. Plus, it has to exist.
if not os.path.exists(site_dir):
os.makedirs(site_dir)
env['PYTHONPATH'] = site_dir
log_filename = os.path.join(ROOT_DIR, 'build.log')
start_time = datetime.datetime.now()
if args.show_build_log:
ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
else:
log_filename = os.path.join(ROOT_DIR, 'build.log')
print("Building, see build.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
cwd=ROOT_DIR)
try:
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
                # allow continuous integration environments to kill a hanging
# process accurately if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
elapsed = datetime.datetime.now() - start_time
print(" ... build in progress ({0} "
"elapsed)".format(elapsed))
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
except: # noqa: E722
p.terminate()
raise
elapsed = datetime.datetime.now() - start_time
if ret == 0:
print("Build OK ({0} elapsed)".format(elapsed))
else:
if not args.show_build_log:
with open(log_filename, 'r') as f:
print(f.read())
print("Build failed! ({0} elapsed)".format(elapsed))
sys.exit(1)
return site_dir
#
# GCOV support
#
def gcov_reset_counters():
print("Removing previous GCOV .gcda files...")
build_dir = os.path.join(ROOT_DIR, 'build')
for dirpath, dirnames, filenames in os.walk(build_dir):
for fn in filenames:
if fn.endswith('.gcda') or fn.endswith('.da'):
pth = os.path.join(dirpath, fn)
os.unlink(pth)
#
# LCOV support
#
LCOV_OUTPUT_FILE = os.path.join(ROOT_DIR, 'build', 'lcov.out')
LCOV_HTML_DIR = os.path.join(ROOT_DIR, 'build', 'lcov')
def lcov_generate():
try:
os.unlink(LCOV_OUTPUT_FILE)
except OSError:
pass
try:
shutil.rmtree(LCOV_HTML_DIR)
except OSError:
pass
print("Capturing lcov info...")
subprocess.call(['lcov', '-q', '-c',
'-d', os.path.join(ROOT_DIR, 'build'),
'-b', ROOT_DIR,
'--output-file', LCOV_OUTPUT_FILE])
print("Generating lcov HTML output...")
ret = subprocess.call(['genhtml', '-q', LCOV_OUTPUT_FILE,
'--output-directory', LCOV_HTML_DIR,
'--legend', '--highlight'])
if ret != 0:
print("genhtml failed!")
else:
print("HTML output generated under build/lcov/")
#
# Python 3 support
#
if sys.version_info[0] >= 3:
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
if __name__ == "__main__":
main(argv=sys.argv[1:])
|
bsd-3-clause
| -2,131,450,785,985,568,300
| 34.205339
| 87
| 0.533508
| false
| 3.883352
| true
| false
| false
|
YourCyborg/Sun-RPI
|
docs/sphinx/src2rest/src2rest.py
|
1
|
2118
|
#! /usr/bin/python
#
# Auto-generate reST documentation for Sphinx from Evennia source
# code.
#
# Uses etienned's sphinx autopackage script. Install it to folder
# "autogen" in this same directory:
#
# hg clone https://bitbucket.org/etienned/sphinx-autopackage-script autogen
#
# Create a directory tree "code/" containing one directory for every
# package in the PACKAGE dictionary below. Make sure EVENNIA_DIR
# points to an Evennia root dir. Then just run this script. A new
# folder sphinx/source/code will be created with the reST sources.
#
# Note - this is not working very well at the moment, not all sources
# seem to be properly detected and you get lots of errors when
# compiling. To nevertheless make a link to the code from the doc
# front page, edit docs/sphinx/sources/index.rst to reference
# code/modules.
#
import os, subprocess, shutil
EVENNIA_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
SPHINX_DIR = os.path.join(os.path.join(EVENNIA_DIR, "docs"), "sphinx")
SPHINX_SRC_DIR = os.path.join(SPHINX_DIR, "source")
SPHINX_CODE_DIR = os.path.join(SPHINX_SRC_DIR, "code")
CONVERT_DIR = os.path.join(SPHINX_DIR, 'src2rest')
AUTOGEN_EXE = os.path.join(CONVERT_DIR, os.path.join("autogen", "generate_modules.py"))
def src2rest():
"""
Run import
"""
try:
shutil.rmtree(SPHINX_CODE_DIR)
print "Emptied old %s." % SPHINX_CODE_DIR
except OSError:
pass
os.mkdir(SPHINX_CODE_DIR)
inpath = EVENNIA_DIR
outpath = SPHINX_CODE_DIR
excludes = [r".*/migrations/.*", r"evennia\.py$", r"manage\.py$",
r"runner\.py$", r"server.py$", r"portal.py$"]
subprocess.call(["python", AUTOGEN_EXE,
"-n", "Evennia",
"-d", outpath,
"-s", "rst",
"-f",
inpath] + excludes)
if __name__ == '__main__':
try:
src2rest()
except Exception, e:
print e
print "Make sure to read the header of this file so that it's properly set up."
|
bsd-3-clause
| 6,890,642,337,181,281,000
| 32.09375
| 107
| 0.635505
| false
| 3.228659
| false
| false
| false
|
BartGo/bottle-stack
|
nonpip-dl.py
|
1
|
1774
|
""" Download external components (non-Python)
"""
import requests
import zipfile
""" Download additional (non-Python) files needed by the project
"""
import os
import shutil
import sys
sys.path.append("./lib")
# http://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
def download_file(url):
"""Download a file"""
local_filename = url.split('/')[-1]
# NOTE the stream=True parameter
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return local_filename
# http://stackoverflow.com/questions/9431918/extracting-zip-file-contents-to-specific-directory-in-python-2-7
def unzip_file(file_in, file_out):
"""Unzip a file"""
with open(file_in, 'rb') as fh:
z = zipfile.ZipFile(fh)
for name in z.namelist():
outpath = file_out
z.extract(name, outpath)
print "Collecting assets (jquery, skeleton-css)"
if True:
shutil.rmtree("app/static/assets/jquery", True)
shutil.os.mkdir("app/static/assets/jquery")
shutil.os.mkdir("app/static/assets/jquery/js")
download_file("http://code.jquery.com/jquery-1.11.3.min.js")
shutil.move("jquery-1.11.3.min.js", "app/static/assets/jquery/js/jquery-1.11.3.min.js")
if True:
shutil.rmtree("app/static/assets/bootstrap", True)
download_file("https://github.com/twbs/bootstrap/releases/download/v3.3.4/bootstrap-3.3.4-dist.zip")
unzip_file("bootstrap-3.3.4-dist.zip", ".")
os.remove("bootstrap-3.3.4-dist.zip")
shutil.move("bootstrap-3.3.4-dist", "app/static/assets/bootstrap")
print "Completed"
|
mit
| 1,648,368,742,869,832,000
| 31.851852
| 109
| 0.669109
| false
| 3.184919
| false
| false
| false
|
wondie/batch_gps_importer
|
batch_gps_importer.py
|
1
|
3604
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Batch GPS Importer
A QGIS plugin
Initializer of the plugin.
-------------------
begin : 2017-03-18
copyright : (C) 2017 by Wondimagegn Tesfaye Beshah
email : wondim81@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 3 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt5.QtCore import QSettings, QFileInfo, QTranslator, qVersion, \
QCoreApplication
from PyQt5.QtWidgets import QAction
from PyQt5.QtGui import QIcon
from .ui.gps_importer_starter import GpsImporter
from . import PLUGIN_DIR
class BatchGpsImporter(object):
"""
    BatchGpsImporter initializes the whole plugin and adds it to the toolbar
    and the Vector menu of QGIS.
"""
def __init__(self, iface):
"""
Initializes iface and importer object.
        :param iface: QGIS interface instance passed in by QGIS.
        :type iface: QgisInterface
"""
self.iface = iface
self.importer = None
# Setup locale
locale_path = ''
locale = QSettings().value("locale/userLocale")[0:2]
if QFileInfo(PLUGIN_DIR).exists():
            # Replace backslashes with forward slashes
# PLUGIN_DIR = PLUGIN_DIR.replace("\\", "/")
locale_path = PLUGIN_DIR + "/i18n/batch_gps_importer_%s.qm" % (locale,)
if QFileInfo(locale_path).exists():
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
def initGui(self):
"""
Initializes the plugin GUI.
"""
self.action = QAction(
QIcon('{}/images/batch.png'.format(PLUGIN_DIR)),
'Batch GPS Importer', self.iface.mainWindow()
)
self.action.setObjectName('gps_importer_action')
self.action.setWhatsThis('Configuration for Batch GPS Importer')
self.action.setStatusTip('Batch import GPX files')
self.action.triggered.connect(self.run)
# add toolbar button and menu item
self.iface.addToolBarIcon(self.action)
self.iface.addPluginToVectorMenu('&Batch GPS Importer', self.action)
def unload(self):
"""
Removes the plugin properly.
"""
# remove the plugin menu item and icon
self.iface.removePluginMenu('&Batch GPS Importer', self.action)
self.iface.removeToolBarIcon(self.action)
# disconnect form signal of the canvas
self.action.triggered.disconnect(self.run)
def run(self):
"""
Starts the plugin GUI.
:return:
:rtype:
"""
if self.importer is None:
self.importer = GpsImporter(self.iface)
self.importer.show()
else:
self.importer.show()
self.importer.activateWindow()
|
gpl-3.0
| 602,411,071,137,016,400
| 36.936842
| 83
| 0.508879
| false
| 4.650323
| false
| false
| false
|
ggood/adsbTheremin
|
aircraft_map.py
|
1
|
6296
|
# aircraft_map: maintains a list of aircraft "seen" by an ADSB
# receiver.
import math
import time
DEFAULT_PURGE_TIME = 120 # Forget planes not heard from in this many seconds
DEFAULT_PURGE_INTERVAL = 1 # How often to purge stale aircraft
EARTH_RADIUS = 6371000 # Earth's radius in meters
class Aircraft(object):
"""Represents a single aircraft"""
def __init__(self, id):
self._id = id
self._altitude = 0
self._latitude = 0.0
self._longitude = 0.0
self._update = 0.0
@property
def id(self):
return self._id
@property
def altitude(self):
return self._altitude
@property
def latitude(self):
return self._latitude
@property
def longitude(self):
return self._longitude
def __str__(self):
return "%s: alt %d lat %f lon %f" % (
self.id, self.altitude, self.latitude, self.longitude)
def __repr__(self):
return self.__str__()
def update(self, altitude, latitude, longitude):
"""Update an aircraft's altitude, latitude, and longitude"""
self._altitude = altitude
self._latitude = latitude
self._longitude = longitude
self._update = time.time()
def distance_to(self, lat, lon):
"""
Compute the distance from the aircraft to the point given by
lat and lon. This does not consider the aircraft's altitude. In
other words, this computes the distance to the projection
of the aircraft on the ground.
"""
d_lat = math.radians(lat - self._latitude)
d_lon = math.radians(lon - self._longitude)
lat1_rad = math.radians(self._latitude)
lat2_rad = math.radians(lat)
a = (math.sin(d_lat/2) * math.sin(d_lat/2) +
math.sin(d_lon/2) * math.sin(d_lon/2) *
math.cos(lat1_rad) * math.cos(lat2_rad))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a));
d = EARTH_RADIUS * c;
return d
def bearing_from(self, lat, lon):
"""
Compute the bearing, in degrees, of the aircraft as seen from
the position given by lat and lon.
"""
lat1_rad = math.radians(self._latitude)
long1_rad = math.radians(self._longitude)
lat2_rad = math.radians(lat)
long2_rad = math.radians(lon)
d_lon = long2_rad - long1_rad
d_phi = math.log(
math.tan(
lat2_rad/2.0+math.pi/4.0)/math.tan(lat1_rad/2.0+math.pi/4.0))
if abs(d_lon) > math.pi:
if d_lon > 0.0:
                d_lon = -(2.0 * math.pi - d_lon)
            else:
                d_lon = (2.0 * math.pi + d_lon)
bearing = (math.degrees(math.atan2(d_lon, d_phi)) + 360.0) % 360.0;
return bearing
class AircraftMap(object):
"""
This class keeps track of aircraft heard by an ADSB receiver.
You can feed all lines returned by the ADSB receiver into this
code, and it will consume all airborne position messages and update
the list of aircraft.
Aircraft not heard from in purge_age seconds will be discarded.
"""
def __init__(self, latitude, longitude, purge_age=DEFAULT_PURGE_TIME):
"""
Arguments:
latitude: the latitude, in fractional degrees, of the observer.
longitude: the longitude, in fractional degrees, of the observer.
purge_age: the time, in seconds, after which aircraft will be
discarded if no position updates have been seen.
"""
self._aircraft = {} # ADSB ID -> aircraft
self._latitude = latitude
self._longitude = longitude
self._purge_age = purge_age
self._last_purge = time.time()
def update(self, line):
self._purge()
parts = line.split(",")
if parts and (parts[0] == "MSG"):
if parts[1] == "3":
# Airborne position message
try:
aircraft_id = parts[4]
try:
altitude = int(parts[11])
lat = float(parts[14])
lon = float(parts[15])
aircraft = self._aircraft.get(aircraft_id)
if aircraft is None:
aircraft = Aircraft(aircraft_id)
self._aircraft[aircraft_id] = aircraft
aircraft.update(altitude, lat, lon)
except ValueError:
# Some position messages omit the lat/lon. Ignore.
return
except:
print("big oops: %s" % line)
raise
def _purge(self):
if time.time() - self._last_purge < DEFAULT_PURGE_INTERVAL:
return
n = 0
        for id, aircraft in list(self._aircraft.items()):
if aircraft._update < time.time() - self._purge_age:
del self._aircraft[id]
n += 1
#print("purged %d aircraft, %d remaining" % (n, len(self._aircraft)))
self._last_purge = time.time()
def print_summary(self):
print("%d aircraft" % len(self._aircraft))
def closest(self, count, min_altitude=0, max_altitude=100000):
"""
Return the closest [count] aircraft. If min_altitude or
        max_altitude is provided, limit the returned results to
aircraft in that range. May return fewer than <count>
aircraft.
"""
# I know there's a one-line list comprehension that will do
# this, but I suck.
ret = []
dist_map = {} # distance -> aircraft
for id, aircraft in self._aircraft.items():
dist = aircraft.distance_to(self._latitude, self._longitude)
dist_map[dist] = aircraft
closest = sorted(dist_map.keys())
for d in closest:
aircraft = dist_map[d]
if (aircraft.altitude <= max_altitude and
aircraft.altitude >= min_altitude):
ret.append(aircraft)
if len(ret) >= count:
return ret
return ret
def count(self):
"""
Return the count of aircraft in the map.
"""
return len(self._aircraft)
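# Minimal usage sketch (illustrative; the coordinates and input file below are
# assumptions, not part of the original module). Feed SBS-1/BaseStation CSV
# lines, e.g. captured from dump1090's port-30003 output, into the map:
#
#   amap = AircraftMap(37.77, -122.42)
#   for line in open('sbs_dump.txt'):
#       amap.update(line)
#   for ac in amap.closest(3, max_altitude=10000):
#       print(ac, "bearing", ac.bearing_from(37.77, -122.42))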
|
unlicense
| 2,571,144,089,346,902,500
| 33.032432
| 77
| 0.546537
| false
| 3.797346
| false
| false
| false
|
cmdunkers/DeeperMind
|
PythonEnv/lib/python2.7/site-packages/theano/sandbox/gpuarray/opt.py
|
1
|
35148
|
import copy
import numpy
import logging
from six.moves import xrange
import theano
from theano import tensor, scalar, gof
from theano.compile import optdb
from theano.compile.ops import shape_i
from theano.gof import (local_optimizer, EquilibriumDB,
SequenceDB, Optimizer, toolbox)
from theano.gof.optdb import LocalGroupDB
from theano.scalar.basic import Scalar, Pow, Cast
from theano.scan_module import scan_utils, scan_op, scan_opt
from theano.tensor.nnet.conv import ConvOp
from theano.tests.breakpoint import PdbBreakpoint
from .type import (GpuArrayType, GpuArrayConstant, get_context,
ContextNotDefined)
from .basic_ops import (as_gpuarray_variable, infer_context_name,
host_from_gpu, GpuToGpu,
HostFromGpu, GpuFromHost,
GpuSplit, GpuContiguous,
GpuAlloc, GpuAllocEmpty, GpuReshape,
GpuEye, gpu_join, GpuJoin)
from .blas import (gpu_dot22, GpuGemv, GpuGemm, GpuGer,
gpugemm_no_inplace)
from .conv import GpuConv
from .nnet import (GpuCrossentropySoftmaxArgmax1HotWithBias,
GpuCrossentropySoftmax1HotWithBiasDx,
GpuSoftmaxWithBias, GpuSoftmax)
from .elemwise import (GpuElemwise, GpuDimShuffle, GpuCAReduceCuda,
GpuCAReduceCPY)
from .subtensor import (GpuIncSubtensor, GpuSubtensor,
GpuAdvancedSubtensor1,
GpuAdvancedIncSubtensor1,
GpuAdvancedIncSubtensor1_dev20)
from .opt_util import alpha_merge, output_merge
_logger = logging.getLogger("theano.sandbox.gpuarray.opt")
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
gpu_seqopt = SequenceDB()
# Don't register this right now
conv_groupopt = LocalGroupDB()
conv_groupopt.__name__ = "gpua_conv_opts"
gpu_seqopt.register('gpuarray_local_optimiziations', gpu_optimizer, 1,
'fast_compile', 'fast_run', 'inplace', 'gpuarray')
gpu_seqopt.register('gpuarray_cut_transfers', gpu_cut_copies, 2,
'fast_compile', 'fast_run', 'gpuarray')
# do not add 'fast_run' to these two as this would always enable gpuarray mode
optdb.register('gpuarray_opt', gpu_seqopt,
optdb.__position__.get('add_destroy_handler', 49.5) - 1,
'gpuarray')
def register_opt(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
gpu_optimizer.register(name, local_opt, 'fast_run', 'gpuarray', *tags)
return local_opt
return f
register_opt('fast_compile')(theano.tensor.opt.local_track_shape_i)
gpu_optimizer.register('local_remove_all_assert',
theano.tensor.opt.local_remove_all_assert,
'unsafe')
def safe_to_gpu(x, ctx_name):
if isinstance(x.type, tensor.TensorType):
return GpuFromHost(ctx_name)(x)
else:
return x
def safe_to_cpu(x):
if isinstance(x.type, GpuArrayType):
return host_from_gpu(x)
else:
return x
def op_lifter(OP, cuda_only=False):
"""
OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))
gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)
"""
def f(maker):
def local_opt(node):
if type(node.op) in OP:
# Either one of our inputs is on the gpu or
# all of our clients are on the gpu
replace = False
# TODO: Maybe set context_name with infer_context_name()?
context_name = None
# We replace if any input is a host_from_gpu
for i in node.inputs:
if i.owner and i.owner.op == host_from_gpu:
context_name = i.owner.inputs[0].type.context_name
replace = True
break
if not replace:
# We replace if *all* clients are on the GPU
clients = [c for o in node.outputs for c in o.clients]
replace = len(clients) != 0
for c, idx in clients:
if (c == 'output' or
not isinstance(c.op, GpuFromHost)):
replace = False
# TODO: check that the clients want the same context?
if replace:
# All clients are GpuFromHost and we have at least one
context_name = clients[0][0].op.context_name
# Check if we should replace
if (not replace or
(cuda_only and
get_context(context_name).kind != 'cuda')):
return False
new_op = maker(node, context_name)
# This is needed as sometimes new_op inherits from OP.
if new_op and new_op != node.op:
if isinstance(new_op, theano.Op):
# tag the inputs with the context in case
# the context was derived from the outputs
def tag(i, ctx):
i.tag.context_name = ctx
return i
inputs = [tag(i, context_name) for i in node.inputs]
return [safe_to_cpu(o) for o in
new_op(*inputs, return_list=True)]
elif isinstance(new_op, (tuple, list)):
return [safe_to_cpu(o) for o in new_op]
else: # suppose it is a variable on the GPU
return [host_from_gpu(new_op)]
return False
local_opt.__name__ = maker.__name__
return local_optimizer(OP)(local_opt)
return f
class InputToGpuOptimizer(Optimizer):
"""
Transfer the input to the gpu to start the rolling wave.
"""
def add_requirements(self, fgraph):
fgraph.attach_feature(toolbox.ReplaceValidate())
def apply(self, fgraph):
for input in fgraph.inputs:
if isinstance(input.type, GpuArrayType):
continue
# If all clients are outputs or transfers don't do anything.
if (all(cl[0] == 'output' or isinstance(cl[0].op, GpuFromHost)
for cl in input.clients)):
continue
ctx_name = getattr(input.tag, 'context_name', None)
try:
new_input = host_from_gpu(GpuFromHost(ctx_name)(input))
fgraph.replace_validate(input, new_input,
"InputToGpuOptimizer")
except TypeError:
# This could fail if the inputs are not TensorTypes
pass
except ContextNotDefined:
if hasattr(input.tag, 'context_name'):
raise
# If there is no context tag and no default context
# then it stays on the CPU
pass
gpu_seqopt.register('InputToGpuArrayOptimizer', InputToGpuOptimizer(),
0, 'fast_run', 'fast_compile', 'merge')
@local_optimizer([GpuFromHost, GpuToGpu, host_from_gpu])
def local_cut_gpu_transfers(node):
# gpu[ab] -> host -> gpub
if (isinstance(node.op, GpuFromHost) and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, HostFromGpu)):
other = node.inputs[0].owner.inputs[0]
if node.op.context_name == other.type.context_name:
return [other]
else:
return [GpuToGpu(node.op.context_name)(other)]
# ? -> gpua -> host
elif (isinstance(node.op, HostFromGpu) and
node.inputs[0].owner):
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [n2.inputs[0]]
# gpub ->
if isinstance(n2.op, GpuToGpu):
return [host_from_gpu(n2.inputs[0])]
# ? -> gpua -> gpub
elif isinstance(node.op, GpuToGpu):
# Transfer within same context
if node.inputs[0].type.context_name == node.op.context_name:
return [node.inputs[0]]
if node.inputs[0].owner:
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [GpuFromHost(node.op.context_name)(n2.inputs[0])]
# gpuc ->
if isinstance(n2.op, GpuToGpu):
if node.op.context_name == n2.inputs[0].type.context_name:
return [n2.inputs[0]]
else:
return [node.op(n2.inputs[0])]
gpu_cut_copies.register('cut_gpua_host_transfers', local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'inplace', 'gpuarray')
gpu_cut_copies.register('cut_gpua_constant_transfers',
tensor.opt.constant_folding,
'fast_compile', 'fast_run', 'gpuarray')
optdb['canonicalize'].register('local_cut_gpua_host_gpua',
local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'gpuarray')
@register_opt('fast_compile')
@local_optimizer([tensor.Alloc])
def local_gpuaalloc2(node):
"""
Join(axis, {Alloc or HostFromGPU}, ...) -> Join(axis, GpuAlloc, Alloc, ...)
Moves an alloc that is an input to join to the gpu.
"""
try:
get_context(None)
except ContextNotDefined:
# If there is no default context then we do not perform the move here.
return
if (isinstance(node.op, tensor.Alloc) and
all(c != 'output' and
c.op == tensor.join and
all(i.owner and
i.owner.op in [host_from_gpu, tensor.alloc]
for i in c.inputs[1:])
for c, idx in node.outputs[0].clients)):
return [host_from_gpu(GpuAlloc(None)(*node.inputs))]
@register_opt('fast_compile')
@op_lifter([tensor.Alloc])
def local_gpuaalloc(node, context_name):
return GpuAlloc(context_name)(*node.inputs)
@register_opt()
@local_optimizer([GpuAlloc])
def local_gpualloc_memset_0(node):
if isinstance(node.op, GpuAlloc) and not node.op.memset_0:
inp = node.inputs[0]
if (isinstance(inp, GpuArrayConstant) and
inp.data.size == 1 and
(numpy.asarray(inp.data) == 0).all()):
new_op = GpuAlloc(node.op.context_name, memset_0=True)
return [new_op(*node.inputs)]
@register_opt()
@local_optimizer([GpuContiguous])
def local_gpu_contiguous_gpu_contiguous(node):
"""
gpu_contiguous(gpu_contiguous(x)) -> gpu_contiguous(x)
"""
if isinstance(node.op, GpuContiguous):
inp = node.inputs[0]
if inp.owner and isinstance(inp.owner.op, GpuContiguous):
return [inp]
@register_opt('fast_compile')
@op_lifter([tensor.Reshape])
def local_gpureshape(node, context_name):
op = node.op
name = op.name
if name:
name = 'Gpu' + name
res = GpuReshape(op.ndim, op.name)
return res
@register_opt('fast_compile')
@op_lifter([tensor.Rebroadcast])
def local_gpu_rebroadcast(node, context_name):
if isinstance(node.inputs[0].owner.op, HostFromGpu):
return node.op(node.inputs[0].owner.inputs[0])
@register_opt('fast_compile')
@op_lifter([tensor.Flatten])
def local_gpuflatten(node, context_name):
op = node.op
shp = []
if op.outdim != 1:
shp = [node.inputs[0].shape[i] for i in range(op.outdim - 1)]
shp += [-1]
res = GpuReshape(op.outdim, None)
o = res(node.inputs[0], theano.tensor.as_tensor_variable(shp))
return o
@register_opt('fast_compile')
@op_lifter([tensor.Elemwise])
def local_gpu_elemwise(node, context_name):
op = node.op
scal_op = op.scalar_op
name = op.name
if name:
name = 'Gpu' + name
if len(node.outputs) > 1:
return
res = GpuElemwise(scal_op, name=name,
inplace_pattern=copy.copy(op.inplace_pattern),
nfunc_spec=op.nfunc_spec)
# If the elemwise operation is a pow, casts might be required on the
# inputs and or outputs because only the (float, float)->float and
# (double, double)->double cases are implemented at the moment.
if isinstance(op.scalar_op, Pow):
# Only transfer the computation on the gpu if the output dtype is
# floating point. Else, give up on the transfer to the gpu.
out_dtype = node.outputs[0].dtype
if out_dtype not in ['float16', 'float32', 'float64']:
return
# Transfer the inputs on the GPU and cast them to the right dtype.
new_inputs = []
for inp in node.inputs:
if inp.dtype != out_dtype:
gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
new_inputs.append(gpu_cast_op(as_gpuarray_variable(inp)))
else:
new_inputs.append(as_gpuarray_variable(inp))
# Perform the exponent on the gpu and transfer the output back to the
# cpu.
gpu_output = res(*new_inputs)
cpu_output = host_from_gpu(gpu_output)
return [cpu_output]
else:
return res
def max_inputs_to_GpuElemwise(node):
ptr_size = 8
int_size = 4
# we take the limit from CUDA for now
argument_limit = 232
ndim = node.inputs[0].type.ndim
# number of elements and shape
size_param_mandatory = (int_size * (ndim + 1)) + \
(ptr_size + int_size * ndim) * len(node.outputs)
nb_bytes_avail = argument_limit - size_param_mandatory
nb_bytes_per_input = ptr_size + ndim * int_size
max_nb_inputs = nb_bytes_avail // nb_bytes_per_input
return max_nb_inputs
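# Illustrative check of the arithmetic above: for 4-d inputs and one output,
# the mandatory bytes are 4*(4+1) + (8 + 4*4)*1 = 44, leaving 232 - 44 = 188
# bytes, and each input costs 8 + 4*4 = 24 bytes, so at most 188 // 24 = 7
# inputs fit in a single GpuElemwise call.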
gpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op(
GpuElemwise,
max_inputs_to_GpuElemwise)
optdb.register('gpua_elemwise_fusion',
tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion), 71.00,
'fast_run', 'fusion', 'local_elemwise_fusion', 'gpuarray')
inplace_gpu_elemwise_opt = tensor.opt.inplace_elemwise_optimizer_op(
GpuElemwise)
optdb.register('gpua_inplace_opt', inplace_gpu_elemwise_opt, 75,
'inplace_elemwise_optimizer', 'fast_run', 'inplace', 'gpuarray')
@register_opt('fast_compile')
@op_lifter([tensor.DimShuffle])
def local_gpua_dimshuffle(node, context_name):
return GpuDimShuffle(node.op.input_broadcastable,
node.op.new_order)
@register_opt('fast_compile')
@op_lifter([tensor.SpecifyShape])
def local_gpua_specifyShape(node, context_name):
if isinstance(node.inputs[0].type, GpuArrayType):
return
inp = [GpuFromHost(context_name)(node.inputs[0])] + node.inputs[1:]
return tensor.specify_shape(*inp)
@register_opt('fast_compile')
@op_lifter([theano.compile.ops.Shape])
def local_gpua_shape(node, context_name):
# op_lifter will call this opt too frequently as the output is
# always on the CPU.
if isinstance(node.inputs[0].type, GpuArrayType):
return
return [GpuFromHost(context_name)(node.inputs[0]).shape]
def gpu_print_wrapper(op, cnda):
op.old_op.global_fn(op.old_op, numpy.asarray(cnda))
@register_opt('fast_compile')
@op_lifter([tensor.printing.Print])
def local_gpu_print_op(node, context_name):
x, = node.inputs
gpu_x, = x.owner.inputs
new_op = node.op.__class__(global_fn=gpu_print_wrapper)
new_op.old_op = node.op
return new_op(gpu_x)
@register_opt('fast_compile')
@local_optimizer([PdbBreakpoint])
def local_gpu_pdbbreakpoint_op(node):
if isinstance(node.op, PdbBreakpoint):
old_inputs = node.inputs
old_outputs = node.outputs
new_inputs = node.inputs[:1]
input_transfered = []
# Go through the monitored variables, only transfering on GPU those
# for which the input comes from the GPU or the output will be
# transfered on the GPU.
nb_monitored_vars = len(node.outputs)
for i in range(nb_monitored_vars):
inp = old_inputs[i + 1]
out = old_outputs[i]
input_is_from_gpu = (inp.owner and
isinstance(inp.owner.op, HostFromGpu))
output_goes_to_gpu = False
for c in out.clients:
if c == 'output':
continue
if isinstance(c[0].op, GpuFromHost):
output_goes_to_gpu = True
context_name = c[0].op.context_name
break
if input_is_from_gpu:
# The op should be applied on the GPU version of the input
new_inputs.append(inp.owner.inputs[0])
input_transfered.append(True)
elif output_goes_to_gpu:
# The input should be transfered to the gpu
new_inputs.append(GpuFromHost(context_name)(inp))
input_transfered.append(True)
else:
# No transfer is required.
new_inputs.append(inp)
input_transfered.append(False)
# Only continue the optimization if at least one input has been
# transfered to the gpu
if not any(input_transfered):
return False
# Apply the op on the new inputs
new_op_outputs = node.op(*new_inputs, return_list=True)
# Propagate the transfer to the gpu through the outputs that require
# it
new_outputs = []
for i in range(len(new_op_outputs)):
if input_transfered[i]:
new_outputs.append(host_from_gpu(new_op_outputs[i]))
else:
new_outputs.append(new_op_outputs[i])
return new_outputs
return False
@register_opt('fast_compile')
@op_lifter([tensor.Join])
def local_gpua_join(node, context_name):
return gpu_join
@register_opt('fast_compile')
@local_optimizer([GpuJoin])
def local_gpuajoin_1(node):
# join of a single element
if (isinstance(node.op, GpuJoin) and
len(node.inputs) == 2):
return [node.inputs[1]]
@register_opt('fast_compile')
@op_lifter([tensor.Split])
def local_gpua_split(node, context_name):
return GpuSplit(node.op.len_splits)
@register_opt('fast_compile')
@op_lifter([tensor.Subtensor])
def local_gpua_subtensor(node, context_name):
x = node.inputs[0]
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
gpu_x = x.owner.inputs[0]
if (gpu_x.owner and
isinstance(gpu_x.owner.op, GpuFromHost) and
# And it is a shared var or an input of the graph.
not gpu_x.owner.inputs[0].owner):
if len(x.clients) == 1:
if any([n == 'output' or any([isinstance(v.type, GpuArrayType)
for v in n.inputs + n.outputs])
for n, _ in node.outputs[0].clients]):
return
else:
return [host_from_gpu(gpu_x.owner.op(node.outputs[0]))]
return GpuSubtensor(node.op.idx_list)
@register_opt('fast_compile')
@op_lifter([tensor.IncSubtensor])
def local_gpua_incsubtensor(node, context_name):
return GpuIncSubtensor(node.op.idx_list, node.op.inplace,
node.op.set_instead_of_inc,
node.op.destroyhandler_tolerate_aliased)
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedSubtensor1])
def local_gpua_advanced_subtensor(node, context_name):
return GpuAdvancedSubtensor1()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedIncSubtensor1])
def local_gpua_advanced_incsubtensor(node, context_name):
# This is disabled on non-cuda contexts
if get_context(context_name).kind != 'cuda':
return None
x, y, ilist = node.inputs
# Gpu Ops needs both inputs to have the same dtype
if (x.type.dtype != y.type.dtype):
dtype = scalar.upcast(x.type.dtype, y.type.dtype)
if x.type.dtype != dtype:
x = tensor.cast(x, dtype)
if y.type.dtype != dtype:
y = tensor.cast(y, dtype)
set_instead_of_inc = node.op.set_instead_of_inc
active_device_no = theano.sandbox.cuda.active_device_number()
device_properties = theano.sandbox.cuda.device_properties
compute_capability = device_properties(active_device_no)['major']
if (compute_capability < 2 or x.ndim != 2 or y.ndim != 2):
return [GpuAdvancedIncSubtensor1(
set_instead_of_inc=set_instead_of_inc)(x, y, ilist)]
else:
return [GpuAdvancedIncSubtensor1_dev20(
set_instead_of_inc=set_instead_of_inc)(x, y, ilist)]
@register_opt('fast_compile')
@op_lifter([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod])
def local_gpua_careduce(node, context_name):
if isinstance(node.op.scalar_op, (scalar.Add, scalar.Mul,
scalar.Maximum, scalar.Minimum)):
ctx = get_context(context_name)
if ctx.kind == 'opencl':
op = GpuCAReduceCPY
if node.op.scalar_op not in [scalar.add, scalar.mul]:
# We don't support yet all reduction with cpy code.
return
elif ctx.kind == 'cuda':
op = GpuCAReduceCuda
else:
return False
x, = node.inputs
greduce = op(
node.op.scalar_op, axis=node.op.axis,
dtype=getattr(node.op, 'dtype', None),
acc_dtype=getattr(node.op, 'acc_dtype', None))
x.tag.context_name = context_name
gvar = greduce(x)
# We need to have the make node called, otherwise the mask can
# be None
if (op is GpuCAReduceCPY or
gvar.owner.op.supports_c_code([GpuFromHost(context_name)(x)])):
return greduce
else:
# Try to make a simpler pattern based on reshaping
# The principle is that if two adjacent dimensions have
# the same value in the reduce_mask, then we can reshape
# to make them a single dimension, do the reduction, and
# then reshape to get them back.
if node.op.axis is None:
reduce_mask = [1] * x.type.ndim
else:
reduce_mask = [0] * x.type.ndim
for a in node.op.axis:
assert reduce_mask[a] == 0
reduce_mask[a] = 1
shape_of = node.fgraph.shape_feature.shape_of
x_shape = shape_of[x]
new_in_shp = [x_shape[0]]
new_mask = [reduce_mask[0]]
for i in xrange(1, x.type.ndim):
if reduce_mask[i] == reduce_mask[i - 1]:
new_in_shp[-1] *= x_shape[i]
else:
new_mask.append(reduce_mask[i])
new_in_shp.append(x_shape[i])
new_axis = []
for idx, m in enumerate(new_mask):
if m == 1:
new_axis.append(idx)
greduce = op(
node.op.scalar_op,
axis=new_axis, reduce_mask=new_mask,
dtype=getattr(node.op, 'dtype', None),
acc_dtype=getattr(node.op, 'acc_dtype', None))
reshaped_x = x.reshape(tensor.stack(new_in_shp))
gpu_reshaped_x = GpuFromHost(context_name)(reshaped_x)
gvar = greduce(gpu_reshaped_x)
# We need to have the make node called, otherwise the mask can
# be None
reshaped_gpu_inputs = [gpu_reshaped_x]
if greduce.supports_c_code(reshaped_gpu_inputs):
reduce_reshaped_x = host_from_gpu(
greduce(gpu_reshaped_x))
if reduce_reshaped_x.ndim != node.outputs[0].ndim:
unreshaped_reduce = reduce_reshaped_x.reshape(
tensor.stack(shape_of[node.outputs[0]]))
else:
unreshaped_reduce = reduce_reshaped_x
return [unreshaped_reduce]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemv, tensor.blas_c.CGemv])
def local_gpua_gemv(node, context_name):
return GpuGemv(inplace=node.op.inplace)
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemm])
def local_gpua_gemm(node, context_name):
return GpuGemm(inplace=node.op.inplace)
@register_opt('fast_compile')
@op_lifter([tensor.basic.Dot])
def local_gpua_hgemm(node, context_name):
from theano.sandbox.cuda import nvcc_compiler
if nvcc_compiler.nvcc_version < '7.5':
_logger.warning("Not performing dot of float16 on the GPU since "
"cuda 7.5 is not available. Updating could speed up "
"your code.")
return
A = node.inputs[0]
B = node.inputs[1]
if (A.ndim == 2 and B.ndim == 2 and
A.dtype == 'float16' and B.dtype == 'float16'):
fgraph = node.inputs[0].fgraph
C = GpuAllocEmpty(dtype='float16', context_name=context_name)(
shape_i(A, 0, fgraph),
shape_i(B, 1, fgraph))
return gpugemm_no_inplace(C, 1.0, A, B, 0.0)
@register_opt()
@alpha_merge(GpuGemm, alpha_in=1, beta_in=4)
def local_gpuagemm_alpha_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemm, alpha_in=1, beta_in=4, out_in=0)
def local_gpuagemm_output_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer])
def local_gpua_ger(node, context_name):
return GpuGer(inplace=node.op.destructive)
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22])
def local_gpua_dot22(node, context_name):
return gpu_dot22
@register_opt('fast_compile')
@op_lifter([tensor.basic.Eye])
def local_gpua_eye(node, context_name):
return GpuEye(dtype=node.op.dtype, context_name=context_name)
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias], cuda_only=True)
def local_gpua_crossentropysoftmaxargmax1hotwithbias(node, context_name):
return GpuCrossentropySoftmaxArgmax1HotWithBias()
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmax1HotWithBiasDx], cuda_only=True)
def local_gpua_crossentropysoftmax1hotwithbiasdx(node, context_name):
return GpuCrossentropySoftmax1HotWithBiasDx()
@register_opt('fast_compile')
@op_lifter([tensor.nnet.Softmax], cuda_only=True)
def local_gpua_softmax(node, context_name):
return GpuSoftmax()
@register_opt('fast_compile')
@op_lifter([tensor.nnet.SoftmaxWithBias], cuda_only=True)
def local_gpua_softmaxwithbias(node, context_name):
return GpuSoftmaxWithBias()
@register_opt('fast_compile')
@op_lifter([theano.tensor.opt.Assert])
def local_assert(node, context_name):
if (node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, HostFromGpu)):
return [host_from_gpu(node.op(node.inputs[0].owner.inputs[0],
*node.inputs[1:]))]
@register_opt('fast_compile')
@op_lifter([ConvOp])
def local_gpu_conv(node, context_name):
def GpuConvOp_from_ConvOp(op):
logical_img_hw = None
if op.kshp_logical is not None and op.kshp_logical != op.kshp:
return None
ret = GpuConv(border_mode=op.out_mode,
subsample=(op.dx, op.dy),
logical_img_hw=logical_img_hw,
logical_kern_hw=op.kshp_logical,
logical_kern_align_top=op.kshp_logical_top_aligned,
kshp=op.kshp,
version=op.version,
direction_hint=op.direction_hint,
verbose=op.verbose,
imshp=op.imshp,
nkern=op.nkern,
bsize=op.bsize,
fft_opt=op.fft_opt)
if op.imshp_logical is not None:
logical_img_hw = op.imshp_logical[1:3]
if logical_img_hw != op.imshp[1:3]:
rstride = int(numpy.ceil(op.imshp_logical[1] /
float(op.imshp[1])))
cstride = int(numpy.ceil(op.imshp_logical[2] /
float(op.imshp[2])))
def make_graph(img, kern):
buf = tensor.alloc(numpy.asarray(0, dtype=img.dtype),
img.shape[0], *op.imshp_logical)
img = tensor.set_subtensor(buf[:, :, ::rstride, ::cstride],
img)
img = GpuFromHost(context_name)(img)
return ret(img, kern)
return make_graph
return ret
def values_eq_approx(a, b):
"""
        This function is needed so that DebugMode does not raise
        spurious errors caused by rounding.
        Rounding error can show up because we reduce over the two last
        dimensions, so the absolute error grows when the number of
        elements we reduce over is significant.
"""
assert a.ndim == 4
atol = None
if a.shape[-1] * a.shape[-2] > 100:
# For float32 the default atol is 1e-5
atol = 3e-5
return GpuArrayType.values_eq_approx(a, b, atol=atol)
img, kern = node.inputs
gpu_conv = GpuConvOp_from_ConvOp(node.op)
if gpu_conv is None:
return
out = gpu_conv(GpuFromHost(context_name)(img),
GpuFromHost(context_name)(kern))
assert isinstance(out.type, GpuArrayType)
# Make sure to keep the broadcastable pattern of the original
# convolution even if we might gain or lose some due to different
# information at the node level.
out = tensor.patternbroadcast(out, node.outputs[0].broadcastable)
out.values_eq_approx = values_eq_approx
return [out]
# Register this here so that it goes after 'local_gpu_conv'
register_opt()(conv_groupopt)
@register_opt("low_memory")
@local_optimizer([GpuCAReduceCuda])
def local_gpu_elemwise_careduce(node):
"""
Merge some GpuCAReduceCuda and GPUElemwise.
"""
if (isinstance(node.op, GpuCAReduceCuda) and
node.op.pre_scalar_op is None and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, GpuElemwise) and
            # The Op supports all scalar ops with 1 input. We don't
            # automatically add more cases, as some of them (e.g.
            # trigonometric operations) combined with some reduction
            # patterns would probably result in a slowdown.
isinstance(node.inputs[0].owner.op.scalar_op, scalar.basic.Sqr)):
op = node.op
inp = node.inputs[0].owner.inputs[0]
return [GpuCAReduceCuda(scalar_op=op.scalar_op,
reduce_mask=op.reduce_mask,
pre_scalar_op=scalar.basic.sqr)(inp)]
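# Illustrative note (not part of the original file): the rewrite above targets
# graphs of the form sum(sqr(x)), e.g. built as
#     x = theano.tensor.matrix('x')
#     cost = theano.tensor.sqr(x).sum()
# where the elementwise square is folded into the reduction as its
# pre_scalar_op, avoiding a separate elementwise kernel on the GPU.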
def tensor_to_gpu(x, context_name):
if isinstance(x.type, tensor.TensorType):
y = GpuArrayType(broadcastable=x.type.broadcastable,
context_name=context_name,
dtype=x.type.dtype)()
if x.name:
y.name = x.name + '[Gpua]'
return y
else:
return x
def gpu_safe_new(x, tag=''):
"""
Internal function that constructs a new variable from x with the same
type, but with a different name (old name + tag). This function is used
by gradient, or the R-op to construct new variables for the inputs of
the inner graph such that there is no interference between the original
graph and the newly constructed graph.
"""
if hasattr(x, 'name') and x.name is not None:
nw_name = x.name + tag
else:
nw_name = None
if isinstance(x, theano.Constant):
return x.clone()
nw_x = x.type()
nw_x.name = nw_name
return nw_x
def gpu_reconstruct_graph(inputs, outputs, tag=None):
"""
Different interface to clone, that allows you to pass inputs.
Compared to clone, this method always replaces the inputs with
new variables of the same type, and returns those (in the same
order as the original inputs).
"""
if tag is None:
tag = ''
nw_inputs = [gpu_safe_new(x, tag) for x in inputs]
givens = {}
for nw_x, x in zip(nw_inputs, inputs):
givens[x] = nw_x
nw_outputs = scan_utils.clone(outputs, replace=givens)
return (nw_inputs, nw_outputs)
@register_opt('scan', 'fast_compile')
@op_lifter([scan_op.Scan])
def local_scan_to_gpua(node, context_name):
info = copy.deepcopy(node.op.info)
if info.get('gpua', False):
return
info['gpua'] = True
nw_ins = [node.inputs[0]]
e = (1 +
node.op.n_seqs +
node.op.n_mit_mot +
node.op.n_mit_sot +
node.op.n_sit_sot +
node.op.n_shared_outs)
nw_ins += [safe_to_gpu(x, context_name) for x in node.inputs[1:e]]
b = e
e = e + node.op.n_nit_sot
nw_ins += node.inputs[b:e]
nw_ins += [safe_to_gpu(x, context_name) for x in node.inputs[e:]]
scan_ins = [tensor_to_gpu(x, context_name) for x in node.op.inputs]
# The inner output corresponding to the looping condition should not be
# moved to the gpu
if node.op.info['as_while']:
scan_outs = [safe_to_gpu(x, context_name) for x in node.op.outputs[:-1]]
scan_outs += [node.op.outputs[-1]]
else:
scan_outs = [safe_to_gpu(x, context_name) for x in node.op.outputs]
scan_outs = scan_utils.clone(
scan_outs,
replace=list(zip(node.op.inputs,
(safe_to_cpu(x) for x in scan_ins))))
# We need to construct the hash here, because scan
# __init__ does not know about the gpu and can not
# handle graphs with inputs being on the gpu
tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=True)
_cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
info['gpu_hash'] = hash(_cmodule_key)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
nw_op = scan_op.Scan(scan_ins, scan_outs, info,
typeConstructor=typebuild).make_node(*nw_ins)
return nw_op.outputs
def _scan_type_infer(node):
context_name = infer_context_name(*node.inputs)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
return typebuild
optdb.register('gpua_scanOp_make_inplace',
scan_opt.ScanInplaceOptimizer(typeInfer=_scan_type_infer,
gpua_flag=True),
75,
'gpuarray',
'fast_run',
'inplace',
'scan')
|
bsd-3-clause
| -188,701,961,609,684,220
| 34.253761
| 80
| 0.584898
| false
| 3.531043
| false
| false
| false
|
astroswego/data-plots
|
src/data_plots/stats.py
|
1
|
3782
|
import numpy
import matplotlib
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from matplotlib import rcParams
from scipy.optimize import curve_fit
from data_plots.utils import labeler, titler
rcParams['text.usetex'] = True
def scatter_hist(x, y, *args,
bins=10,
linestyle='r--', scatterstyle='k+',
histtype='stepfilled', facecolor='#FFFFFF', hatch='/',
show_mean=True, show_std=True,
**kwargs):
# no labels
nullfmt = NullFormatter()
# definitions for axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left+width+0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
# start with a rectangular Figure
fig = plt.figure(1, figsize=(8, 8))
axScatter = fig.add_axes(rect_scatter)
axHistx = fig.add_axes(rect_histx)
axHisty = fig.add_axes(rect_histy)
# no labels on some axes
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
# the scatter plot:
axScatter.plot(x, y, scatterstyle)
# determine limits
xmin, ymin = numpy.min(x), numpy.min(y)
xmax, ymax = numpy.max(x), numpy.max(y)
x_mean, y_mean = x.mean(), y.mean()
x_std, y_std = x.std(), y.std()
# xlims = ((numpy.array([-xmin, xmax]) // binwidth) + 1) * binwidth
# ylims = ((numpy.array([-ymin, ymax]) // binwidth) + 1) * binwidth
xbins = numpy.linspace(xmin, xmax, bins)
ybins = numpy.linspace(ymin, ymax, bins)
# xbins = numpy.arange(-xlims[0], xlims[1]+binwidth, binwidth)
# ybins = numpy.arange(-ylims[0], ylims[1]+binwidth, binwidth)
n, xbins, xpatches = axHistx.hist(x, bins=xbins, normed=1,
histtype=histtype, facecolor=facecolor,
hatch=hatch)
n, ybins, ypatches = axHisty.hist(y, bins=ybins, normed=1,
histtype=histtype, facecolor=facecolor,
hatch=hatch,
orientation='horizontal')
mean_formatter = r'$\mu = {0:.5f}$'.format
std_formatter = r'$\sigma = {0:.5f}$'.format
xhandles, yhandles = [], []
xlabels, ylabels = [], []
if show_mean:
p = plt.Rectangle((0, 0), 1, 1, fc="r")
xlabels.append(mean_formatter(x_mean))
ylabels.append(mean_formatter(y_mean))
xhandles.append(p)
yhandles.append(p)
if show_std:
p = plt.Rectangle((0, 0), 1, 1, fc="b")
xlabels.append(std_formatter(x_std))
ylabels.append(std_formatter(y_std))
xhandles.append(p)
yhandles.append(p)
if show_mean or show_std:
axHistx.legend(xhandles, xlabels,
fontsize='small', loc='upper right')
axHisty.legend(xhandles, xlabels,
fontsize='small', loc='upper right')
xpdf = mlab.normpdf(xbins, x_mean, x_std)
ypdf = mlab.normpdf(ybins, y_mean, y_std)
axHistx.plot(xbins, xpdf, linestyle)
axHisty.plot(ypdf, ybins, linestyle)
axHistx.set_xlim(axScatter.get_xlim())
axHisty.set_ylim(axScatter.get_ylim())
axHistx.locator_params(tight=False, nbins=3)
axHisty.locator_params(tight=False, nbins=3)
axHistx = titler(axHistx, **kwargs)
axScatter = labeler(axScatter, **kwargs)
return fig
def scatter_hist_from_file(input, *args, usecols=range(2), **kwargs):
x, y = numpy.loadtxt(input, usecols=usecols, unpack=True)
return scatter_hist(x, y, *args, **kwargs)
def _gauss(x, *p):
A, mu, sigma = p
return A*numpy.exp(-(x-mu)**2/(2.*sigma**2))
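# Illustrative sketch, not part of the original module: _gauss has the
# (x, A, mu, sigma) signature expected by scipy.optimize.curve_fit, which is
# imported above but unused in this fragment. The synthetic data and the
# initial guess below are arbitrary and serve only to show the call.
if __name__ == '__main__':
    x = numpy.linspace(-5, 5, 200)
    y = _gauss(x, 2.0, 0.5, 1.2) + 0.05 * numpy.random.randn(x.size)
    # p0 is the initial guess for (A, mu, sigma)
    coeff, cov = curve_fit(_gauss, x, y, p0=[1.0, 0.0, 1.0])
    print(coeff)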
|
mit
| 7,716,508,680,442,090,000
| 31.886957
| 77
| 0.59651
| false
| 3.107642
| false
| false
| false
|
HLP-R/hlpr_kinesthetic_teaching
|
hlpr_kinesthetic_interaction/src/hlpr_kinesthetic_interaction/jaco_7dof_arm.py
|
1
|
2145
|
#!/usr/bin/env python
import rospy
from hlpr_manipulation_utils.manipulator import Gripper
from kinova_msgs.srv import Start, Stop
from hlpr_kinesthetic_interaction.kinesthetic_interaction import KinestheticInteraction
"""
jaco_7dof_arm.py
Simple wrapper that abstracts out the arm class so that other arms
can use kinesthetic_interaction
"""
class Arm():
ENABLE_7DOF_GRAVITY_COMP_SERVICE = "/j2s7s300_driver/in/start_gravity_comp"
DISABLE_7DOF_GRAVITY_COMP_SERVICE = "/j2s7s300_driver/in/stop_gravity_comp"
ENABLE_7DOF_FORCE_SERVICE = "/j2s7s300_driver/in/start_force_control"
DISABLE_7DOF_FORCE_SERVICE = "/j2s7s300_driver/in/stop_force_control"
def __init__(self):
# Setup gravity compensation
rospy.logwarn("Waiting for gravity compensation service")
rospy.wait_for_service(Arm.ENABLE_7DOF_GRAVITY_COMP_SERVICE)
rospy.wait_for_service(Arm.DISABLE_7DOF_GRAVITY_COMP_SERVICE)
rospy.wait_for_service(Arm.ENABLE_7DOF_FORCE_SERVICE)
rospy.wait_for_service(Arm.DISABLE_7DOF_FORCE_SERVICE)
# Store the services
self.enable_grav_comp = rospy.ServiceProxy(Arm.ENABLE_7DOF_GRAVITY_COMP_SERVICE, Start)
self.disable_grav_comp = rospy.ServiceProxy(Arm.DISABLE_7DOF_GRAVITY_COMP_SERVICE, Stop)
self.enable_force = rospy.ServiceProxy(Arm.ENABLE_7DOF_FORCE_SERVICE, Start)
self.disable_force = rospy.ServiceProxy(Arm.DISABLE_7DOF_FORCE_SERVICE, Stop)
rospy.logwarn("Gravity compenstation service loaded")
# Initialize the gripper
self.gripper = Gripper()
def gravity_comp(self, toggle, ft_mode):
if ft_mode == KinestheticInteraction.TORQUE_MODE:
if toggle:
return self.enable_grav_comp()
else:
return self.disable_grav_comp()
elif ft_mode == KinestheticInteraction.FORCE_MODE:
if toggle:
return self.enable_force()
else:
return self.disable_force()
else:
rospy.logerr("Passed in unsupported ft mode: %s. Nothing will happen" % ft_mode)
return False
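# Minimal usage sketch, not part of the original file: toggling gravity
# compensation through the wrapper. The mode constants come from
# KinestheticInteraction (imported above); running this for real requires a
# live j2s7s300 driver, so treat it as illustrative only.
if __name__ == "__main__":
    rospy.init_node("arm_gravity_comp_demo")
    arm = Arm()
    # enable, then disable, torque-mode gravity compensation
    arm.gravity_comp(True, KinestheticInteraction.TORQUE_MODE)
    arm.gravity_comp(False, KinestheticInteraction.TORQUE_MODE)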
|
bsd-3-clause
| 3,589,335,116,544,430,000
| 38.722222
| 96
| 0.682051
| false
| 3.259878
| false
| false
| false
|
ggf84/tupan
|
tupan/particles/body.py
|
1
|
18588
|
# -*- coding: utf-8 -*-
#
"""
TODO.
"""
from __future__ import print_function
import sys
import copy
import numpy as np
from ..lib import extensions
from ..lib.utils.timing import decallmethods, timings
__all__ = ["Bodies"]
class NbodyMethods(object):
"""This class holds common methods for particles in n-body systems.
"""
include_pn_corrections = False
attrs = [ # name, sctype, doc
("id", 'uint', "index"),
("mass", 'real', "mass"),
("eps2", 'real', "squared softening"),
("rx", 'real', "x-position"),
("ry", 'real', "y-position"),
("rz", 'real', "z-position"),
("vx", 'real', "x-velocity"),
("vy", 'real', "y-velocity"),
("vz", 'real', "z-velocity"),
("time", 'real', "current time"),
("nstep", 'uint', "step number"),
("tstep", 'real', "time step"),
]
special_attrs = [ # name, sctype, doc
]
@property # TODO: @classproperty ???
def dtype(self):
from ..lib.utils.ctype import ctypedict
return [(name, ctypedict[sctype])
for name, sctype, _ in self.attrs]
@property # TODO: @classproperty ???
def special_dtype(self):
from ..lib.utils.ctype import ctypedict
return [(name, ctypedict[sctype])
for name, sctype, _ in self.special_attrs]
@property
def pos(self): # XXX: deprecate?
return np.concatenate((self.rx, self.ry, self.rz,)).reshape(3, -1).T
@property
def vel(self): # XXX: deprecate?
return np.concatenate((self.vx, self.vy, self.vz,)).reshape(3, -1).T
@property
def px(self):
return self.mass * self.vx
@property
def py(self):
return self.mass * self.vy
@property
def pz(self):
return self.mass * self.vz
### total mass and center-of-mass methods
@property
def total_mass(self):
"""Total mass of the system.
"""
return float(self.mass.sum())
@property
def com_r(self):
"""Center-of-Mass position of the system.
.. note::
Post-Newtonian corrections, if enabled, are included.
"""
mrx = self.mass * self.rx
mry = self.mass * self.ry
mrz = self.mass * self.rz
if self.include_pn_corrections:
if not "pn_mrx" in self.__dict__:
self.register_auxiliary_attribute("pn_mrx", "real")
if not "pn_mry" in self.__dict__:
self.register_auxiliary_attribute("pn_mry", "real")
if not "pn_mrz" in self.__dict__:
self.register_auxiliary_attribute("pn_mrz", "real")
mrx += self.pn_mrx
mry += self.pn_mry
mrz += self.pn_mrz
mr = np.array([mrx, mry, mrz]).T
return mr.sum(0) / self.total_mass
@property
def com_v(self):
"""Center-of-Mass velocity of the system.
.. note::
Post-Newtonian corrections, if enabled, are included.
"""
mvx, mvy, mvz = self.px, self.py, self.pz
if self.include_pn_corrections:
if not "pn_mvx" in self.__dict__:
self.register_auxiliary_attribute("pn_mvx", "real")
if not "pn_mvy" in self.__dict__:
self.register_auxiliary_attribute("pn_mvy", "real")
if not "pn_mvz" in self.__dict__:
self.register_auxiliary_attribute("pn_mvz", "real")
mvx += self.pn_mvx
mvy += self.pn_mvy
mvz += self.pn_mvz
mv = np.array([mvx, mvy, mvz]).T
return mv.sum(0) / self.total_mass
@property
def com_linear_momentum(self):
"""Center-of-Mass linear momentum of the system.
"""
mtot = self.total_mass
com_v = self.com_v
return mtot * com_v
@property
def com_angular_momentum(self):
"""Center-of-Mass angular momentum of the system.
"""
mtot = self.total_mass
com_r = self.com_r
com_v = self.com_v
return mtot * np.cross(com_r, com_v)
@property
def com_kinetic_energy(self):
"""Center-of-Mass kinetic energy of the system.
"""
mtot = self.total_mass
com_v = self.com_v
return 0.5 * mtot * (com_v**2).sum()
def com_move_to(self, com_r, com_v):
"""Moves the center-of-mass to the given coordinates.
"""
self.rx += com_r[0]
self.ry += com_r[1]
self.rz += com_r[2]
self.vx += com_v[0]
self.vy += com_v[1]
self.vz += com_v[2]
def com_to_origin(self):
"""Moves the center-of-mass to the origin of coordinates.
"""
self.com_move_to(-self.com_r, -self.com_v)
### linear momentum
@property
def lm(self):
"""Individual linear momentum.
.. note::
Post-Newtonian corrections, if enabled, are included.
"""
lmx, lmy, lmz = self.px, self.py, self.pz
if self.include_pn_corrections:
if not "pn_mvx" in self.__dict__:
self.register_auxiliary_attribute("pn_mvx", "real")
if not "pn_mvy" in self.__dict__:
self.register_auxiliary_attribute("pn_mvy", "real")
if not "pn_mvz" in self.__dict__:
self.register_auxiliary_attribute("pn_mvz", "real")
lmx += self.pn_mvx
lmy += self.pn_mvy
lmz += self.pn_mvz
return np.array([lmx, lmy, lmz]).T
@property
def linear_momentum(self):
"""Total linear momentum of the system.
.. note::
This quantity possibly includes the linear momentum of the
center-of-mass w.r.t. the origin of coordinates.
.. note::
Post-Newtonian corrections, if enabled, are included.
"""
return self.lm.sum(0)
### angular momentum
@property
def am(self):
"""Individual angular momentum.
.. note::
Post-Newtonian corrections, if enabled, are included.
"""
px, py, pz = self.px, self.py, self.pz
amx = (self.ry * pz) - (self.rz * py)
amy = (self.rz * px) - (self.rx * pz)
amz = (self.rx * py) - (self.ry * px)
if self.include_pn_corrections:
if not "pn_amx" in self.__dict__:
self.register_auxiliary_attribute("pn_amx", "real")
if not "pn_amy" in self.__dict__:
self.register_auxiliary_attribute("pn_amy", "real")
if not "pn_amz" in self.__dict__:
self.register_auxiliary_attribute("pn_amz", "real")
amx += self.pn_amx
amy += self.pn_amy
amz += self.pn_amz
return np.array([amx, amy, amz]).T
@property
def angular_momentum(self):
"""Total angular momentum of the system.
.. note::
This quantity possibly includes the angular momentum of the
center-of-mass w.r.t. the origin of coordinates.
.. note::
Post-Newtonian corrections, if enabled, are included.
"""
return self.am.sum(0)
### kinetic energy
@property
def ke(self):
"""Individual kinetic energy.
.. note::
Post-Newtonian corrections, if enabled, are included.
"""
ke = 0.5 * self.mass * (self.vx**2 + self.vy**2 + self.vz**2)
if self.include_pn_corrections:
if not "pn_ke" in self.__dict__:
self.register_auxiliary_attribute("pn_ke", "real")
ke += self.pn_ke
return ke
@property
def kinetic_energy(self):
"""Total kinetic energy of the system.
.. note::
This quantity possibly includes the kinetic energy of the
center-of-mass w.r.t. the origin of coordinates.
.. note::
Post-Newtonian corrections, if enabled, are included.
"""
return float(self.ke.sum())
### potential energy
@property
def pe(self):
"""Individual potential energy.
"""
self.set_phi(self)
return self.mass * self.phi
@property
def potential_energy(self):
"""Total potential energy.
"""
return 0.5 * float(self.pe.sum())
### virial energy
@property
def ve(self):
"""Individual virial energy.
"""
return 2 * self.ke + self.pe
@property
def virial_energy(self):
"""Total virial energy.
"""
return 2 * self.kinetic_energy + self.potential_energy
### gravity
def set_tstep(self, ps, eta):
"""Set individual time-steps due to other particles.
"""
extensions.tstep.calc(self, ps, eta)
def set_phi(self, ps):
"""Set individual gravitational potential due to other particles.
"""
extensions.phi.calc(self, ps)
def set_acc(self, ps):
"""Set individual gravitational acceleration due to other particles.
"""
extensions.acc.calc(self, ps)
def set_pnacc(self, ps):
"""Set individual post-Newtonian gravitational acceleration due to
other particles.
"""
extensions.pnacc.calc(self, ps)
def set_acc_jerk(self, ps):
"""Set individual gravitational acceleration and jerk due to other
particles.
"""
extensions.acc_jerk.calc(self, ps)
def set_snap_crackle(self, ps):
"""Set individual gravitational snap and crackle due to other
particles.
"""
extensions.snap_crackle.calc(self, ps)
### miscellaneous methods
def min_tstep(self):
"""Minimum absolute value of tstep.
"""
return abs(self.tstep).min()
def max_tstep(self):
"""Maximum absolute value of tstep.
"""
return abs(self.tstep).max()
    ### length scales
@property
def virial_radius(self):
"""Virial radius of the system.
"""
mtot = self.total_mass
pe = self.potential_energy
return (mtot**2) / (-2*pe)
@property
def radial_size(self):
"""Radial size of the system (a.k.a. radius of gyration).
.. note::
This quantity is calculated w.r.t. the center-of-mass of the
system.
"""
com_r = self.com_r
rx = self.rx - com_r[0]
ry = self.ry - com_r[1]
rz = self.rz - com_r[2]
I = (self.mass * (rx**2 + ry**2 + rz**2)).sum()
s = (I / self.total_mass)**0.5
return s
### rescaling methods
def dynrescale_total_mass(self, total_mass):
"""Rescales the total mass of the system while maintaining its
dynamics unchanged.
"""
m_ratio = total_mass / self.total_mass
self.mass *= m_ratio
self.rx *= m_ratio
self.ry *= m_ratio
self.rz *= m_ratio
def dynrescale_radial_size(self, size):
"""Rescales the radial size of the system while maintaining its
dynamics unchanged.
"""
r_scale = size / self.radial_size
v_scale = 1 / r_scale**0.5
self.rx *= r_scale
self.ry *= r_scale
self.rz *= r_scale
self.vx *= v_scale
self.vy *= v_scale
self.vz *= v_scale
def dynrescale_virial_radius(self, rvir):
"""Rescales the virial radius of the system while maintaining its
dynamics unchanged.
"""
r_scale = rvir / self.virial_radius
v_scale = 1 / r_scale**0.5
self.rx *= r_scale
self.ry *= r_scale
self.rz *= r_scale
self.vx *= v_scale
self.vy *= v_scale
self.vz *= v_scale
def scale_to_virial(self):
"""Rescale system to virial equilibrium (2K + U = 0).
"""
ke = self.kinetic_energy
pe = self.potential_energy
v_scale = ((-0.5 * pe) / ke)**0.5
self.vx *= v_scale
self.vy *= v_scale
self.vz *= v_scale
def to_nbody_units(self):
"""Rescales system to nbody units while maintaining its dynamics
unchanged.
"""
self.dynrescale_total_mass(1.0)
self.dynrescale_virial_radius(1.0)
class PNbodyMethods(NbodyMethods):
"""This class holds some post-Newtonian methods.
"""
### PN stuff
### TODO: move these methods to a more appropriate place...
def pn_kick_ke(self, tau):
"""Kicks kinetic energy due to post-Newtonian terms.
"""
if not "pn_ke" in self.__dict__:
self.register_auxiliary_attribute("pn_ke", "real")
pnfx = self.mass * self.pnax
pnfy = self.mass * self.pnay
pnfz = self.mass * self.pnaz
self.pn_ke -= (self.vx * pnfx + self.vy * pnfy + self.vz * pnfz) * tau
def pn_drift_com_r(self, tau):
"""Drifts center of mass position due to post-Newtonian terms.
"""
if not "pn_mrx" in self.__dict__:
self.register_auxiliary_attribute("pn_mrx", "real")
if not "pn_mry" in self.__dict__:
self.register_auxiliary_attribute("pn_mry", "real")
if not "pn_mrz" in self.__dict__:
self.register_auxiliary_attribute("pn_mrz", "real")
self.pn_mrx += self.pn_mvx * tau
self.pn_mry += self.pn_mvy * tau
self.pn_mrz += self.pn_mvz * tau
def pn_kick_lmom(self, tau):
"""Kicks linear momentum due to post-Newtonian terms.
"""
if not "pn_mvx" in self.__dict__:
self.register_auxiliary_attribute("pn_mvx", "real")
if not "pn_mvy" in self.__dict__:
self.register_auxiliary_attribute("pn_mvy", "real")
if not "pn_mvz" in self.__dict__:
self.register_auxiliary_attribute("pn_mvz", "real")
pnfx = self.mass * self.pnax
pnfy = self.mass * self.pnay
pnfz = self.mass * self.pnaz
self.pn_mvx -= pnfx * tau
self.pn_mvy -= pnfy * tau
self.pn_mvz -= pnfz * tau
def pn_kick_amom(self, tau):
"""Kicks angular momentum due to post-Newtonian terms.
"""
if not "pn_amx" in self.__dict__:
self.register_auxiliary_attribute("pn_amx", "real")
if not "pn_amy" in self.__dict__:
self.register_auxiliary_attribute("pn_amy", "real")
if not "pn_amz" in self.__dict__:
self.register_auxiliary_attribute("pn_amz", "real")
pnfx = self.mass * self.pnax
pnfy = self.mass * self.pnay
pnfz = self.mass * self.pnaz
self.pn_amx -= (self.ry * pnfz - self.rz * pnfy) * tau
self.pn_amy -= (self.rz * pnfx - self.rx * pnfz) * tau
self.pn_amz -= (self.rx * pnfy - self.ry * pnfx) * tau
AbstractNbodyMethods = NbodyMethods
if "--pn_order" in sys.argv:
AbstractNbodyMethods = PNbodyMethods
#@decallmethods(timings)
#@make_attrs
# class Body(AbstractNbodyMethods):
# """
# The most basic particle type.
# """
# attrs = AbstractNbodyMethods.attrs + AbstractNbodyMethods.special_attrs
# names = AbstractNbodyMethods.names + AbstractNbodyMethods.special_names
# dtype = [(_[0], _[1]) for _ in attrs]
# data0 = np.zeros(0, dtype)
#
# def __init__(self, n=0, data=None):
# """
# Initializer
# """
# if data is None:
# if n: data = np.zeros(n, self.dtype)
# else: data = self.data0
# self.data = data
# self.n = len(self)
#
# #
# # miscellaneous methods
# #
#
#
# def append(self, obj):
# if obj.n:
# self.data = np.concatenate((self.data, obj.data))
# self.n = len(self)
#
#
# def remove(self, id):
# slc = np.where(self.id == id)
# self.data = np.delete(self.data, slc, 0)
# self.n = len(self)
#
#
# def insert(self, id, obj):
# index = np.where(self.id == id)[0]
# v = obj.data
# self.data = np.insert(self.data, index*np.ones(len(v)), v, 0)
# self.n = len(self)
#
#
# def pop(self, id=None):
# if id is None:
# index = -1
# id = self.id[-1]
# else:
# index = np.where(self.id == id)[0]
# obj = self[index]
# self.remove(id)
# return obj
#
#
# def get_state(self):
# return self.data
#
#
# def set_state(self, array):
# self.data[...] = array
# self.n = len(self)
###############################################################################
@decallmethods(timings)
class Bodies(AbstractNbodyMethods):
"""
"""
def __init__(self, n=0, items=None):
if items is None:
for (name, dtype) in self.dtype[:1]:
self.__dict__[name] = np.arange(n, dtype=dtype)
for (name, dtype) in self.dtype[1:]+self.special_dtype:
self.__dict__[name] = np.zeros(n, dtype=dtype)
else:
self.__dict__.update(items)
def __repr__(self):
return repr(self.__dict__)
def __str__(self):
fmt = type(self).__name__+"(["
if self.n:
for (k, v) in self.__dict__.items():
fmt += "\n\t{0}: {1},".format(k, v)
fmt += "\n"
fmt += "])"
return fmt
def __contains__(self, id):
return id in self.id
def __len__(self):
return len(self.id)
@property
def n(self):
return len(self)
def copy(self):
return copy.deepcopy(self)
def append(self, obj):
if obj.n:
items = {k: np.concatenate((getattr(self, k), v))
for (k, v) in obj.__dict__.items()}
self.__dict__.update(items)
def __getitem__(self, slc):
if isinstance(slc, int):
slc = [slc]
items = {k: v[slc] for (k, v) in self.__dict__.items()}
return type(self)(items=items)
def __setitem__(self, slc, values):
for (k, v) in self.__dict__.items():
v[slc] = getattr(values, k)
def astype(self, cls):
newobj = cls()
tmp = cls(self.n)
tmp.set_state(self.get_state())
newobj.append(tmp)
return newobj
def get_state(self):
array = np.zeros(self.n, dtype=self.dtype)
for name in array.dtype.names:
array[name] = getattr(self, name)
return array
def set_state(self, array):
for name in array.dtype.names:
if name in self.__dict__:
self.__dict__[name][...] = array[name]
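# Illustrative sketch (not from the original file): building a tiny Bodies
# instance and exercising a few of the pure-numpy helpers defined above.
# The masses, positions and velocities are arbitrary demonstration values.
def _example_two_body():
    ps = Bodies(2)
    ps.mass[...] = [1.0, 1.0]
    ps.rx[...] = [-0.5, 0.5]
    ps.vy[...] = [-0.25, 0.25]
    ps.com_to_origin()    # recentre position and velocity on the CoM
    return ps.total_mass, ps.kinetic_energy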
########## end of file ##########
|
mit
| 647,833,605,706,238,800
| 26.497041
| 79
| 0.528298
| false
| 3.43586
| false
| false
| false
|
ValorNaram/isl
|
inputchangers/001.py
|
1
|
15830
|
from __future__ import print_function
import os
globe = {}
generateIndex = False
lang = "en"
brackets = ["[&]", "{&}", "(&)", "<&>"]
class compiler():
def __init__(self):
self = self
def languageSupport(self, f):
if "langBuffer" in globe:
langBuffer = globe["langBuffer"]
else:
langBuffer = {}
if not f in langBuffer:
if os.path.exists(os.path.join(os.getcwd(), "lang", lang, f)):
sfile = open(os.path.join(os.getcwd(), "lang", lang, f), "r")
langBuffer[f] = sfile.read()
sfile.close()
else:
return False
globe["langBuffer"] = langBuffer
return langBuffer[f]
def searchBracketMatch(self, text, startPos=0):
global brackets
count = 0
toMatch = ""
for bracket in brackets:
first, second = bracket.split("&")
if text[startPos] == first:
toMatch = bracket
break
if toMatch == "":
return [startPos, -1]
first, second = bracket.split("&")
bStart = startPos
while True:
bEnd = text.find(second, bStart)
tmp = text[bStart:bEnd+1]
if tmp.find(first) > -1:
if text[bEnd-1] == "\\":
count -= 1
pass
count += 1
bStart = bEnd
else:
count -= 1
if count == 0:
break
return [startPos, bEnd]
def canApplyCode(self, src, curPosition):
bStart = -1
bEnd = curPosition
while True:
if src.find("<", bStart+1, bEnd) > curPosition or src.find("<", bStart+1, bEnd) == -1:
break
else:
bStart = src.find("<", bStart+1, bEnd)
bStart, bEnd = self.searchBracketMatch(src, bStart)
if curPosition > bStart and bEnd > curPosition:
return False #Position is in between HTML code, so no threatment as ISL code
else:
return True #Position isn't in between HTML code, so threatment as ISL code
def linkOptionTrigger(self, data):
text, url = data
if url.find(";") > -1:
target, url = url.split(";", 1)
target = target.strip()
url = url.strip()
if url.find("@") > -1:
url.replace("mailto:", "", 1)
url = "mailto:" + url
return "<a href='" + url + "' tabindex=%tabindex gridindex='%gridindex' onfocus='getIndex(this)' target='_" + target.replace("_", "", 1) + "'>" + text + "</a>"
if url.find("@") > -1:
url.replace("mailto:", "", 1)
url = "mailto:" + url
return "<a href='" + url + "' tabindex=%tabindex gridindex='%gridindex' onfocus='getIndex(this)'>" + text + "</a>"
def imageOptionTrigger(self, data):
symbol, alt, url = data
return "<img tabindex=%tabindex gridindex='%gridindex' onfocus='getIndex(this)' src='" + url + "' alt='" + alt + "' title='" + alt + "' />"
def colourOptionTrigger(self, data):
text, colour = data
if colour.count(";") > 0:
return "<span style='" + colour + "'>" + text + "</span>"
else:
return "<span style='color:" + colour + ";'>" + text + "</span>"
def abbrOptionTrigger(self, data):
abbr, text = data
if text.find(";") > -1:
ariaText, text = text.split(";", 1)
ariaText = ariaText.strip()
text = text.strip()
if ariaText.lower() == abbr.lower():
message = "Text explicit for assistive technology users specified."
result = self.languageSupport("abbr1")
if not result == False:
message = result
print(" \033[0;33m" + message + "\033[0;m")
return "<a aria-label='" + str(ariaText) + "' title='" + text + "'>" + abbr + "</a>"
else:
message = "Incorrect use of abbreviation cheat prevented"
result = self.languageSupport("abbr0")
if not result == False:
message = result
print(" \033[0;33m" + message + "\033[0;m")
return "<abbr title='" + text + "'>" + abbr + "</abbr>"
def videoORaudio(self, data):
tag, title, url = data
result = self.languageSupport("no" + tag + "support")
if not result:
result = "No " + tag + " support"
return "<" + tag + " controls tabindex=%tabindex gridindex='%gridindex' aria-label='" + title + "' src='" + url + "'>" + result + "</" + tag + ">"
def formOptionTrigger(self, data):
output = "<form enctype='text/plain' method='POST' action='" + data[2] + "'>"
fieldKeys = ["title", "type", "name", "placeholder"]
fields = data[1].split(";")
keys = {"postal code": {"title" : "Postal code" ,"type" : "text", "name" : "postalcode"},
"housenumber" : {"title" : "Housenumber", "type" : "text", "name" : "housenumber", "placeholder" : "e.g. 42"},
"street": {"title" : "Street" ,"type" : "text", "name" : "street", "placeholder" : "e.g. Wallstreet"},
"country": {"title" : "Country" ,"type" : "text", "name" : "country", "placeholder" : "e.g. USA"},
"city": {"title" : "City" ,"type" : "text", "name" : "city", "placeholder" : "e.g. New York"},
"firstname": {"title" : "First name" ,"type" : "text", "name" : "firstname"},
"lastname": {"title" : "Last name" ,"type" : "text", "name" : "lastname"},
"email": {"title" : "Mail Address" ,"type" : "email", "name" : "email", "placeholder" : "e.g. info@example.com"},
"mobile": {"title" : "Mobile" ,"type" : "text", "name" : "mobile"},
"telephone": {"title" : "Telephone" ,"type" : "text", "name" : "telephone"},
"password": {"title" : "Password" ,"type" : "password", "name" : "password"},
"search" : {"type" : "text", "name" : "search", "placeholder" : "Search"}}
for field in fields:
if field in keys:
attr = keys[field]
title = ("title" in attr) and attr["title"] or ""
_type = ("type" in attr) and attr["type"] or ""
name = ("name" in attr) and attr["name"] or ""
placeholder = ("placeholder" in attr) and attr["placeholder"] or ""
output += "<label>" + title + "<input type='" + _type + "' name='" + name + "' placeholder='" + placeholder + "' tabindex='%tabindex' gridindex='%gridindex' /></label><br/>"
output += "<input type='submit' /></form>"
return output
def searchFunctions(self, src):
#search brackets
triggers = {"(" : self.linkOptionTrigger, "!" : self.imageOptionTrigger, "{" : self.colourOptionTrigger, "[" : self.abbrOptionTrigger, "video" : self.videoORaudio, "audio" : self.videoORaudio, "form" : self.formOptionTrigger}
while src.find("[") > -1:
data = []
original = ""
new = ""
function = ""
start, end = self.searchBracketMatch(src, src.find("["))
if end > 0:
if start > 0:
bStart = start
bEnd = start
					#going backwards looking for a whitespace, the beginning of 'src', or a carriage return ('\r') character
while True:
#check, if index exists in variable 'src'
if bStart-1 > -1:
#ANSWER: Yes
#It is a whitespace?
if src[bStart] == " ":
#ANSWER: Yes, stop searching, take the results you have
break
#Check for the possibility to go one step backward
elif bStart-1 > -1:
#ANSWER: Yes. Check for the newline character
if src[bStart-1:bStart] == "\r":
#ANSWER: Yes, stop searching, take the results you have
break
bStart -= 1
else:
#ANSWER: Yes, stop searching, take the results you have
break
if src[bStart+1:bEnd] in triggers:
#A cheat was found (prefix)
function = triggers[src[bStart+1:bEnd]]
original += src[bStart+1:bEnd]
data.append(src[bStart+1:bEnd])
if src[end+1] in triggers and function == "":
#A cheat was found
function = triggers[src[end+1]]
data.append(src[start+1:end])
original += src[start:end+1]
start, end = self.searchBracketMatch(src, end+1)
if end > 0 and not function == "":
data.append(src[start+1:end])
original += src[start:end+1]
new = function(data)
src = src.replace(original, new)
src = src.replace(original, new)
return src
def buffering_intern(self, buffer, path, langFile, errortext):
global globe
if os.path.exists(path):
sfile = open(path, "r")
filebuffer = sfile.read()
sfile.close()
globe[buffer] = filebuffer
return filebuffer
else:
result = self.languageSupport("emojirefmissing")
if not result == False:
print(" \033[0;31m" + result + "\033[0;m")
else:
print(" \033[0;31mFile" + errortext + "\033[0;m")
return ""
def code(self, src, char):
global globe
unicodeBuffer = ""
bStart = 0
bEnd = 0
if "unicodeBuffer" in globe:
unicodeBuffer = globe["unicodeBuffer"]
if unicodeBuffer == "":
unicodeBuffer = self.buffering_intern("unicodeBuffer", os.path.join("unicode", "code"), "coderefmissing", "Code cheat not available due to missing file 'unicode/code'")
if unicodeBuffer == "":
return src
while src.find(char, 0, src.find(char) +len(char)) > -1:
bStart = src.find(char, 0, src.find(char) +len(char)) + len(char)
bEnd = src.find(char, bStart)
if bEnd == -1:
break
text = src[bStart:bEnd]
text_new = text
filebuffer = ""
for item in unicodeBuffer.split("\n"):
if item.find(" | ") > -1:
code, unicode = item.split(" | ", 1)
text_new = text_new.replace(code.strip(), unicode.strip())
src = src.replace(str(char) + text + str(char), "<code>" + text_new + "</code>")
return src
def headings(self, src):
global generateIndex
index = ""
headings = {"#" : True, "##" : True, "###" : True, "####" : True, "#####" : True, "######" : True}
if src.find("%index") > -1:
generateIndex = True
for entry in src.split("\r"):
if entry.find(" ") > -1:
heading, name = entry.split(" ", 1)
if heading in headings:
if generateIndex:
if index == "":
index = "[" + name + "](#" + name.lower().replace(" ", "_") + ")"
else:
index += "\r[" + name + "](#" + name.lower().replace(" ", "_") + ")"
replaceWith = "<h" + str(len(heading)) + " id='" + name.lower().replace(" ", "_") + "' tabindex=%tabindex gridindex='%gridindex' onfocus='getIndex(this)'>" + name + "</h" + str(len(heading)) + ">"
src = src.replace(entry, replaceWith, 1)
else:
src = src.replace(entry, "<h" + str(len(heading)) + " tabindex=%tabindex gridindex='%gridindex' onfocus='getIndex(this)'>" + name + "</h" + str(len(heading)) + ">", 1)
if generateIndex:
index = searchFunctions(index)
src = src.replace("%index", index, 1)
return src
def emojis(self, src):
global globe
emojiBuffer = ""
if "emojiBuffer" in globe:
emojiBuffer = globe["emojiBuffer"]
if emojiBuffer == "":
emojiBuffer = self.buffering_intern("emojiBuffer", os.path.join("unicode", "emojis"), "emojirefmissing", "Emoji cheat not available due to missing file 'unicode/emojis'")
if emojiBuffer == "":
return src
for item in emojiBuffer.split("\n"):
if item.find(" | ") > -1:
code, emoji = item.split(" | ", 1)
src = src.replace(code.strip(), emoji.strip())
return src
def table_intern(self, item, formatting, tag):
# ToDo
index = 0
output = "<tr>"
item = item.split("|")
for entry in formatting.split("|"):
entry = entry.strip()
if entry.startswith(":") and entry.endswith(":"):
output += "<" + tag + " style='text-align:center;'>"
elif entry.startswith(":"):
output += "<" + tag + " style='text-align:left;'>"
elif entry.endswith(":"):
output += "<" + tag + " style='text-align:right;'>"
elif entry.endswith("---"):
output += "<" + tag + " style='text-align:justify;'>"
else:
output += "<" + tag + ">"
output += item[index].strip() + "</" + tag + ">"
index += 1
output += "</tr>"
return output
def table(self, src):
# ToDo
# It is just designed to parse one table per file.
if src.find("|") == -1:
return src
tables = []
oldTables = []
data = []
dataOld = []
for item in src.split("\r"):
if item == "" and len(data) > 2 or item.find("|") == -1 and len(data) > 2:
tables.append(data)
oldTables.append(dataOld)
data = []
dataOld = []
if item.count("|") > 0:
itemOld = item
item = list(item.strip())
if item[0] == "|":
del item[0]
if item[len(item)-1] == "|":
del item[len(item)-1]
data.append("".join(item).strip())
dataOld.append("".join(itemOld))
		#Preparing
dataIndex = 0
for data in tables:
output = "<table>"
oldData = data
heading = data[0]
formatting = data[1]
del data[0], data[0]
output += self.table_intern(heading.strip(), formatting, "th")
#Table Content
for item in data:
item = item.strip()
output += self.table_intern(item, formatting, "td")
output += "</table>"
data = "\r".join(data)
src = src.replace("\r".join(oldTables[dataIndex]), output)
dataIndex += 1
return src
def translate_intern(self, src, startTag, endTag, xml):
while src.find(startTag, 0, src.find(endTag, src.find(startTag)+len(startTag))) > -1:
bStart = src.find(startTag, 0, src.find(endTag, src.find(startTag)+len(startTag))) + len(startTag)
bEnd = src.find(endTag, bStart)
if bEnd == -1:
break
text = src[bStart:bEnd]
if self.canApplyCode(src, bStart) and self.canApplyCode(src, bEnd):
src = src.replace(startTag + text + endTag, xml.replace("%s", text, 1))
else:
src = src.replace(startTag + text + endTag, "\\mox1" + text + endTag)
src = src.replace("\\mox1", startTag)
return src
def translate(self, src, startTag, endTag, xml):
src = self.translate_intern(src, " " + startTag, endTag, " " + xml)
#src = self.translate_intern(src, ">" + startTag, endTag, ">" + xml) deprecated
src = self.translate_intern(src, "\r" + startTag, endTag, "\r" + xml)
src = self.translate_intern(src, startTag, endTag, xml)
return src
def main(islinput, inputfile, pluginData, globalData):
global lang, globe
compile = compiler()
globe = globalData
currentIndex = 0
if "doclang" in pluginData:
lang = pluginData["doclang"]
for item in islinput:
if item.startswith(": ") and item.endswith(" :") or item == "::" or item == "":
islinput[currentIndex] = item
currentIndex += 1
continue
else:
key = item.split("\r")
if key[0].startswith("- ") and key[0].endswith(" -") or key[0] == "--":
key, src = item.split("\r", 1)
key = key + "\r"
else:
key = ""
src = item
src = "\r" + src #Entry start needs an extra marker
src = compile.code(src, "`") #Markdown code, change font
src = compile.table(src) #Added Markdown tables
src = src.replace("\r---\r", "\r<hr>\r").replace("\r---", "\r<hr>\r") #Markdown splitline
src = compile.headings(src) #Partial support for Markdown headings
src = compile.searchFunctions(src) #Partial Markdown link- and image implementation. My own implementation for form, abbreviation, coloured text, audio and video.
src = compile.translate(src, "bbb'", "'","<big><big><big>%s</big></big></big>") #My own specification for biggest text
src = compile.translate(src, "bb'", "'", "<big><big>%s</big></big>") #My own specification for bigger text
src = compile.translate(src, "b'", "'", "<big>%s</big>") #My own specification for big text
src = compile.translate(src, "s'", "'", "<small>%s</small>") #My own specification for small text
src = compile.translate(src, "**", "**", "<b>%s</b>") #Markdown bold
src = compile.translate(src, "*", "*", "<b>%s</b>") #WhatsApp code bold
src = compile.translate(src, "_", "_", "<i>%s</i>") #Markdown italic, WhatsApp code italic
src = compile.translate(src, "~", "~", "<del>%s</del>") #unofficial Markdown strikethrough; official WhatsApp strikethrough
src = compile.translate(src, "°", "°", "<mark>%s</mark>") #My own specification for highlighted text
src = compile.translate(src, "^", "^", "<sup>%s</sup>") #Markdown superscript
src = compile.translate(src, "\r> ", "\r", "\r<blockquote>\r%s</blockquote>\r") #Markdown citation, E-Mail style citation
src = src.replace("</blockquote>\r<blockquote>\r", "\r")
src = compile.emojis(src) #Following emoji- (Unicode Consortium) and its code specifications
if src.startswith("\r"):
src = src.replace("\r", "", 1) #Remove extra marker from entry start
islinput[currentIndex] = key + src
currentIndex += 1
return islinput, pluginData, globe
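# Illustrative sketch, not part of the original plugin: driving 'main' directly
# on a one-entry input list. The entry format is an assumption based on the
# parsing above (a list of entries whose lines are joined with '\r'); the
# emoji/code lookup files are optional and simply skipped when missing.
if __name__ == "__main__":
    demo = ["**bold** and _italic_ text with a [link](https://example.com)"]
    out, plugin_data, global_data = main(demo, "demo.isl", {"doclang": "en"}, {})
    print(out[0])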
|
mit
| -5,764,654,230,822,029,000
| 39.480818
| 227
| 0.601655
| false
| 2.993758
| false
| false
| false
|
graingert/maluroam
|
maluroam/eduroam_snort/models.py
|
1
|
3623
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# models.py
#
# Copyright 2012 Thomas Grainger <tagrain@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation; version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from django.db import models
class Event(models.Model):
id = models.BigIntegerField(db_column= "event_id", primary_key=True)
username = models.CharField(max_length=765)
radius_account_id = models.CharField(max_length=765)
radius_session_id = models.CharField(max_length=765)
radius_info = models.TextField()
ip_src = models.CharField(max_length=765)
ip_dst = models.CharField(max_length=765)
start = models.DateTimeField()
finish = models.DateTimeField()
alerts = models.BigIntegerField()
blacklist = models.ForeignKey("Blacklist", db_column = "blacklist")
rule = models.ForeignKey("Rule", db_column = "rule")
rule_class = models.CharField(max_length=93)
def __unicode__(self):
return "{username}@{ip_src} accessed {ip_dst} from {start} till {finish}. Rule class: {rule_class}".format(
username = self.username,
ip_src = self.ip_src,
ip_dst = self.ip_dst,
start = self.start,
finish = self.finish,
rule_class = self.rule_class
)
class Meta:
db_table = u'event'
unique_together = ("username", "ip_src", "ip_dst", "start", "finish")
class Rule(models.Model):
id = models.BigIntegerField(primary_key=True, db_column="rule_id", editable=False)
name = models.CharField(max_length=765, db_column = "rule_name")
hide = models.BooleanField()
@models.permalink
def get_absolute_url(self):
return ('rule', (), {"pk":str(self.pk)});
def __unicode__(self):
return "{name}[{pk}]".format(name=self.name, pk=self.pk)
class Meta:
db_table = u'rules'
class Blacklist(models.Model):
id = models.BigIntegerField(primary_key=True, db_column="bl_id", editable=False)
name = models.CharField(max_length=765, editable=False)
url = models.CharField(max_length=765, editable=False)
serialized = models.TextField(editable=False)
updated = models.DateTimeField(editable=False)
hide = models.BooleanField()
@models.permalink
def get_absolute_url(self):
return ('blacklist', (), {"pk":str(self.pk)});
def __unicode__(self):
return self.name
class Meta:
db_table = u'blacklists'
class Script(models.Model):
id = models.AutoField(primary_key=True, db_column = "script_id", editable=False)
name = models.CharField(max_length=765)
updated = models.DateTimeField(db_column="lastupdated", editable=False)
@models.permalink
def get_absolute_url(self):
return ('script', (), {"pk":str(self.pk)});
def __unicode__(self):
return "{name}[{pk}]".format(
name=self.name,
pk=self.pk
)
class Meta:
db_table = u'scripts'
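# Illustrative sketch, not part of the original app: a couple of ORM queries
# against the models above. Field and relation names match the definitions in
# this file; what they return obviously depends on the deployed database.
def recent_events_for(username):
    """Events for a user, newest first, with rule/blacklist rows prefetched."""
    return (Event.objects.filter(username=username)
            .select_related("rule", "blacklist")
            .order_by("-start"))
def visible_rules():
    """Rules that have not been hidden from the interface."""
    return Rule.objects.filter(hide=False)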
|
agpl-3.0
| -4,906,065,184,165,069,000
| 33.504762
| 115
| 0.642009
| false
| 3.663296
| false
| false
| false
|
harlequin/sickbeard
|
sickbeard/metadata/tivo.py
|
1
|
13263
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# Author: Gordon Turner <gordonturner@gordonturner.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
import sickbeard
#from sickbeard.common import *
from sickbeard import logger, exceptions, helpers
from sickbeard.metadata import generic
from sickbeard import encodingKludge as ek
from lib.tvdb_api import tvdb_api, tvdb_exceptions
class TIVOMetadata(generic.GenericMetadata):
"""
Metadata generation class for TIVO
The following file structure is used:
show_root/Season 01/show - 1x01 - episode.avi.txt (* existing episode)
show_root/Season 01/.meta/show - 1x01 - episode.avi.txt (episode metadata)
    This class only generates episode-specific metadata files; it does NOT generate a default.txt file.
"""
def __init__(self,
show_metadata=False,
episode_metadata=False,
poster=False,
fanart=False,
episode_thumbnails=False,
season_thumbnails=False):
generic.GenericMetadata.__init__(self,
show_metadata,
episode_metadata,
poster,
fanart,
episode_thumbnails,
season_thumbnails)
self._ep_nfo_extension = "txt"
self.generate_ep_metadata = True
self.name = 'TIVO'
self.eg_show_metadata = "<i>not supported</i>"
self.eg_episode_metadata = "Season##\\.meta\\<i>filename</i>.txt"
self.eg_fanart = "<i>not supported</i>"
self.eg_poster = "<i>not supported</i>"
self.eg_episode_thumbnails = "<i>not supported</i>"
self.eg_season_thumbnails = "<i>not supported</i>"
# Override with empty methods for unsupported features.
def create_show_metadata(self, show_obj):
pass
def create_fanart(self, show_obj):
pass
def get_episode_thumb_path(self, ep_obj):
pass
def get_season_thumb_path(self, show_obj, season):
pass
def retrieveShowMetadata(self, dir):
return (None, None)
# Override and implement features for Tivo.
def get_episode_file_path(self, ep_obj):
"""
Returns a full show dir/.meta/episode.txt path for Tivo
episode metadata files.
        Note that pyTivo requires the metadata filename to include the original extension.
        i.e. if the episode name is foo.avi, the metadata name is foo.avi.txt
ep_obj: a TVEpisode object to get the path for
"""
if ek.ek(os.path.isfile, ep_obj.location):
metadata_file_name = ek.ek(os.path.basename, ep_obj.location) + "." + self._ep_nfo_extension
metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), '.meta')
metadata_file_path = ek.ek(os.path.join, metadata_dir_name, metadata_file_name)
else:
logger.log(u"Episode location doesn't exist: "+str(ep_obj.location), logger.DEBUG)
return ''
return metadata_file_path
def _ep_data(self, ep_obj):
"""
Creates a key value structure for a Tivo episode metadata file and
returns the resulting data object.
ep_obj: a TVEpisode instance to create the metadata file for.
Lookup the show in http://thetvdb.com/ using the python library:
https://github.com/dbr/tvdb_api/
The results are saved in the object myShow.
The key values for the tivo metadata file are from:
http://pytivo.sourceforge.net/wiki/index.php/Metadata
"""
data = "";
eps_to_write = [ep_obj] + ep_obj.relatedEps
tvdb_lang = ep_obj.show.lang
try:
# There's gotta be a better way of doing this but we don't wanna
# change the language value elsewhere
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
if tvdb_lang and not tvdb_lang == 'en':
ltvdb_api_parms['language'] = tvdb_lang
t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms)
myShow = t[ep_obj.show.tvdbid]
except tvdb_exceptions.tvdb_shownotfound, e:
raise exceptions.ShowNotFoundException(str(e))
except tvdb_exceptions.tvdb_error, e:
logger.log("Unable to connect to TVDB while creating meta files - skipping - "+str(e), logger.ERROR)
return False
for curEpToWrite in eps_to_write:
try:
myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound):
logger.log("Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? Should I delete from db?")
return None
if myEp["firstaired"] == None and ep_obj.season == 0:
myEp["firstaired"] = str(datetime.date.fromordinal(1))
if myEp["episodename"] == None or myEp["firstaired"] == None:
return None
if myShow["seriesname"] != None:
# Title of the series (The Simpsons, Seinfeld, etc.) or title of the movie (The Mummy, Spiderman, etc).
data += ("title : " + myShow["seriesname"] + "\n")
# Name of series (The Simpsons, Seinfeld, etc.). This should be included if the show is episodic.
# For movies, you may repeat the name of the movie (The Mummy, Spiderman, etc), leave blank, or omit.
data += ("seriesTitle : " + myShow["seriesname"] + "\n")
# Title of the episode (Pilot, Homer's Night Out, Episode 02, etc.) Should be included for episodic shows.
# Leave blank or omit for movies.
data += ("episodeTitle : " + curEpToWrite.name + "\n")
# This should be entered for episodic shows and omitted for movies. The standard tivo format is to enter
# the season number followed by the episode number for that season. For example, enter 201 for season 2
# episode 01.
# This only shows up if you go into the Details from the Program screen.
# This seems to disappear once the video is transferred to TiVo.
# NOTE: May not be correct format, missing season, but based on description from wiki leaving as is.
data += ("episodeNumber : " + str(curEpToWrite.episode) + "\n")
# Must be entered as true or false. If true, the year from originalAirDate will be shown in parentheses
# after the episode's title and before the description on the Program screen.
# FIXME: Hardcode isEpisode to true for now, not sure how to handle movies
data += ("isEpisode : true\n")
# Write the synopsis of the video here.
            # Microsoft Word's smartquotes can die in a fire.
sanitizedDescription = curEpToWrite.description
# Replace double curly quotes
sanitizedDescription = sanitizedDescription.replace(u"\u201c", "\"").replace(u"\u201d", "\"")
# Replace single curly quotes
sanitizedDescription = sanitizedDescription.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u02BC", "'")
data += ("description : " + sanitizedDescription + "\n")
            # Usually starts with "SH" and is followed by 6-8 digits.
            # Tivo uses zap2it for their data, so the series id is the zap2it_id.
if myShow["zap2it_id"] != None:
data += ("seriesId : " + myShow["zap2it_id"] + "\n")
# This is the call sign of the channel the episode was recorded from.
if myShow["network"] != None:
data += ("callsign : " + myShow["network"] + "\n")
# This must be entered as yyyy-mm-ddThh:mm:ssZ (the t is capitalized and never changes, the Z is also
# capitalized and never changes). This is the original air date of the episode.
# NOTE: Hard coded the time to T00:00:00Z as we really don't know when during the day the first run happened.
if curEpToWrite.airdate != datetime.date.fromordinal(1):
data += ("originalAirDate : " + str(curEpToWrite.airdate) + "T00:00:00Z\n")
# This shows up at the beginning of the description on the Program screen and on the Details screen.
if myShow["actors"]:
for actor in myShow["actors"].split('|'):
if actor:
data += ("vActor : " + actor + "\n")
# This is shown on both the Program screen and the Details screen. It uses a single digit to determine the
# number of stars: 1 for 1 star, 7 for 4 stars
if myShow["rating"] != None:
try:
rating = float(myShow['rating'])
except ValueError:
rating = 0.0
rating = rating / 10 * 4
data += ("starRating : " + str(rating) + "\n")
# This is shown on both the Program screen and the Details screen.
# It uses the standard TV rating system of: TV-Y7, TV-Y, TV-G, TV-PG, TV-14, TV-MA and TV-NR.
if myShow["contentrating"]:
data += ("tvRating : " + str(myShow["contentrating"]) + "\n")
# This field can be repeated as many times as necessary or omitted completely.
if ep_obj.show.genre:
for genre in ep_obj.show.genre.split('|'):
if genre:
data += ("vProgramGenre : " + str(genre) + "\n")
            # NOTE: The following metadata keywords are not used
# displayMajorNumber
# showingBits
# displayMinorNumber
# colorCode
# vSeriesGenre
# vGuestStar, vDirector, vExecProducer, vProducer, vWriter, vHost, vChoreographer
# partCount
# partIndex
return data
def write_ep_file(self, ep_obj):
"""
Generates and writes ep_obj's metadata under the given path with the
given filename root. Uses the episode's name with the extension in
_ep_nfo_extension.
ep_obj: TVEpisode object for which to create the metadata
file_name_path: The file name to use for this metadata. Note that the extension
will be automatically added based on _ep_nfo_extension. This should
include an absolute path.
"""
data = self._ep_data(ep_obj)
if not data:
return False
nfo_file_path = self.get_episode_file_path(ep_obj)
nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path)
try:
if not ek.ek(os.path.isdir, nfo_file_dir):
logger.log("Metadata dir didn't exist, creating it at "+nfo_file_dir, logger.DEBUG)
ek.ek(os.makedirs, nfo_file_dir)
helpers.chmodAsParent(nfo_file_dir)
logger.log(u"Writing episode nfo file to "+nfo_file_path)
nfo_file = ek.ek(open, nfo_file_path, 'w')
# Calling encode directly, b/c often descriptions have wonky characters.
nfo_file.write( data.encode( "utf-8" ) )
nfo_file.close()
helpers.chmodAsParent(nfo_file_path)
except IOError, e:
logger.log(u"Unable to write file to "+nfo_file_path+" - are you sure the folder is writable? "+str(e).decode('utf-8'), logger.ERROR)
return False
return True
# present a standard "interface"
metadata_class = TIVOMetadata
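# Illustrative example (not from the original file) of the "key : value" text
# that _ep_data() assembles for pyTivo; the titles, numbers and date below are
# made up, only the keys mirror the code above.
#
#   title : Some Show
#   seriesTitle : Some Show
#   episodeTitle : Pilot
#   episodeNumber : 1
#   isEpisode : true
#   description : A short synopsis of the episode.
#   originalAirDate : 2010-01-01T00:00:00Z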
|
gpl-3.0
| 8,133,758,677,926,244,000
| 40.839117
| 176
| 0.562618
| false
| 4.213151
| false
| false
| false
|
ktan2020/legacy-automation
|
win/Lib/site-packages/wx-3.0-msw/wx/lib/agw/toasterbox.py
|
1
|
46688
|
# --------------------------------------------------------------------------- #
# TOASTERBOX wxPython IMPLEMENTATION
# Ported And Enhanced From wxWidgets Contribution (Aj Bommarito) By:
#
# Andrea Gavana, @ 16 September 2005
# Latest Revision: 14 Mar 2012, 21.00 GMT
#
#
# TODO/Caveats List
#
# 1. Any Idea?
#
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# andrea.gavana@gmail.com
# andrea.gavana@maerskoil.com
#
# Or, Obviously, To The wxPython Mailing List!!!
#
#
# End Of Comments
# --------------------------------------------------------------------------- #
"""
ToasterBox is a cross-platform widget to make the creation of MSN style "toaster"
popups easier.
Description
===========
ToasterBox is a cross-platform widget to make the creation of MSN style "toaster"
popups easier. The syntax is really easy especially if you are familiar with the
syntax of wxPython.
It has 2 main styles:
- ``TB_SIMPLE``: using this style, you will be able to specify a background image for
  ToasterBox, and text properties such as text colour, font and label;
- ``TB_COMPLEX``: this style will allow you to put almost any control inside a
ToasterBox. You can add a panel in which you can put all the controls you like.
Both styles support the setting of ToasterBox position (on screen coordinates),
size, the time after which the ToasterBox is destroyed (linger), and the scroll
speed of ToasterBox.
Usage
=====
Usage example::
import wx
import wx.lib.agw.toasterbox as TB
class MyFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "ToasterBox Demo")
toaster = TB.ToasterBox(self, tbstyle=TB.TB_COMPLEX)
toaster.SetPopupPauseTime(3000)
tbpanel = toaster.GetToasterBoxWindow()
panel = wx.Panel(tbpanel, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
button = wx.Button(panel, wx.ID_ANY, "Simple button")
sizer.Add(button, 0, wx.EXPAND)
panel.SetSizer(sizer)
toaster.AddPanel(panel)
wx.CallLater(1000, toaster.Play)
# our normal wxApp-derived class, as usual
app = wx.App(0)
frame = MyFrame(None)
app.SetTopWindow(frame)
frame.Show()
app.MainLoop()
Supported Platforms
===================
ToasterBox has been tested on the following platforms:
- Windows (verified on Windows XP, 2000)
- Linux
- Mac
Window Styles
=============
This class supports the following window styles:
==================== =========== ==================================================
Window Styles Hex Value Description
==================== =========== ==================================================
``TB_SIMPLE`` 0x1 A simple `ToasterBox`, with background image and text customization can be created.
``TB_ONTIME`` 0x1 `ToasterBox` will close after a specified amount of time.
``TB_COMPLEX`` 0x2 ToasterBoxes with different degrees of complexity can be created. You can add as many controls as you want, provided that you call the :meth:`~ToasterBox.AddPanel` method and pass to it a dummy frame and a :class:`Panel`. See the demo for details.
``TB_ONCLICK`` 0x2 `ToasterBox` can be closed by clicking anywhere on the `ToasterBox` frame.
``TB_DEFAULT_STYLE`` 0x2008002 Default window style for `ToasterBox`, with no caption nor close box.
``TB_CAPTION`` 0x22009806 `ToasterBox` will have a caption, with the possibility to set a title for the `ToasterBox` frame, and a close box.
==================== =========== ==================================================
Events Processing
=================
`No custom events are available for this class.`
License And Version
===================
ToasterBox is distributed under the wxPython license.
Latest revision: Andrea Gavana @ 14 Mar 2012, 21.00 GMT
Version 0.3
"""
import textwrap
import wx
# Define Window List, We Use It Globally
winlist = []
""" Globally defined window list. """
TB_SIMPLE = 1
""" A simple ToasterBox, with background image and text customization can be created. """
TB_COMPLEX = 2
""" ToasterBoxes with different degree of complexity can be created. You can add as many controls as you want, provided that you call the AddPanel() method and pass to it a dummy frame and a wx.Panel. See the demo for details. """
TB_DEFAULT_STYLE = wx.SIMPLE_BORDER | wx.STAY_ON_TOP | wx.FRAME_NO_TASKBAR
""" Default window style for `ToasterBox`, with no caption nor close box. """
TB_CAPTION = TB_DEFAULT_STYLE | wx.CAPTION | wx.SYSTEM_MENU | wx.CLOSE_BOX | wx.FRAME_NO_TASKBAR
""" `ToasterBox` will have a caption, with the possibility to set a title for the `ToasterBox` frame, and a close box. """
TB_ONTIME = 1
""" `ToasterBox` will close after a specified amount of time. """
TB_ONCLICK = 2
""" `ToasterBox` can be closed by clicking anywhere on the `ToasterBox` frame. """
# scroll from up to down
TB_SCR_TYPE_UD = 1
""" Scroll from up to down. """
# scroll from down to up
TB_SCR_TYPE_DU = 2
""" Scroll from down to up. """
# fade in/out
TB_SCR_TYPE_FADE = 4
""" Fade in and out. """
# ------------------------------------------------------------------------------ #
# Class ToasterBox
# Main Class Implementation. It Is Basically A wx.Timer. It Creates And
# Displays Popups And Handles The "Stacking".
# ------------------------------------------------------------------------------ #
class ToasterBox(wx.Timer):
"""
ToasterBox is a cross-platform widget to make the creation of MSN style "toaster"
popups easier.
"""
def __init__(self, parent, tbstyle=TB_SIMPLE, windowstyle=TB_DEFAULT_STYLE,
closingstyle=TB_ONTIME, scrollType=TB_SCR_TYPE_DU):
"""
Default class constructor.
:param `parent`: the window parent;
:param `tbstyle`: the :class:`ToasterBox` main style. Can be one of the following
bits:
====================== ======= ================================
`ToasterBox` Style Value Description
====================== ======= ================================
``TB_SIMPLE`` 0x1 A simple :class:`ToasterBox`, with background image and text customization can be created
``TB_COMPLEX`` 0x2 `ToasterBoxes` with different degree of complexity can be created. You can add as many controls as you want, provided that you call the :meth:`~ToasterBox.AddPanel` method and pass to it a dummy frame and a :class:`Panel`.
====================== ======= ================================
:param `windowstyle`: this parameter influences the visual appearance of
:class:`ToasterBox`, and can be one of the following styles:
====================== ========== ================================
Window Style Hex Value Description
====================== ========== ================================
``TB_DEFAULT_STYLE`` 0x2008002 Default window style for :class:`ToasterBox`, with no caption nor close box.
``TB_CAPTION`` 0x22009806 :class:`ToasterBox` will have a caption, with the possibility to set a title for the :class:`ToasterBox` frame, and a close box.
====================== ========== ================================
:param `closingstyle`: the closing style for :class:`ToasterBox`. Can be one of the
following bits:
==================== =========== ==================================================
Closing Styles Hex Value Description
==================== =========== ==================================================
``TB_ONTIME`` 0x1 :class:`ToasterBox` will close after a specified amount of time.
``TB_ONCLICK`` 0x2 :class:`ToasterBox` can be closed by clicking anywhere on the :class:`ToasterBox` frame.
==================== =========== ==================================================
:param `scrollType`: the scrolling direction for :class:`ToasterBox`. Can be one of the
following bits:
==================== =========== ==================================================
Scroll Styles Hex Value Description
==================== =========== ==================================================
``TB_SCR_TYPE_UD`` 0x1 :class:`ToasterBox` will scroll from up to down
``TB_SCR_TYPE_DU`` 0x2 :class:`ToasterBox` will scroll from down to up
``TB_SCR_TYPE_FADE`` 0x4 :class:`ToasterBox` will fade in/out (without scrolling).
==================== =========== ==================================================
"""
self._parent = parent
self._sleeptime = 10
self._pausetime = 1700
self._popuptext = "default"
self._popupposition = wx.Point(100,100)
self._popuptop = wx.Point(0,0)
self._popupsize = wx.Size(150, 170)
self._usefocus = True
self._originalfocus = wx.Window.FindFocus()
self._backgroundcolour = wx.WHITE
self._foregroundcolour = wx.BLACK
self._textfont = wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL, False, "Verdana")
self._bitmap = None
self._tbstyle = tbstyle
self._windowstyle = windowstyle
self._closingstyle = closingstyle
self._scrollType = scrollType
self._panel = None
self._bottomright = wx.Point(wx.GetDisplaySize().GetWidth(),
wx.GetDisplaySize().GetHeight())
if parent is not None:
parent.Bind(wx.EVT_ICONIZE, lambda evt: [w.Hide() for w in winlist])
self._moveTimer = wx.Timer(parent, -1)
parent.Bind(wx.EVT_TIMER, self.OnMoveTimer, self._moveTimer)
self._tb = ToasterBoxWindow(self._parent, self, self._tbstyle, self._windowstyle,
self._closingstyle, scrollType=self._scrollType)
def SetPopupPosition(self, pos):
"""
Sets the :class:`ToasterBox` position on screen.
:param `pos`: the widget position, an instance of :class:`Point`.
"""
self._popupposition = pos
def SetPopupPositionByInt(self, pos):
"""
Sets the :class:`ToasterBox` position on screen, at one of the screen corners.
:param `pos`: an integer specifying the screen corner, namely:
============= ========================================
Corner Number Position
============= ========================================
0 Top left screen corner
1 Top right screen corner
2 Bottom left screen corner
3 Bottom right screen corner
============= ========================================
"""
w, h = wx.GetDisplaySize()
self._bottomright = wx.Point(w, h)
# top left
if pos == 0:
popupposition = wx.Point(0,0)
# top right
elif pos == 1:
popupposition = wx.Point(w - self._popupsize[0], 0)
# bottom left
elif pos == 2:
popupposition = wx.Point(0, h - self._popupsize[1])
# bottom right
elif pos == 3:
popupposition = wx.Point(self._bottomright.x - self._popupsize[0],
self._bottomright.y - self._popupsize[1])
self._bottomright = wx.Point(popupposition.x + self._popupsize[0],
popupposition.y + self._popupsize[1])
self._popupposition = popupposition
def CenterOnParent(self, direction=wx.BOTH):
"""
Centres the window on its parent (if any). If the :class:`ToasterBox` parent is ``None``,
it calls :meth:`~ToasterBox.CenterOnScreen`.
:param `direction`: specifies the direction for the centering. May be ``wx.HORIZONTAL``,
``wx.VERTICAL`` or ``wx.BOTH``.
:note: This method provides a way to center :class:`ToasterBox` over its parent instead of the
entire screen. If there is no parent, then behaviour is the same as :meth:`~ToasterBox.CenterOnScreen`.
:see: :meth:`~ToasterBox.CenterOnScreen`.
"""
if not self._parent:
self.CenterOnScreen(direction)
return
parent = self._parent
screenrect = parent.GetScreenRect()
toast_width, toast_height = self._popupsize
x, y = screenrect.GetX(), screenrect.GetY()
width, height = screenrect.GetWidth(), screenrect.GetHeight()
if direction == wx.VERTICAL:
pos = wx.Point(x, (y + (height/2) - (toast_height/2)))
elif direction == wx.HORIZONTAL:
pos = wx.Point((x + (width/2) - (toast_width/2)), y)
else:
pos = wx.Point((x + (width/2) - (toast_width/2)), (y + (height/2) - (toast_height/2)))
self.SetPopupPosition(pos)
CentreOnParent = CenterOnParent
def CenterOnScreen(self, direction=wx.BOTH):
"""
Centres the :class:`ToasterBox` on screen.
:param `direction`: specifies the direction for the centering. May be ``wx.HORIZONTAL``,
``wx.VERTICAL`` or ``wx.BOTH``.
:see: :meth:`~ToasterBox.CenterOnParent`.
"""
screenSize = wx.GetDisplaySize()
toast_width, toast_height = self._popupsize
width, height = screenSize.GetWidth(), screenSize.GetHeight()
if direction == wx.VERTICAL:
pos = wx.Point(0, (height/2) - (toast_height/2))
elif direction == wx.HORIZONTAL:
pos = wx.Point((width/2) - (toast_width/2), 0)
else:
pos = wx.Point((width/2) - (toast_width/2), (height/2) - (toast_height/2))
self.SetPopupPosition(pos)
CentreOnScreen = CenterOnScreen
def SetPopupBackgroundColour(self, colour=None):
"""
Sets the :class:`ToasterBox` background colour.
:param `colour`: a valid :class:`Colour` object. If defaulted to ``None``, then
the background colour will be white.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_SIMPLE`` style.
"""
if colour is None:
colour = wx.WHITE
if isinstance(colour, basestring):
colour = wx.NamedColour(colour)
self._backgroundcolour = colour
self._tb.SetPopupBackgroundColour(self._backgroundcolour)
def SetPopupTextColour(self, colour=None):
"""
Sets the :class:`ToasterBox` foreground colour.
:param `colour`: a valid :class:`Colour` object. If defaulted to ``None``, then
the background colour will be black.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_SIMPLE`` style.
"""
if colour is None:
colour = wx.BLACK
if isinstance(colour, basestring):
colour = wx.NamedColour(colour)
self._foregroundcolour = colour
def SetPopupTextFont(self, font=None):
"""
Sets the :class:`ToasterBox` text font.
:param `font`: a valid :class:`Font` object. If defaulted to ``None``, then
a simple generic font will be generated.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_SIMPLE`` style.
"""
if font is None:
font = wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL, False)
self._textfont = font
def SetPopupSize(self, size):
"""
Sets the :class:`ToasterBox` size.
:param `size`: the new control size, an instance of :class:`Size`.
"""
self._popupsize = size
def SetPopupPauseTime(self, pausetime):
"""
Sets the time after which the :class:`ToasterBox` is destroyed (linger).
:param `pausetime`: the delay after which the control is destroyed, in milliseconds.
"""
self._pausetime = pausetime
def SetPopupBitmap(self, bitmap=None):
"""
Sets the :class:`ToasterBox` background image.
:param `bitmap`: a valid :class:`Bitmap` object or filename. If defaulted
to ``None``, then no background bitmap is used.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_SIMPLE`` style.
"""
if bitmap is not None:
if isinstance(bitmap, basestring):
bitmap = wx.Bitmap(bitmap)
self._bitmap = bitmap
def SetPopupScrollSpeed(self, speed):
"""
Sets the :class:`ToasterBox` scroll speed.
:param `speed`: it is the pause time (in milliseconds) for every step in the
`ScrollUp` method.
"""
self._sleeptime = speed
def SetPopupText(self, text):
"""
Sets the :class:`ToasterBox` text label.
:param `text`: the widget label.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_SIMPLE`` style.
"""
self._popuptext = text
def AddPanel(self, panel):
"""
Adds a panel to the :class:`ToasterBox`.
:param `panel`: an instance of :class:`Window`.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_COMPLEX`` style.
"""
if not self._tbstyle & TB_COMPLEX:
raise Exception("\nERROR: Panel Can Not Be Added When Using TB_SIMPLE ToasterBox Style")
self._panel = panel
def Play(self):
""" Creates the :class:`ToasterBoxWindow`, that does all the job. """
# create new window
self._tb.SetPopupSize((self._popupsize[0], self._popupsize[1]))
self._tb.SetPopupPosition((self._popupposition[0], self._popupposition[1]))
self._tb.SetPopupPauseTime(self._pausetime)
self._tb.SetPopupScrollSpeed(self._sleeptime)
self._tb.SetUseFocus(self._usefocus, self._originalfocus)
if self._tbstyle == TB_SIMPLE:
self._tb.SetPopupTextColour(self._foregroundcolour)
self._tb.SetPopupBackgroundColour(self._backgroundcolour)
self._tb.SetPopupTextFont(self._textfont)
if self._bitmap is not None:
self._tb.SetPopupBitmap(self._bitmap)
self._tb.SetPopupText(self._popuptext)
if self._tbstyle == TB_COMPLEX:
if self._panel is not None:
self._tb.AddPanel(self._panel)
# clean up the list
self.CleanList()
# check to see if there is already a window displayed
# by looking at the linked list
if len(winlist) > 0:
# there ARE other windows displayed already
# recalc where it should display
self.MoveAbove(self._tb)
# shift new window on to the list
winlist.append(self._tb)
if not self._tb.Play():
# if we didn't show the window properly, remove it from the list
winlist.remove(winlist[-1])
# delete the object too
self._tb.Destroy()
return
def MoveAbove(self, tb):
"""
If a :class:`ToasterBox` already exists, move the new one above the existing one.
:param `tb`: another instance of :class:`ToasterBox`.
"""
# recalc where to place this popup
self._tb.SetPopupPosition((self._popupposition[0], self._popupposition[1] -
self._popupsize[1]*len(winlist)))
def GetToasterBoxWindow(self):
""" Returns the :class:`ToasterBox` frame. """
return self._tb
def SetTitle(self, title):
"""
Sets the :class:`ToasterBox` title if it was created with ``TB_CAPTION`` window style.
:param `title`: the :class:`ToasterBox` caption title.
"""
self._tb.SetTitle(title)
def SetUseFocus(self, focus):
"""
If `focus` is ``True``, instructs :class:`ToasterBox` to steal the focus from the
parent application, otherwise it returns the focus to the original owner.
:param `focus`: ``True`` to set the focus on :class:`ToasterBox`, ``False`` to
return it to the original owner.
"""
self._usefocus = focus
def GetUseFocus(self):
""" Returns whether :class:`ToasterBox` will steal the focus from the parent application. """
return self._usefocus
def Notify(self):
""" It's time to hide a :class:`ToasterBox`. """
if len(winlist) == 0:
return
# clean the window list
self.CleanList()
# figure out how many blanks we have
try:
node = winlist[0]
except:
return
if not node:
return
self._startPos = node.GetPosition()[1]
self._moveTimer.Start(self._sleeptime)
def OnMoveTimer(self, event):
"""
Handles the ``wx.EVT_TIMER`` event for :class:`ToasterBox`, shifting the remaining
stacked windows to fill the space left by a dismissed one.
:param `event`: a :class:`TimerEvent` event to be processed.
"""
current = self._startPos
if current >= self._popupposition[1]:
self._moveTimer.Stop()
# move windows to fill in blank space
if current > self._popupposition[1]:
current = self._popupposition[1]
# loop through all the windows
for j in xrange(0, len(winlist)):
ourNewHeight = current - (j*self._popupsize[1] - 8)
tmpTb = winlist[j]
# reset where the object THINKS it's supposed to be
tmpTb.SetPopupPosition((self._popupposition[0], ourNewHeight))
# actually move it
tmpTb.SetDimensions(self._popupposition[0], ourNewHeight, tmpTb.GetSize().GetWidth(),
tmpTb.GetSize().GetHeight())
self._startPos += 4
def CleanList(self):
""" Cleans the window list, erasing the stack of :class:`ToasterBox` objects. """
if len(winlist) == 0:
return
node = winlist[0]
while node:
if not node.IsShown():
winlist.remove(node)
node.Close()
try:
node = winlist[0]
except:
node = 0
else:
indx = winlist.index(node)
try:
node = winlist[indx+1]
except:
node = 0
# ------------------------------------------------------------------------------ #
# Class ToasterBoxWindow
# This Class Does All The Job, By Handling Background Images, Text Properties
# And Panel Adding. Depending On The Style You Choose, ToasterBoxWindow Will
# Behave Differently In Order To Handle Widgets Inside It.
# ------------------------------------------------------------------------------ #
class ToasterBoxWindow(wx.Frame):
"""
This class does all the job, by handling background images, text properties
and panel adding. Depending on the style you choose, :class:`ToasterBoxWindow` will
behave differently in order to handle widgets inside it.
"""
def __init__(self, parent, parent2, tbstyle, windowstyle, closingstyle,
scrollType=TB_SCR_TYPE_DU):
"""
Default class constructor.
Used internally. Do not call directly this class in your application!
:param `parent`: the window parent;
:param `parent2`: the :class:`ToasterBox` calling this window;
:param `tbstyle`: the :class:`ToasterBoxWindow` main style. Can be one of the following
bits:
====================== ======= ================================
`ToasterBox` Style Value Description
====================== ======= ================================
``TB_SIMPLE`` 0x1 A simple :class:`ToasterBox`, with background image and text customization can be created
``TB_COMPLEX`` 0x2 `ToasterBoxes` with different degree of complexity can be created. You can add as many controls as you want, provided that you call the :meth:`~ToasterBoxWindow.AddPanel` method and pass to it a dummy frame and a :class:`Panel`.
====================== ======= ================================
:param `windowstyle`: this parameter influences the visual appearance of
:class:`ToasterBoxWindow`, and can be one of the following styles:
====================== ========== ================================
Window Style Hex Value Description
====================== ========== ================================
``TB_DEFAULT_STYLE`` 0x2008002 Default window style for :class:`ToasterBox`, with no caption nor close box.
``TB_CAPTION`` 0x22009806 :class:`ToasterBox` will have a caption, with the possibility to set a title for the :class:`ToasterBox` frame, and a close box.
====================== ========== ================================
:param `closingstyle`: the closing style for :class:`ToasterBoxWindow`. Can be one of the
following bits:
==================== =========== ==================================================
Closing Styles Hex Value Description
==================== =========== ==================================================
``TB_ONTIME`` 0x1 :class:`ToasterBox` will close after a specified amount of time.
``TB_ONCLICK`` 0x2 :class:`ToasterBox` can be closed by clicking anywhere on the :class:`ToasterBox` frame.
==================== =========== ==================================================
:param `scrollType`: the scrolling direction for :class:`ToasterBoxWindow`. Can be one of the
following bits:
==================== =========== ==================================================
Scroll Styles Hex Value Description
==================== =========== ==================================================
``TB_SCR_TYPE_UD`` 0x1 :class:`ToasterBox` will scroll from up to down
``TB_SCR_TYPE_DU`` 0x2 :class:`ToasterBox` will scroll from down to up
``TB_SCR_TYPE_FADE`` 0x4 :class:`ToasterBox` will fade in/out (without scrolling).
==================== =========== ==================================================
"""
wx.Frame.__init__(self, parent, wx.ID_ANY, "window", wx.DefaultPosition,
wx.DefaultSize, style=windowstyle | wx.CLIP_CHILDREN)
self._starttime = wx.GetLocalTime()
self._parent2 = parent2
self._parent = parent
self._sleeptime = 10
self._step = 4
self._pausetime = 1700
self._textcolour = wx.BLACK
self._popuptext = "Change Me!"
# the size we want the dialog to be
framesize = wx.Size(150, 170)
self._count = 1
self._tbstyle = tbstyle
self._windowstyle = windowstyle
self._closingstyle = closingstyle
self._backgroundcolour = wx.WHITE
if tbstyle == TB_COMPLEX:
self.sizer = wx.BoxSizer(wx.VERTICAL)
else:
self._staticbitmap = None
if self._windowstyle == TB_CAPTION:
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.SetTitle("")
if scrollType == TB_SCR_TYPE_FADE and not self.CanSetTransparent():
import warnings
warnings.warn("The style ``TB_SCR_TYPE_FADE`` is not supported on this platform.")
scrollType = TB_SCR_TYPE_DU
self._scrollType = scrollType
if self._closingstyle & TB_ONCLICK and self._windowstyle != TB_CAPTION:
self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseDown)
self._bottomright = wx.Point(wx.GetDisplaySize().GetWidth(),
wx.GetDisplaySize().GetHeight())
self.SetDimensions(self._bottomright.x, self._bottomright.y,
framesize.GetWidth(), framesize.GetHeight())
self._scrollTimer = wx.Timer(self, -1)
self._alphaTimer = wx.Timer(self, -1)
self.Bind(wx.EVT_TIMER, self.OnScrollTimer, self._scrollTimer)
self.Bind(wx.EVT_TIMER, self.AlphaCycle, self._alphaTimer)
if not self._tbstyle & TB_COMPLEX:
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
def OnClose(self, event):
"""
Handles the ``wx.EVT_CLOSE`` event for :class:`ToasterBoxWindow`.
:param `event`: a :class:`CloseEvent` event to be processed.
"""
self.NotifyTimer(None)
event.Skip()
def OnMouseDown(self, event):
"""
Handles the ``wx.EVT_LEFT_DOWN`` event for :class:`ToasterBoxWindow`.
:param `event`: a :class:`MouseEvent` event to be processed.
"""
self.NotifyTimer(None)
event.Skip()
def SetPopupBitmap(self, bitmap=None):
"""
Sets the :class:`ToasterBox` background image.
:param `bitmap`: a valid :class:`Bitmap` object. If defaulted to ``None``, then
no background bitmap is used.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_SIMPLE`` style.
"""
if bitmap is None:
self._staticbitmap = None
else:
bitmap = bitmap.ConvertToImage()
xsize, ysize = self.GetSize()
bitmap = bitmap.Scale(xsize, ysize)
self._staticbitmap = bitmap.ConvertToBitmap()
def SetPopupSize(self, size):
"""
Sets the :class:`ToasterBox` size.
:param `size`: the new control size, an instance of :class:`Size`.
"""
self.SetDimensions(self._bottomright.x, self._bottomright.y, size[0], size[1])
def SetPopupPosition(self, pos):
"""
Sets the :class:`ToasterBox` position on screen.
:param `pos`: the widget position, an instance of :class:`Point`.
"""
self._bottomright = wx.Point(pos[0] + self.GetSize().GetWidth(),
pos[1] + self.GetSize().GetHeight())
self._dialogtop = pos
def SetPopupPositionByInt(self, pos):
"""
Sets the :class:`ToasterBox` position on screen, at one of the screen corners.
:param `pos`: an integer specifying the screen corner, namely:
============= ========================================
Corner Number Position
============= ========================================
0 Top left screen corner
1 Top right screen corner
2 Bottom left screen corner
3 Bottom right screen corner
============= ========================================
"""
w, h = wx.GetDisplaySize()
self._bottomright = wx.Point(w, h)
# top left
if pos == 0:
popupposition = wx.Point(0, 0)
# top right
elif pos == 1:
popupposition = wx.Point(w - self._popupsize[0], 0)
# bottom left
elif pos == 2:
popupposition = wx.Point(0, h - self._popupsize[1])
# bottom right
elif pos == 3:
popupposition = wx.Point(self._bottomright.x - self._popupsize[0],
self._bottomright.y - self._popupsize[1])
self._bottomright = wx.Point(popupposition.x + self._popupsize[0],
popupposition.y + self._popupsize[1])
self._dialogtop = popupposition
def SetPopupPauseTime(self, pausetime):
"""
Sets the time after which the :class:`ToasterBox` is destroyed (linger).
:param `pausetime`: the delay after which the control is destroyed, in milliseconds.
"""
self._pausetime = pausetime
def SetPopupScrollSpeed(self, speed):
"""
Sets the :class:`ToasterBox` scroll speed.
:param `speed`: it is the pause time (in milliseconds) for every step in the
:meth:`~ToasterBoxWindow.ScrollUp` method.
"""
self._sleeptime = speed
def AddPanel(self, panel):
"""
Adds a panel to the :class:`ToasterBox`.
:param `panel`: an instance of :class:`Window`.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_COMPLEX`` style.
"""
if not self._tbstyle & TB_COMPLEX:
raise Exception("\nERROR: Panel Can Not Be Added When Using TB_SIMPLE ToasterBox Style")
self.sizer.Add(panel, 1, wx.EXPAND)
self.SetSizer(self.sizer)
self.Layout()
if self._closingstyle & TB_ONCLICK and self._windowstyle != TB_CAPTION:
panel.Bind(wx.EVT_LEFT_DOWN, self.OnMouseDown)
def SetPopupText(self, text):
"""
Sets the :class:`ToasterBox` text label.
:param `text`: the widget label.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_SIMPLE`` style.
"""
self._popuptext = text
def SetPopupTextFont(self, font):
"""
Sets the :class:`ToasterBox` text font.
:param `font`: a valid :class:`Font` object. If defaulted to ``None``, then
a simple generic font will be generated.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_SIMPLE`` style.
"""
self._textfont = font
def GetPopupText(self):
"""
Returns the :class:`ToasterBox` text.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_SIMPLE`` style.
"""
return self._popuptext
def Play(self):
""" Creates the :class:`ToasterBoxWindow`, that does all the job. """
# do some checks to make sure this window is valid
if self._bottomright.x < 1 or self._bottomright.y < 1:
return False
if self.GetSize().GetWidth() < 50 or self.GetSize().GetHeight() < 50:
# toasterbox launches into an endless loop for some reason
# when you try to make the window too small.
return False
self._direction = wx.UP
self.SetupPositions()
self.ScrollUp()
timerid = wx.NewId()
self.showtime = wx.Timer(self, timerid)
self.showtime.Start(self._pausetime)
self.Bind(wx.EVT_TIMER, self.NotifyTimer, id=timerid)
return True
def NotifyTimer(self, event):
""" Hides gradually the :class:`ToasterBoxWindow`. """
if self._scrollType != TB_SCR_TYPE_FADE:
self.showtime.Stop()
del self.showtime
self._direction = wx.DOWN
self.SetupPositions()
self.ScrollDown()
def SetPopupBackgroundColour(self, colour):
"""
Sets the :class:`ToasterBox` background colour.
:param `colour`: a valid :class:`Colour` object. If defaulted to ``None``, then
the background colour will be white.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_SIMPLE`` style.
"""
self.SetBackgroundColour(colour)
self._backgroundcolour = colour
def SetPopupTextColour(self, colour):
"""
Sets the :class:`ToasterBox` foreground colour.
:param `colour`: a valid :class:`Colour` object. If defaulted to ``None``, then
the background colour will be black.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_SIMPLE`` style.
"""
self._textcolour = colour
def SetUseFocus(self, focus, originalfocus):
"""
If `focus` is ``True``, instructs :class:`ToasterBoxWindow` to steal the focus from the
parent application, otherwise it returns the focus to the original owner.
:param `focus`: ``True`` to set the focus on :class:`ToasterBoxWindow`, ``False`` to
return it to the original owner;
:param `originalfocus`: an instance of :class:`Window`, representing a pointer to
the window which originally had the focus
"""
self._usefocus = focus
self._originalfocus = originalfocus
def OnScrollTimer(self, event):
"""
Handles the ``wx.EVT_TIMER`` event for :class:`ToasterBoxWindow` scrolling up/down.
:param `event`: a :class:`TimerEvent` event to be processed.
"""
if self._direction == wx.UP:
self.TearUp()
else:
self.TearDown()
def TearUp(self):
""" Scrolls the :class:`ToasterBox` up, which means gradually showing it. """
self._windowsize = self._windowsize + self._step
step = self._currentStep
if step < self._dialogtop[1]:
step = self._dialogtop[1]
# checking the type of the scroll (from up to down or from down to up)
if self._scrollType == TB_SCR_TYPE_UD:
dimY = self._dialogtop[1]
elif self._scrollType == TB_SCR_TYPE_DU:
dimY = step
self.SetDimensions(self._dialogtop[0], dimY, self.GetSize().GetWidth(), self._windowsize)
self.Refresh(False)
self._currentStep += self._scrollStep
if self._currentStep not in range(self._start, self._stop, self._scrollStep):
self._scrollTimer.Stop()
self.Update()
if self._tbstyle == TB_SIMPLE:
self.DrawText()
if self._usefocus:
self.SetFocus()
else:
self._originalfocus.SetFocus()
def TearDown(self):
""" Scrolls the :class:`ToasterBox` down, which means gradually hiding it. """
self._windowsize = self._windowsize - self._step
step = self._currentStep
if step > self._bottomright.y:
step = self._bottomright.y
if self._windowsize > 0:
# checking the type of the scroll (from up to down or from down to up)
if self._scrollType == TB_SCR_TYPE_UD:
dimY = self._dialogtop[1]
elif self._scrollType == TB_SCR_TYPE_DU:
dimY = step
self.SetDimensions(self._dialogtop[0], dimY,
self.GetSize().GetWidth(), self._windowsize)
self.Update()
self.Refresh()
self._currentStep += self._scrollStep
else:
self._scrollTimer.Stop()
self.Hide()
if self._parent2:
self._parent2.Notify()
def SetupPositions(self):
""" Sets up the position, size and scrolling step for :class:`ToasterBoxWindow`. """
if self._scrollType == TB_SCR_TYPE_FADE:
self.SetPosition(wx.Point(*self._dialogtop))
return
if self._direction == wx.UP:
# walk the Y value up in a raise motion
self._xpos = self.GetPosition().x
self._ypos = self._bottomright[1]
self._windowsize = 0
# checking the type of the scroll (from up to down or from down to up)
if self._scrollType == TB_SCR_TYPE_UD:
self._start = self._dialogtop[1]
self._stop = self._ypos
self._scrollStep = self._step
elif self._scrollType == TB_SCR_TYPE_DU:
self._start = self._ypos
self._stop = self._dialogtop[1]
self._scrollStep = -self._step
else:
# walk down the Y value
self._windowsize = self.GetSize().GetHeight()
# checking the type of the scroll (from up to down or from down to up)
if self._scrollType == TB_SCR_TYPE_UD:
self._start = self._bottomright.y
self._stop = self._dialogtop[1]
self._scrollStep = -self._step
elif self._scrollType == TB_SCR_TYPE_DU:
self._start = self._dialogtop[1]
self._stop = self._bottomright.y
self._scrollStep = self._step
self._currentStep = self._start
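# Informal summary of the bookkeeping above: while showing (wx.UP) with
# TB_SCR_TYPE_DU the window rises from the bottom edge (_start at the bottom,
# negative _scrollStep) as its height grows, whereas TB_SCR_TYPE_UD keeps the
# top edge fixed and only grows the height; while hiding (wx.DOWN) the start,
# stop and step values are simply reversed.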
def ScrollUp(self):
""" Scrolls the :class:`ToasterBox` up, which means gradually showing it. """
if self._scrollType == TB_SCR_TYPE_FADE:
self._amount = 0
self._delta = 5
self.SetSize(self.GetSize())
self._alphaTimer.Start(self._sleeptime)
else:
self.Show(True)
self._scrollTimer.Start(self._sleeptime)
def ScrollDown(self):
""" Scrolls the :class:`ToasterBox` down, which means gradually hiding it. """
if self._scrollType == TB_SCR_TYPE_FADE:
self._amount = 255
self._delta = -5
self._alphaTimer.Start(self._sleeptime)
else:
self._scrollTimer.Start(self._sleeptime)
def OnPaint(self, event):
"""
Handles the ``wx.EVT_PAINT`` event for :class:`ToasterBoxWindow`.
:param `event`: a :class:`PaintEvent` event to be processed.
:note: This event is handled and processed only if the style ``TB_SIMPLE`` is
given to :class:`ToasterBox`.
"""
dc = wx.AutoBufferedPaintDC(self)
self.DrawText(dc)
def DrawText(self, dc=None):
"""
Draws the text label for a :class:`ToasterBox` with ``TB_SIMPLE`` style set.
:param `dc`: an instance of :class:`DC`. If defaulted to ``None``, a :class:`ClientDC`
will be created on the fly.
"""
if dc is None:
dc = wx.ClientDC(self)
dc.SetBackground(wx.Brush(self._backgroundcolour))
dc.Clear()
if self._staticbitmap:
dc.DrawBitmap(self._staticbitmap, 0, 0)
dc.SetFont(self._textfont)
dc.SetTextForeground(self._textcolour)
if not hasattr(self, "text_coords"):
self._getTextCoords(dc)
dc.DrawTextList(*self.text_coords)
def AlphaCycle(self, event):
"""
Handles the ``wx.EVT_TIMER`` event for :class:`ToasterBoxWindow`.
:param `event`: a :class:`TimerEvent` event to be processed.
"""
# Increase (or decrease) the alpha channel
self._amount += self._delta
if self._tbstyle == TB_SIMPLE:
self.Refresh(False)
if self._amount > 255 or self._amount < 0:
# We're done, stop the timer
self._alphaTimer.Stop()
if self._amount < 0:
self.Hide()
if self._parent2:
self._parent2.Notify()
elif self._amount > 255:
if self._usefocus:
self.SetFocus()
else:
self._originalfocus.SetFocus()
return
# Make the ToasterBoxWindow more or less transparent
self.MakeWindowTransparent(self._amount)
if not self.IsShown():
self.Show()
def MakeWindowTransparent(self, amount):
"""
Makes the :class:`ToasterBoxWindow` window transparent.
:param `amount`: the alpha channel value.
"""
if not self.CanSetTransparent():
return
self.SetTransparent(amount)
def _getTextCoords(self, dc):
"""
Computes the coordinates at which to draw the user specified text.
:param `dc`: an instance of :class:`DC`.
:note: Use this method only for a :class:`ToasterBox` created with the ``TB_SIMPLE`` style.
"""
# border from sides and top to text (in pixels)
border = 7
# how much space between text lines
textPadding = 2
pText = self.GetPopupText()
max_len = len(pText)
tw, th = self._parent2._popupsize
if self._windowstyle == TB_CAPTION:
th = th - 20
while 1:
lines = textwrap.wrap(pText, max_len)
for line in lines:
w, h = dc.GetTextExtent(line)
if w > tw - border * 2:
max_len -= 1
break
else:
break
fh = 0
for line in lines:
w, h = dc.GetTextExtent(line)
fh += h + textPadding
y = (th - fh) / 2; coords = []
for line in lines:
w, h = dc.GetTextExtent(line)
x = (tw - w) / 2
coords.append((x, y))
y += h + textPadding
self.text_coords = (lines, coords)
|
mit
| -4,192,503,519,830,455,000
| 33.077385
| 279
| 0.525103
| false
| 4.279377
| false
| false
| false
|
funkyfuture/deck-chores
|
deck_chores/indexes.py
|
1
|
2095
|
from functools import lru_cache
from types import MappingProxyType
from typing import Dict, Tuple
from deck_chores.config import cfg, CONTAINER_CACHE_SIZE
from deck_chores.utils import log
####
@lru_cache(maxsize=CONTAINER_CACHE_SIZE)
def container_name(container_id: str) -> str:
return cfg.client.containers.get(container_id).name
####
_service_locks_by_container_id: Dict[str, Tuple[str, ...]] = {}
service_locks_by_container_id = MappingProxyType(_service_locks_by_container_id)
_service_locks_by_service_id: Dict[Tuple[str, ...], str] = {}
service_locks_by_service_id = MappingProxyType(_service_locks_by_service_id)
def lock_service(service_id: Tuple[str, ...], container_id: str):
assert service_id not in service_locks_by_service_id
_service_locks_by_service_id[service_id] = container_id
assert container_id not in service_locks_by_container_id
_service_locks_by_container_id[container_id] = service_id
log.debug(f"Added lock for service {service_id} on container {container_id}.")
def reassign_service_lock(old_container_id: str, new_container_id: str):
service_id = _service_locks_by_container_id.pop(old_container_id)
assert old_container_id not in service_locks_by_container_id
assert new_container_id not in service_locks_by_container_id
_service_locks_by_container_id[new_container_id] = service_id
assert service_id in service_locks_by_service_id
_service_locks_by_service_id[service_id] = new_container_id
log.debug(
f"Reassigned lock for service {service_id} from container {old_container_id} "
f"to {new_container_id}."
)
def unlock_service(container_id: str):
service_id = _service_locks_by_container_id.pop(container_id, None)
if service_id is None:
return
_service_locks_by_service_id.pop(service_id)
log.debug(f"Removed lock for service {service_id} on container {container_id}.")
__all__ = (
"service_locks_by_container_id",
"service_locks_by_service_id",
lock_service.__name__,
reassign_service_lock.__name__,
unlock_service.__name__,
)
|
isc
| 137,992,603,129,664,260
| 33.344262
| 86
| 0.71074
| false
| 3.193598
| false
| false
| false
|
Tim-Erwin/sanic
|
sanic/router.py
|
1
|
13870
|
import re
from collections import defaultdict, namedtuple
from collections.abc import Iterable
from functools import lru_cache
from sanic.exceptions import NotFound, InvalidUsage
from sanic.views import CompositionView
Route = namedtuple(
'Route',
['handler', 'methods', 'pattern', 'parameters', 'name', 'uri'])
Parameter = namedtuple('Parameter', ['name', 'cast'])
REGEX_TYPES = {
'string': (str, r'[^/]+'),
'int': (int, r'\d+'),
'number': (float, r'[0-9\\.]+'),
'alpha': (str, r'[A-Za-z]+'),
'path': (str, r'[^/].*?'),
}
ROUTER_CACHE_SIZE = 1024
def url_hash(url):
return url.count('/')
class RouteExists(Exception):
pass
class RouteDoesNotExist(Exception):
pass
class Router:
"""Router supports basic routing with parameters and method checks
Usage:
.. code-block:: python
@sanic.route('/my/url/<my_param>', methods=['GET', 'POST', ...])
def my_route(request, my_param):
do stuff...
or
.. code-block:: python
@sanic.route('/my/url/<my_param:my_type>', methods=['GET', 'POST', ...])
def my_route_with_type(request, my_param: my_type):
do stuff...
Parameters will be passed as keyword arguments to the request handling
function. Parameters can also be given a type by appending :type inside
the <parameter> brackets; the captured value must be castable to that type.
If no type is provided, a string is expected. A regular expression can
also be passed in as the type. The argument given to the function will
always be a string, independent of the type.
"""
routes_static = None
routes_dynamic = None
routes_always_check = None
parameter_pattern = re.compile(r'<(.+?)>')
def __init__(self):
self.routes_all = {}
self.routes_names = {}
self.routes_static = {}
self.routes_dynamic = defaultdict(list)
self.routes_always_check = []
self.hosts = set()
@classmethod
def parse_parameter_string(cls, parameter_string):
"""Parse a parameter string into its constituent name, type, and
pattern
For example::
parse_parameter_string('param_one:[A-z]') ->
('param_one', str, '[A-z]')
:param parameter_string: String to parse
:return: tuple containing
(parameter_name, parameter_type, parameter_pattern)
"""
# We could receive NAME or NAME:PATTERN
name = parameter_string
pattern = 'string'
if ':' in parameter_string:
name, pattern = parameter_string.split(':', 1)
default = (str, pattern)
# Pull from pre-configured types
_type, pattern = REGEX_TYPES.get(pattern, default)
return name, _type, pattern
def add(self, uri, methods, handler, host=None, strict_slashes=False,
version=None, name=None):
"""Add a handler to the route list
:param uri: path to match
:param methods: sequence of accepted method names. If none are
provided, any method is allowed
:param handler: request handler function.
When executed, it should provide a response object.
:param strict_slashes: strict to trailing slash
:param version: current version of the route or blueprint. See
docs for further details.
:return: Nothing
"""
if version is not None:
if uri.startswith('/'):
uri = "/".join(["/v{}".format(str(version)), uri[1:]])
else:
uri = "/".join(["/v{}".format(str(version)), uri])
# add regular version
self._add(uri, methods, handler, host, name)
if strict_slashes:
return
# Add versions with and without trailing /
slash_is_missing = (
not uri[-1] == '/' and not self.routes_all.get(uri + '/', False)
)
without_slash_is_missing = (
uri[-1] == '/' and not
self.routes_all.get(uri[:-1], False) and not
uri == '/'
)
# add version with trailing slash
if slash_is_missing:
self._add(uri + '/', methods, handler, host, name)
# add version without trailing slash
elif without_slash_is_missing:
self._add(uri[:-1], methods, handler, host, name)
def _add(self, uri, methods, handler, host=None, name=None):
"""Add a handler to the route list
:param uri: path to match
:param methods: sequence of accepted method names. If none are
provided, any method is allowed
:param handler: request handler function.
When executed, it should provide a response object.
:return: Nothing
"""
if host is not None:
if isinstance(host, str):
uri = host + uri
self.hosts.add(host)
else:
if not isinstance(host, Iterable):
raise ValueError("Expected either string or Iterable of "
"host strings, not {!r}".format(host))
for host_ in host:
self.add(uri, methods, handler, host_, name)
return
# Dict for faster lookups of if method allowed
if methods:
methods = frozenset(methods)
parameters = []
properties = {"unhashable": None}
def add_parameter(match):
name = match.group(1)
name, _type, pattern = self.parse_parameter_string(name)
parameter = Parameter(
name=name, cast=_type)
parameters.append(parameter)
# Mark the whole route as unhashable if the parameter pattern contains a '/' (url_hash buckets routes by slash count)
if re.search(r'(^|[^^]){1}/', pattern):
properties['unhashable'] = True
# Mark the route as unhashable if it matches the hash key
elif re.search(r'/', pattern):
properties['unhashable'] = True
return '({})'.format(pattern)
pattern_string = re.sub(self.parameter_pattern, add_parameter, uri)
pattern = re.compile(r'^{}$'.format(pattern_string))
def merge_route(route, methods, handler):
# merge to the existing route when possible.
if not route.methods or not methods:
# method-unspecified routes are not mergeable.
raise RouteExists(
"Route already registered: {}".format(uri))
elif route.methods.intersection(methods):
# already existing method is not overloadable.
duplicated = methods.intersection(route.methods)
raise RouteExists(
"Route already registered: {} [{}]".format(
uri, ','.join(list(duplicated))))
if isinstance(route.handler, CompositionView):
view = route.handler
else:
view = CompositionView()
view.add(route.methods, route.handler)
view.add(methods, handler)
route = route._replace(
handler=view, methods=methods.union(route.methods))
return route
if parameters:
# TODO: This is too complex, we need to reduce the complexity
if properties['unhashable']:
routes_to_check = self.routes_always_check
ndx, route = self.check_dynamic_route_exists(
pattern, routes_to_check)
else:
routes_to_check = self.routes_dynamic[url_hash(uri)]
ndx, route = self.check_dynamic_route_exists(
pattern, routes_to_check)
if ndx != -1:
# Pop the ndx of the route, no dups of the same route
routes_to_check.pop(ndx)
else:
route = self.routes_all.get(uri)
# prefix the handler name with the blueprint name
# if available
if hasattr(handler, '__blueprintname__'):
handler_name = '{}.{}'.format(
handler.__blueprintname__, name or handler.__name__)
else:
handler_name = name or getattr(handler, '__name__', None)
if route:
route = merge_route(route, methods, handler)
else:
route = Route(
handler=handler, methods=methods, pattern=pattern,
parameters=parameters, name=handler_name, uri=uri)
self.routes_all[uri] = route
pairs = self.routes_names.get(handler_name)
if not (pairs and (pairs[0] + '/' == uri or uri + '/' == pairs[0])):
self.routes_names[handler_name] = (uri, route)
if properties['unhashable']:
self.routes_always_check.append(route)
elif parameters:
self.routes_dynamic[url_hash(uri)].append(route)
else:
self.routes_static[uri] = route
@staticmethod
def check_dynamic_route_exists(pattern, routes_to_check):
for ndx, route in enumerate(routes_to_check):
if route.pattern == pattern:
return ndx, route
else:
return -1, None
def remove(self, uri, clean_cache=True, host=None):
if host is not None:
uri = host + uri
try:
route = self.routes_all.pop(uri)
for handler_name, pairs in self.routes_names.items():
if pairs[0] == uri:
self.routes_names.pop(handler_name)
break
except KeyError:
raise RouteDoesNotExist("Route was not registered: {}".format(uri))
if route in self.routes_always_check:
self.routes_always_check.remove(route)
elif url_hash(uri) in self.routes_dynamic \
and route in self.routes_dynamic[url_hash(uri)]:
self.routes_dynamic[url_hash(uri)].remove(route)
else:
self.routes_static.pop(uri)
if clean_cache:
self._get.cache_clear()
@lru_cache(maxsize=ROUTER_CACHE_SIZE)
def find_route_by_view_name(self, view_name):
"""Find a route in the router based on the specified view name.
:param view_name: string of view name to search by
:return: tuple containing (uri, Route)
"""
if not view_name:
return (None, None)
return self.routes_names.get(view_name, (None, None))
def get(self, request):
"""Get a request handler based on the URL of the request, or raises an
error
:param request: Request object
:return: handler, arguments, keyword arguments
"""
# No virtual hosts specified; default behavior
if not self.hosts:
return self._get(request.path, request.method, '')
# virtual hosts specified; try to match route to the host header
try:
return self._get(request.path, request.method,
request.headers.get("Host", ''))
# try default hosts
except NotFound:
return self._get(request.path, request.method, '')
@lru_cache(maxsize=ROUTER_CACHE_SIZE)
def _get(self, url, method, host):
"""Get a request handler based on the URL of the request, or raises an
error. Internal method for caching.
:param url: request URL
:param method: request method
:return: handler, arguments, keyword arguments
"""
url = host + url
# Check against known static routes
route = self.routes_static.get(url)
method_not_supported = InvalidUsage(
'Method {} not allowed for URL {}'.format(
method, url), status_code=405)
if route:
if route.methods and method not in route.methods:
raise method_not_supported
match = route.pattern.match(url)
else:
route_found = False
# Move on to testing all regex routes
for route in self.routes_dynamic[url_hash(url)]:
match = route.pattern.match(url)
route_found |= match is not None
# Do early method checking
if match and method in route.methods:
break
else:
# Lastly, check against all regex routes that cannot be hashed
for route in self.routes_always_check:
match = route.pattern.match(url)
route_found |= match is not None
# Do early method checking
if match and method in route.methods:
break
else:
# Route was found but the methods didn't match
if route_found:
raise method_not_supported
raise NotFound('Requested URL {} not found'.format(url))
kwargs = {p.name: p.cast(value)
for value, p
in zip(match.groups(1), route.parameters)}
route_handler = route.handler
if hasattr(route_handler, 'handlers'):
route_handler = route_handler.handlers[method]
return route_handler, [], kwargs, route.uri
def is_stream_handler(self, request):
""" Handler for request is stream or not.
:param request: Request object
:return: bool
"""
try:
handler = self.get(request)[0]
except (NotFound, InvalidUsage):
return False
if (hasattr(handler, 'view_class') and
hasattr(handler.view_class, request.method.lower())):
handler = getattr(handler.view_class, request.method.lower())
return hasattr(handler, 'is_stream')
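# Hypothetical, framework-free sketch (not part of sanic): it exercises
# Router.add()/get() with a typed parameter and the version prefix, using a
# stand-in request object instead of sanic.request.Request (only .path and
# .method are consulted when no virtual hosts are registered).
def _example_router_usage():
    from collections import namedtuple
    FakeRequest = namedtuple('FakeRequest', ['path', 'method', 'headers'])

    router = Router()
    router.add('/user/<user_id:int>', ['GET'], lambda request, user_id: user_id)
    handler, args, kwargs, uri = router.get(FakeRequest('/user/42', 'GET', {}))
    assert kwargs == {'user_id': 42}   # cast by the 'int' entry in REGEX_TYPES

    router.add('/ping', ['GET'], lambda request: 'pong', version=1)
    handler, args, kwargs, uri = router.get(FakeRequest('/v1/ping', 'GET', {}))
    assert uri == '/v1/ping'           # version=1 registered it as '/v1/ping'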
|
mit
| 1,279,275,129,031,850,000
| 35.214099
| 79
| 0.559697
| false
| 4.446938
| false
| false
| false
|
StuartAxelOwen/join
|
join/_join_funcs.py
|
1
|
2328
|
from functools import partial
__author__ = 'stuart'
def get_object_attrs(obj):
if hasattr(obj, '__dict__'):
return obj.__dict__
elif hasattr(obj, '__slots__'):
return {key: getattr(obj, key) for key in obj.__slots__}
else:
return {}
class Union(object):
def __init__(self, attributes):
if isinstance(attributes, dict):
for name, value in attributes.items():
setattr(self, name, value)
else:
for name, value in attributes:
setattr(self, name, value)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.__dict__)
def tuple_join(left, right):
"""
Returns a tuple of the joined objects
>>> tuple_join(1, '2')
(1, '2')
:param left: left object to be joined with right
:param right: right object to be joined with left
:return: tuple containing both join parents
"""
return left, right
def union_join(left, right, left_as='left', right_as='right'):
"""
Join function truest to the SQL style join. Merges both objects together in a sum-type,
saving references to each parent in ``left`` and ``right`` attributes.
>>> Dog = namedtuple('Dog', ['name', 'woof', 'weight'])
>>> dog = Dog('gatsby', 'Ruff!', 15)
>>> Cat = namedtuple('Cat', ['name', 'meow', 'weight'])
>>> cat = Cat('pleo', 'roooowwwr', 12)
>>> catdog = union_join(cat, dog, 'cat', 'dog')
>>> catdog.name
'pleo'
>>> catdog.woof
'Ruff!'
>>> catdog.dog.name
'gatsby'
:param left: left object to be joined with right
:param right: right object to be joined with left
:return: joined object with attrs/methods from both parents available
"""
attrs = {}
attrs.update(get_object_attrs(right))
attrs.update(get_object_attrs(left))
attrs[left_as] = left
attrs[right_as] = right
if isinstance(left, dict) and isinstance(right, dict):
return attrs
else:
joined_class = type(left.__class__.__name__ + right.__class__.__name__, (Union,),
{})
return joined_class(attrs)
def make_union_join(left_as='left', right_as='right'):
return partial(union_join, left_as=left_as, right_as=right_as)
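# Illustrative sketch (not part of the original module): make_union_join fixes
# the parent attribute names up front and returns a ready-to-use join function.
if __name__ == '__main__':
    cat_dog_join = make_union_join(left_as='cat', right_as='dog')
    joined = cat_dog_join({'name': 'pleo'}, {'name': 'gatsby'})
    # both inputs are plain dicts, so union_join returns a dict rather than a Union
    assert joined['cat'] == {'name': 'pleo'}
    assert joined['dog']['name'] == 'gatsby'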
|
mit
| -4,050,428,239,922,643,500
| 29.233766
| 92
| 0.572165
| false
| 3.742765
| false
| false
| false
|
soylentdeen/Graffity
|
src/ErrorBudgetAnalysis.py
|
1
|
7600
|
import Graffity
import numpy
import scipy
import matplotlib.pyplot as pyplot
wave = 632.8
ciao = Graffity.WFS(wavelength=1800.0)
var = numpy.array([False, False, True, True, True])
offsets = []
x = 0
for v in var:
if v:
offsets.append(x)
x+= 1
else:
offsets.append(0)
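# Added note: offsets[i] ends up holding the rank of Zernike mode i among the
# enabled modes (0 for disabled ones); it is used below to pick each mode's
# exposures out of the interleaved centroid list.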
zern = [0.0, 0.0, 0.0, 0.0, 0.0]
pupil = [0.0, 0.0]
actPoke = numpy.zeros(60, dtype=numpy.float32)
derotAngle = 0.00
clockingAngle = 0.0
# Take the flat-wavefront image
ciao.setupInstrument(zern, pupil, actPoke, derotAngle, clockingAngle)
ciao.expose()
if var[0]:
f0 = pyplot.figure(0)
f0.clear()
ax0 = f0.add_axes([0.1, 0.1, 0.8, 0.8])
if var[1]:
f1 = pyplot.figure(1)
f1.clear()
ax1 = f1.add_axes([0.1, 0.1, 0.8, 0.8])
if var[2]:
f2 = pyplot.figure(2)
f2.clear()
ax2 = f2.add_axes([0.1, 0.1, 0.8, 0.8])
if var[3]:
f3 = pyplot.figure(3)
f3.clear()
ax3 = f3.add_axes([0.1, 0.1, 0.8, 0.8])
if var[4]:
f4 = pyplot.figure(4)
f4.clear()
ax4 = f4.add_axes([0.1, 0.1, 0.8, 0.8])
f5 = pyplot.figure(5)
f5.clear()
f6 = pyplot.figure(6)
f6.clear()
ax5 = f5.add_axes([0.1, 0.1, 0.8, 0.8])
ax6 = f6.add_axes([0.1, 0.1, 0.8, 0.8])
wferror = numpy.linspace(-2.0*wave, 2.0*wave, num=14)
clockingAngle = 0.00
for rms in wferror:
print rms
if var[0]:
zern = [rms, 0.0, 0.0, 0.0, 0.0]
ciao.setupInstrument(zern, pupil, actPoke, derotAngle, clockingAngle)
ciao.expose()
if var[1]:
zern = [0.0, rms, 0.0, 0.0, 0.0]
ciao.setupInstrument(zern, pupil, actPoke, derotAngle, clockingAngle)
ciao.expose()
if var[2]:
zern = [0.0, 0.0, rms, 0.0, 0.0]
ciao.setupInstrument(zern, pupil, actPoke, derotAngle, clockingAngle)
ciao.expose()
if var[3]:
zern = [0.0, 0.0, 0.0, rms, 0.0]
ciao.setupInstrument(zern, pupil, actPoke, derotAngle, clockingAngle)
ciao.expose()
if var[4]:
zern = [0.0, 0.0, 0.0, 0.0, rms]
ciao.setupInstrument(zern, pupil, actPoke, derotAngle, clockingAngle)
ciao.expose()
centroids = numpy.array(ciao.centroids)
nvars = len(var[var==True])
flat = centroids[0]
if var[0]:
tip = centroids[[i*nvars+offsets[0]+1 for i in range(len(wferror))]]-flat
if var[1]:
tilt = centroids[[i*nvars+offsets[1]+1 for i in range(len(wferror))]]-flat
if var[2]:
focus = centroids[[i*nvars+offsets[2]+1 for i in range(len(wferror))]]-flat
if var[3]:
astig1 = centroids[[i*nvars+offsets[3]+1 for i in range(len(wferror))]]-flat
if var[4]:
astig2 = centroids[[i*nvars+offsets[4]+1 for i in range(len(wferror))]]-flat
colorMap = pyplot.get_cmap()
colors = [colorMap(i) for i in numpy.linspace(0, 1, len(wferror))]
subapnum = range(68)
rms_x = []
rms_y = []
max_x = []
max_y = []
for i in range(len(wferror)):
rx = []
ry = []
mx = []
my = []
if var[0]:
ax0.plot(subapnum, tip[:,:,0][i], color=colors[i], marker='o')
ax0.plot(subapnum, tip[:,:,1][i], color=colors[i], marker='+')
rx.append(numpy.sqrt(numpy.average(tip[:,:,0][i]**2.0)))
ry.append(numpy.sqrt(numpy.average(tip[:,:,1][i]**2.0)))
mx.append(numpy.max(numpy.abs(tip[:,:,0][i])))
my.append(numpy.max(numpy.abs(tip[:,:,1][i])))
if var[1]:
ax1.plot(subapnum, tilt[:,:,0][i], color=colors[i], marker='o')
ax1.plot(subapnum, tilt[:,:,1][i], color=colors[i], marker='+')
rx.append(numpy.sqrt(numpy.average(tilt[:,:,0][i]**2.0)))
ry.append(numpy.sqrt(numpy.average(tilt[:,:,1][i]**2.0)))
mx.append(numpy.max(numpy.abs(tilt[:,:,0][i])))
my.append(numpy.max(numpy.abs(tilt[:,:,1][i])))
if var[2]:
ax2.plot(subapnum, focus[:,:,0][i], color=colors[i], marker='o')
ax2.plot(subapnum, focus[:,:,1][i], color=colors[i], marker='+')
rx.append(numpy.sqrt(numpy.average(focus[:,:,0][i]**2.0)))
ry.append(numpy.sqrt(numpy.average(focus[:,:,1][i]**2.0)))
mx.append(numpy.max(numpy.abs(focus[:,:,0][i])))
my.append(numpy.max(numpy.abs(focus[:,:,1][i])))
if var[3]:
ax3.plot(subapnum, astig1[:,:,0][i], color=colors[i], marker='o')
ax3.plot(subapnum, astig1[:,:,1][i], color=colors[i], marker='+')
rx.append(numpy.sqrt(numpy.average(astig1[:,:,0][i]**2.0)))
ry.append(numpy.sqrt(numpy.average(astig1[:,:,1][i]**2.0)))
mx.append(numpy.max(numpy.abs(astig1[:,:,0][i])))
my.append(numpy.max(numpy.abs(astig1[:,:,1][i])))
if var[4]:
ax4.plot(subapnum, astig2[:,:,0][i], color=colors[i], marker='o')
ax4.plot(subapnum, astig2[:,:,1][i], color=colors[i], marker='+')
rx.append(numpy.sqrt(numpy.average(astig2[:,:,0][i]**2.0)))
ry.append(numpy.sqrt(numpy.average(astig2[:,:,1][i]**2.0)))
mx.append(numpy.max(numpy.abs(astig2[:,:,0][i])))
my.append(numpy.max(numpy.abs(astig2[:,:,1][i])))
rms_x.append(rx)
rms_y.append(ry)
max_x.append(mx)
max_y.append(my)
rms_x = numpy.array(rms_x).transpose()
rms_y = numpy.array(rms_y).transpose()
max_x = numpy.array(max_x).transpose()
max_y = numpy.array(max_y).transpose()
labels = []
lines = []
if var[0]:
lines.append(ax5.plot(wferror, max_x[offsets[0]], color = 'b', marker = 'o')[0])
ax6.plot(wferror, max_y[offsets[0]], color = 'b', marker = 'o')
labels.append["Tip"]
#ax5.plot(wferror, rms_x[0], color = 'b', marker = '+')
if var[1]:
lines.append(ax5.plot(wferror, max_x[offsets[1]], color = 'g', marker = 'o')[0])
ax6.plot(wferror, max_y[offsets[1]], color = 'g', marker = 'o')
labels.append["Tilt"]
#ax5.plot(wferror, rms_x[1], color = 'g', marker = '+')
if var[2]:
lines.append(ax5.plot(wferror, max_x[offsets[2]], color = 'r', marker = 'o')[0])
ax6.plot(wferror, max_y[offsets[2]], color = 'r', marker = 'o')
labels.append("Focus")
#ax5.plot(wferror, rms_x[2], color = 'r', marker = '+')
if var[3]:
lines.append(ax5.plot(wferror, max_x[offsets[3]], color = 'c', marker = 'o')[0])
ax6.plot(wferror, max_y[offsets[3]], color = 'c', marker = 'o')
labels.append("Astig1")
#ax5.plot(wferror, rms_x[3], color = 'c', marker = '+')
if var[4]:
lines.append(ax5.plot(wferror, max_x[offsets[4]], color = 'm', marker = 'o')[0])
ax6.plot(wferror, max_y[offsets[4]], color = 'm', marker = 'o')
labels.append("Astig2")
#ax5.plot(wferror, rms_x[4], color = 'm', marker = '+')
ax5.set_xlabel("RMS Wavefront Error (nm)")
ax5.set_ylabel("Maximum X Slope (pixels)")
f5.legend(lines, labels)
ax5.set_title('X Slopes')
ax6.set_xlabel("RMS Wavefront Error (nm)")
ax6.set_ylabel("Maximum Y Slope (pixels)")
f6.legend(lines, labels)
ax6.set_title('Y Slopes')
if var[0]:
ax0.set_xlabel("Subaperture Number")
ax0.set_ylabel("Slopes (Pixels)")
ax0.set_title('Tip')
f0.show()
f0.savefig('tip.png')
if var[1]:
ax1.set_xlabel("Subaperture Number")
ax1.set_ylabel("Slopes (Pixels)")
ax1.set_title('Tilt')
f1.show()
f1.savefig('tilt.png')
if var[2]:
ax2.set_xlabel("Subaperture Number")
ax2.set_ylabel("Slopes (Pixels)")
ax2.set_title('Focus')
f2.show()
f2.savefig('focus.png')
if var[3]:
ax3.set_xlabel("Subaperture Number")
ax3.set_ylabel("Slopes (Pixels)")
ax3.set_title('Oblique Astigmatism')
f3.show()
f3.savefig('ObliqAstig.png')
if var[4]:
ax4.set_xlabel("Subaperture Number")
ax4.set_ylabel("Slopes (Pixels)")
ax4.set_title('Vertical Astigmatism')
f4.show()
f4.savefig('VertAstig.png')
f5.show()
f5.savefig('Xerror.png')
f6.show()
f6.savefig('Yerror.png')
|
mit
| -3,690,607,500,366,776,000
| 31.478632
| 84
| 0.589474
| false
| 2.509079
| false
| false
| false
|
jbvsmo/discoder
|
discoder/lib/parse.py
|
1
|
2685
|
# coding: utf-8
""" Copyright (c) 2013 João Bernardo Vianna Oliveira
This file is part of Discoder.
Discoder is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Discoder is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Discoder. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'jb'
from discoder.lib import Obj
import re
class ParseError(Exception):
pass
tag = Obj(
# Regex for parsing the markup language generated by ffprobe or avprobe
open = re.compile(r'^\[(\w+)\]$'),
close = re.compile(r'^\[/(\w+)\]$'),
value = re.compile(r'^(\w+)(?::(\w+))?=(.*)$')
)
def probe(text):
"""
Parse multiline text generated by `ffprobe` or `avprobe`
Command line:
ffprobe -v quiet [-show_format] [-show_streams] filename
:type text: str
Input:
------
[TAG]
data_x=1
data_y=2
INFO:data_z=3
[/TAG]
Output:
-------
{'tag': [{'data_x': 1, 'data_y': 2, 'info': {'data_z': 3}}]}
"""
blocks = Obj()
this = None
for i, line in enumerate(text.splitlines()):
if not line.strip():
continue
open_block = tag.open.match(line)
if open_block:
if this is not None:
raise ParseError('Opened block without closing last one: {0}: {1}'.format(i, line))
this = Obj()
name = open_block.group(1).lower()
if name == 'stream':
name += 's' # compatibility with json output
if name != 'format': # "format" only has one element.
blocks.setdefault(name, []).append(this)
else:
blocks[name] = this
else:
if this is None:
raise ParseError("There's no block to insert data or close: {0}: {1}".format(i, line))
if tag.close.match(line):
this = None
else:
name, sub, val = tag.value.match(line).groups()
if not sub:
this[name] = val
else:
attr = this.setdefault(name.lower(), Obj())
attr[sub] = val
return blocks
|
gpl-3.0
| 4,299,707,843,823,941,600
| 30.952381
| 102
| 0.548808
| false
| 4.085236
| false
| false
| false
|
kubow/HAC
|
System/UI74KW.py
|
1
|
2115
|
#!/usr/bin/python3
import os.path
from kivy.resources import resource_add_path
KV_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__)))
resource_add_path(KV_PATH)
print(KV_PATH)
#import kivy
#kivy.require('1.7.1')
from kivy.lang import Builder
Builder.load_file('H808E.kv')
from kivy.app import App
from kivy.properties import ObjectProperty
#from kiwi.uix.scatter import Scatter
from kivy.uix.listview import ListItemButton
#from kivy.adapters.listadapter import ListAdapter
from kivy.uix.boxlayout import BoxLayout
#from kiwi.uix.floatlayout import FloatLayout #good na 3d
from kivy.uix.gridlayout import GridLayout
from kivy.properties import ListProperty, StringProperty
from OS74 import FileSystemObject
class ShowEnc(GridLayout):
main_text = ObjectProperty(None)
folder_list = ListProperty([])
folder_select = StringProperty('Select a folder')
file_list = ListProperty([])
file_select = StringProperty('Select a file')
fldr_lib, file_lib = FileSystemObject().object_read_split()
actual_location = FileSystemObject().path
def multimedia_content(self):
print(self.actual_location)
directory = FileSystemObject(self.actual_location).dir_up(1)
self.fldr_lib, self.file_lib = FileSystemObject(directory).object_read_split()
print(directory)
# clear the lists content
del self.file_list.adapter.data[:]
del self.folder_list.adapter.data[:]
# append new data
self.file_list.append(self.file_lib)
self.folder_list.append(self.fldr_lib)
def folder_on_select(self, change_value):
self.selected_value = "Selected: {0}".format(change_value.text)
print(self.selected_value)
def file_on_select(self, change_value):
self.selected_value = "Selected: {0}".format(change_value.text)
print(self.selected_value)
def clear(self):
self.main_text.text = ""
self.main_text.focus = True
class MainApp(App):
title = 'H808E'
def build(self):
return ShowEnc()
if __name__ == '__main__':
MainApp().run()
|
unlicense
| -7,743,993,530,130,653,000
| 29.652174
| 86
| 0.689362
| false
| 3.519135
| false
| false
| false
|
PatentBlocker/Motorola_Patent_Citations
|
src/get_citations.py
|
1
|
1772
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 16:08:56 2016
@author: Thibault
"""
import pandas as pd
import numpy as np
# Loading the data
data_dir = '../data'
# training and validation sets
train_file = data_dir + '/blocking1114.csv'
# Opening the blocking data
TrainFile = pd.read_csv(train_file, header=None)
TrainFile.columns = ['Application', 'Patent_Blocking']
# Opening the Portfolio database
portf = data_dir + '/SamplePortfolioforBerkeley.csv'
Moto_database = pd.read_csv(portf, sep=',')
# Creating the query
Moto_Patents = np.asarray(Moto_database['Patent #'])
# Returns
def foo(s1):
return "'{}'".format(s1)
def query(table):
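# build one SELECT over uspatentcitation, OR-ing together every patent number in the portfolio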
query = 'SELECT uspatentcitation.citation_id, uspatentcitation.patent_id FROM uspatentcitation WHERE uspatentcitation.citation_id='
for k in table:
if k != table[-1]:
query += foo(str(k)) + ' OR uspatentcitation.citation_id='
else:
query += foo(str(k))
return query
print(query(Moto_Patents))
# Connecting to the server
"NEED TO CONNECT TO SQL DATABASE USING MySQL"
# Doing the query to get the database
"""
SELECT uspatentcitation.citation_id, uspatentcitation.patent_id
FROM uspatentcitation
WHERE uspatentcitation.citation_id='7046910'
OR uspatentcitation.citation_id='5903133'
OR uspatentcitation.citation_id='8395587'
OR uspatentcitation.citation_id='6408436'
OR uspatentcitation.citation_id='7190956'
OR uspatentcitation.citation_id='6778512'
OR uspatentcitation.citation_id='5794185'
OR uspatentcitation.citation_id='6592696'
OR uspatentcitation.citation_id='8078203'
OR uspatentcitation.citation_id='8229428'
OR uspatentcitation.citation_id='7555696'
OR uspatentcitation.citation_id='5946653'
OR uspatentcitation.citation_id='7675970'
""""
|
bsd-2-clause
| -3,259,559,936,448,086,000
| 22.315789
| 135
| 0.731941
| false
| 2.862682
| false
| false
| false
|
public/python-super3
|
test.py
|
1
|
2905
|
import time
import inspect
import random
from super3 import more_super3 as super3, callable_super3
def super_proxy(self, type):
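# build both the built-in super() and super3() for the caller's frame and check they describe the same classes and instance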
py_super = super(type, self)
my_super = super3(caller=inspect.currentframe().f_back)
assert py_super.__str__() == my_super.__str__() \
or (py_super.__self_class__ == my_super.__self_class__ and \
py_super.__self__ == my_super.__self__ and \
py_super.__thisclass__ == my_super.__thisclass__)
return my_super
def with_super_result(func):
def decorated(self, *args, **kwargs):
s = super3()
r = getattr(s, func.__name__)()
return func(self, r)
return decorated
class A(object):
def f(self):
return A
class A2(object):
def f(self):
return A2
class A3(A):
@with_super_result
def f(self, super):
return A3
class B(A):
pass
class C(B):
def f(self):
return super_proxy(self, C).f()
class D(C, A2):
def f(self):
return C.f(self)
class E(C, A, A2):
pass
class F(E):
def f(self):
return super_proxy(self, F).f()
class G(F, E, A):
def f(self):
r = super_proxy(self, G).f()
assert r == A
return F.f(self)
class H(G):
@with_super_result
def f(self, super):
return H
class I(H):
@with_super_result
def f(self, super):
assert super == H
return I
class J(A):
def f(self):
r = callable_super3()()
return r
class K(G):
def f(self):
return callable_super3()()
classes = [J, K, A3, I, H, A, A2, B, C, D, E, F, G]
random.shuffle(classes)
print(classes)
for cls in classes:
print((cls, cls().f()))
def speed():
class A(object):
def f(self):
return A, self
class myImplicitB(A):
def f(self):
return super3().f()
class myExplicitB(A):
def f(self):
return super3(myExplicitB, self).f()
class pyB(A):
def f(self):
return super(pyB, self).f()
class myDecoratedB(A):
@with_super_result
def f(self, result):
return self
def super_time(cls):
b = cls()
N = 10000
U = 10
s = time.time()
for i in range(1, N):
b.f()
b.f()
b.f()
b.f()
b.f()
b.f()
b.f()
b.f()
b.f()
b.f()
e = time.time()
print((e-s), (e-s)/(N*U))
return (e-s), N*U
py = super_time(pyB)
myI = super_time(myImplicitB)
myE = super_time(myExplicitB)
myD = super_time(myDecoratedB)
print("implicit is", myI[0]/py[0], "times slower than normal super()")
print("explicit is", myE[0]/py[0], "times slower than normal super()")
print("decorated is", myD[0]/py[0], "times slower than normal super()")
speed()
|
lgpl-3.0
| 1,098,780,801,679,328,900
| 19.034483
| 75
| 0.512909
| false
| 3.150759
| false
| false
| false
|
GarethPW/Scratch-Comment-Viewer
|
old/v2.0.0/scratchcomments.py
|
1
|
4830
|
'''
Scratch Project Comments Parser v1.0.0
Created for use with SCV Server v2.0.0
Created by Scratch user, Gaza101.
Licensed under GNU General Public License v3.
www.garethpw.net
'''
from HTMLParser import HTMLParser
from htmlentitydefs import name2codepoint
from urllib2 import urlopen
class CommentsParser(HTMLParser):
def __init__(self,emap={}):
self.emap = emap
self.out = []
self.nest = []
self.comments = str()
def aDict(self,a): #Converts attrs into dict format for easier parsing.
d = {} # e.g. [('class', 'example'),('height', '50px')]
for i in a: # becomes {'class':('example',),'height':('50px',)}
if i[0] in d:
d[i[0]] += (i[1],)
else:
d[i[0]] = (i[1],)
return d
def isLegal(self,n,r): #Checks the nest based on a set of rules provided.
try: # Rule format: [(#tuple of tag nest - can be any length - starts from root tag),(#level of nest, #attr, #value)]
if ( tuple([i[0] for i in n][:len(r[0])]) == r[0]
and not (False in [(True in [sr[2] in i for i in n[sr[0]][1][sr[1]]]) for sr in r[1:]]) ):
return True
except KeyError:
pass
return False
def isCName(self,n): #Checks if the current nest is valid to be the comment username.
return self.isLegal(n,[ ("li","div","div","div",'a'),
(0,"class","top-level-reply"),
(1,"class","comment"),
(2,"class","info"),
(3,"class","name") ])
def isCBody(self,n): #Checks if the current nest is valid to be the comment body.
return self.isLegal(n,[ ("li","div","div","div"),
(0,"class","top-level-reply"),
(1,"class","comment"),
(2,"class","info"),
(3,"class","content") ])
def handle_starttag(self, tag, attrs):
il = (self.isCName(self.nest),self.isCBody(self.nest))
self.nest.append((tag,self.aDict(attrs)))
if il != (self.isCName(self.nest),self.isCBody(self.nest)): #Check if a new comment username or body has begun.
self.out.append([]) #If so, append new list to output array.
if tag == "img": #If the tag is valid to be an emoticon,
if ( self.isCBody(self.nest)
and self.isLegal(self.nest,[ tuple(),
(-1,"class","easter-egg") ]) ):
try:
self.out[-1].append(self.emap[self.nest[-1][1]['src'][0]]) #Attempt to match with its alias in the emoticon map.
except KeyError:
self.out[-1].append("_undefined_") #If alias not found, refer to it as "_undefined_"
self.nest.pop() #Remove image from nest array since it's most likely without an end tag.
def handle_endtag(self,tag):
if tag != "img": #Ignore img end tags since they will have already been dealt with.
self.nest.pop()
def handle_data(self,data):
if self.isCName(self.nest) or self.isCBody(self.nest): #If we're in valid comment text,
self.out[-1].append(data) #Append text to output.
def handle_entityref(self,name):
if self.isCName(self.nest) or self.isCBody(self.nest): #If we're in valid comment text,
self.out[-1].append(unichr(name2codepoint[name])) #Append text to output.
def handle_charref(self,name):
if self.isCName(self.nest) or self.isCBody(self.nest): #If we're in valid comment text,
self.out[-1].append(unichr(int(name[1:],16) if name[0] == 'x' else int(name))) #Append text to output.
def parse(self,project_id,max_comments=30): #Parses any data given. Data must be complete.
comments = urlopen("https://scratch.mit.edu/site-api/comments/project/"+str(project_id)+'/').read()
if self.comments != comments: #If we haven't already parsed this,
self.comments = comments
self.out = [] #Reinitialise the instance.
self.nest = []
self.reset() #Reset the parser.
self.feed(self.comments) #Feed the parser the data from the comments of the project specified.
self.out = tuple( [{"user": u''.join([u''.join([unichr(ord(c)) for c in m]) for m in self.out[i]]), #Convert parsed data into a more usable format. e.g. {'user','Gaza101','msg':'_meow_'}
"msg": u''.join([u''.join([unichr(ord(c)) for c in m]) for m in self.out[i+1]])[23:-12]} for i in range(0,min(len(self.out),max_comments),2)] )
return self.out #Output parsed data.
|
gpl-3.0
| -5,200,480,149,626,272,000
| 56.5
| 199
| 0.547826
| false
| 3.642534
| false
| false
| false
|
kyubifire/softlayer-python
|
SoftLayer/managers/dns.py
|
1
|
8207
|
"""
SoftLayer.dns
~~~~~~~~~~~~~
DNS Manager/helpers
:license: MIT, see LICENSE for more details.
"""
import time
from SoftLayer import utils
class DNSManager(utils.IdentifierMixin, object):
"""Manage SoftLayer DNS.
See product information here: http://www.softlayer.com/DOMAIN-SERVICES
:param SoftLayer.API.BaseClient client: the client instance
"""
def __init__(self, client):
self.client = client
self.service = self.client['Dns_Domain']
self.record = self.client['Dns_Domain_ResourceRecord']
self.resolvers = [self._get_zone_id_from_name]
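# utils.IdentifierMixin presumably walks these resolver callables to turn a zone name into its id (e.g. self.resolve_ids('example.com'))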
def _get_zone_id_from_name(self, name):
"""Return zone ID based on a zone."""
results = self.client['Account'].getDomains(
filter={"domains": {"name": utils.query_filter(name)}})
return [x['id'] for x in results]
def list_zones(self, **kwargs):
"""Retrieve a list of all DNS zones.
:param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
:returns: A list of dictionaries representing the matching zones.
"""
return self.client['Account'].getDomains(**kwargs)
def get_zone(self, zone_id, records=True):
"""Get a zone and its records.
:param zone: the zone name
:returns: A dictionary containing a large amount of information about
the specified zone.
"""
mask = None
if records:
mask = 'resourceRecords'
return self.service.getObject(id=zone_id, mask=mask)
def create_zone(self, zone, serial=None):
"""Create a zone for the specified zone.
:param zone: the zone name to create
:param serial: serial value on the zone (default: strftime(%Y%m%d01))
"""
return self.service.createObject({
'name': zone,
'serial': serial or time.strftime('%Y%m%d01'),
"resourceRecords": {}})
def delete_zone(self, zone_id):
"""Delete a zone by its ID.
:param integer zone_id: the zone ID to delete
"""
return self.service.deleteObject(id=zone_id)
def edit_zone(self, zone):
"""Update an existing zone with the options provided.
The provided dict must include an 'id' key and value corresponding
to the zone that should be updated.
:param dict zone: the zone to update
"""
self.service.editObject(zone)
def create_record(self, zone_id, record, record_type, data, ttl=60):
"""Create a resource record on a domain.
:param integer id: the zone's ID
:param record: the name of the record to add
:param record_type: the type of record (A, AAAA, CNAME, TXT, etc.)
:param data: the record's value
:param integer ttl: the TTL or time-to-live value (default: 60)
"""
resource_record = self._generate_create_dict(record, record_type, data,
ttl, domainId=zone_id)
return self.record.createObject(resource_record)
def create_record_mx(self, zone_id, record, data, ttl=60, priority=10):
"""Create a mx resource record on a domain.
:param integer id: the zone's ID
:param record: the name of the record to add
:param data: the record's value
:param integer ttl: the TTL or time-to-live value (default: 60)
:param integer priority: the priority of the target host
"""
resource_record = self._generate_create_dict(record, 'MX', data, ttl,
domainId=zone_id, mxPriority=priority)
return self.record.createObject(resource_record)
def create_record_srv(self, zone_id, record, data, protocol, port, service,
ttl=60, priority=20, weight=10):
"""Create a resource record on a domain.
:param integer id: the zone's ID
:param record: the name of the record to add
:param data: the record's value
:param string protocol: the protocol of the service, usually either TCP or UDP.
:param integer port: the TCP or UDP port on which the service is to be found.
:param string service: the symbolic name of the desired service.
:param integer ttl: the TTL or time-to-live value (default: 60)
:param integer priority: the priority of the target host (default: 20)
:param integer weight: relative weight for records with same priority (default: 10)
"""
resource_record = self._generate_create_dict(record, 'SRV', data, ttl, domainId=zone_id,
priority=priority, protocol=protocol, port=port,
service=service, weight=weight)
# The createObject call won't create SRV records unless we send the following complexType.
resource_record['complexType'] = 'SoftLayer_Dns_Domain_ResourceRecord_SrvType'
return self.record.createObject(resource_record)
def create_record_ptr(self, record, data, ttl=60):
"""Create a reverse record.
:param record: the public ip address of device for which you would like to manage reverse DNS.
:param data: the record's value
:param integer ttl: the TTL or time-to-live value (default: 60)
"""
resource_record = self._generate_create_dict(record, 'PTR', data, ttl)
return self.record.createObject(resource_record)
@staticmethod
def _generate_create_dict(record, record_type, data, ttl, **kwargs):
"""Returns a dict appropriate to pass into Dns_Domain_ResourceRecord::createObject"""
# Basic dns record structure
resource_record = {
'host': record,
'data': data,
'ttl': ttl,
'type': record_type
}
for (key, value) in kwargs.items():
resource_record.setdefault(key, value)
return resource_record
def delete_record(self, record_id):
"""Delete a resource record by its ID.
:param integer id: the record's ID
"""
self.record.deleteObject(id=record_id)
def get_record(self, record_id):
"""Get a DNS record.
:param integer id: the record's ID
"""
return self.record.getObject(id=record_id)
def get_records(self, zone_id, ttl=None, data=None, host=None,
record_type=None):
"""List, and optionally filter, records within a zone.
:param zone: the zone name in which to search.
:param int ttl: time in seconds
:param str data: the records data
:param str host: record's host
:param str record_type: the type of record
:returns: A list of dictionaries representing the matching records
within the specified zone.
"""
_filter = utils.NestedDict()
if ttl:
_filter['resourceRecords']['ttl'] = utils.query_filter(ttl)
if host:
_filter['resourceRecords']['host'] = utils.query_filter(host)
if data:
_filter['resourceRecords']['data'] = utils.query_filter(data)
if record_type:
_filter['resourceRecords']['type'] = utils.query_filter(
record_type.lower())
results = self.service.getResourceRecords(
id=zone_id,
mask='id,expire,domainId,host,minimum,refresh,retry,'
'mxPriority,ttl,type,data,responsiblePerson',
filter=_filter.to_dict(),
)
return results
def edit_record(self, record):
"""Update an existing record with the options provided.
The provided dict must include an 'id' key and value corresponding to
the record that should be updated.
:param dict record: the record to update
"""
self.record.editObject(record, id=record['id'])
def dump_zone(self, zone_id):
"""Retrieve a zone dump in BIND format.
:param integer id: The zone ID to dump
"""
return self.service.getZoneFileContents(id=zone_id)
|
mit
| 1,036,015,339,823,414,800
| 33.628692
| 102
| 0.60156
| false
| 4.263377
| false
| false
| false
|
adityahase/frappe
|
frappe/desk/page/user_profile/user_profile.py
|
1
|
2323
|
import frappe
from datetime import datetime
@frappe.whitelist()
def get_energy_points_heatmap_data(user, date):
return dict(frappe.db.sql("""select unix_timestamp(date(creation)), sum(points)
from `tabEnergy Point Log`
where
date(creation) > subdate('{date}', interval 1 year) and
date(creation) < subdate('{date}', interval -1 year) and
user = '{user}' and
type != 'Review'
group by date(creation)
order by creation asc""".format(user = user, date = date)))
@frappe.whitelist()
def get_energy_points_percentage_chart_data(user, field):
result = frappe.db.get_all('Energy Point Log',
filters = {'user': user, 'type': ['!=', 'Review']},
group_by = field,
order_by = field,
fields = [field, 'ABS(sum(points)) as points'],
as_list = True)
return {
"labels": [r[0] for r in result if r[0] != None],
"datasets": [{
"values": [r[1] for r in result]
}]
}
@frappe.whitelist()
def get_user_rank(user):
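# sum energy points per user (reviews excluded), then look up the current user's position in the sorted lists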
month_start = datetime.today().replace(day=1)
monthly_rank = frappe.db.get_all('Energy Point Log',
group_by = 'user',
filters = {'creation': ['>', month_start], 'type' : ['!=', 'Review']},
fields = ['user', 'sum(points)'],
order_by = 'sum(points) desc',
as_list = True)
all_time_rank = frappe.db.get_all('Energy Point Log',
group_by = 'user',
filters = {'type' : ['!=', 'Review']},
fields = ['user', 'sum(points)'],
order_by = 'sum(points) desc',
as_list = True)
return {
'monthly_rank': [i+1 for i, r in enumerate(monthly_rank) if r[0] == user],
'all_time_rank': [i+1 for i, r in enumerate(all_time_rank) if r[0] == user]
}
@frappe.whitelist()
def update_profile_info(profile_info):
profile_info = frappe.parse_json(profile_info)
keys = ['location', 'interest', 'user_image', 'bio']
for key in keys:
if key not in profile_info:
profile_info[key] = None
user = frappe.get_doc('User', frappe.session.user)
user.update(profile_info)
user.save()
return user
@frappe.whitelist()
def get_energy_points_list(start, limit, user):
return frappe.db.get_list('Energy Point Log',
filters = {'user': user, 'type': ['!=', 'Review']},
fields = ['name','user', 'points', 'reference_doctype', 'reference_name', 'reason',
'type', 'seen', 'rule', 'owner', 'creation', 'revert_of'],
start = start,
limit = limit,
order_by = 'creation desc')
|
mit
| -6,314,365,701,903,240,000
| 28.782051
| 85
| 0.637538
| false
| 2.882134
| false
| false
| false
|
ToonTownInfiniteRepo/ToontownInfinite
|
toontown/estate/DistributedFurnitureItem.py
|
1
|
4767
|
from toontown.toonbase.ToontownGlobals import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from toontown.catalog import CatalogItem
from toontown.toonbase import ToontownGlobals
from direct.distributed import DistributedObject
from toontown.toonbase import TTLocalizer
import DistributedHouseItem
from direct.distributed import DistributedSmoothNode
from direct.task import Task
import HouseGlobals
class DistributedFurnitureItem(DistributedHouseItem.DistributedHouseItem, DistributedSmoothNode.DistributedSmoothNode):
notify = directNotify.newCategory('DistributedFurnitureItem')
deferFor = 1
def __init__(self, cr):
DistributedHouseItem.DistributedHouseItem.__init__(self, cr)
DistributedSmoothNode.DistributedSmoothNode.__init__(self, cr)
NodePath.__init__(self)
self.localControl = True
self.__broadcastFrequency = 0.25
self.__adjustStarted = 0
self.furnitureMgr = None
self.transmitRelativeTo = None
return
def generate(self):
DistributedHouseItem.DistributedHouseItem.generate(self)
DistributedSmoothNode.DistributedSmoothNode.generate(self)
self.__taskName = self.taskName('sendRequestPosHpr')
def announceGenerate(self):
DistributedHouseItem.DistributedHouseItem.announceGenerate(self)
DistributedSmoothNode.DistributedSmoothNode.announceGenerate(self)
self.load()
def load(self):
pass
def disable(self):
taskMgr.remove(self.__taskName)
self.stopSmooth()
self.furnitureMgr.dfitems.remove(self)
self.furnitureMgr = None
DistributedHouseItem.DistributedHouseItem.disable(self)
DistributedSmoothNode.DistributedSmoothNode.disable(self)
return
def delete(self):
self.removeNode()
del self.item
DistributedHouseItem.DistributedHouseItem.delete(self)
DistributedSmoothNode.DistributedSmoothNode.delete(self)
def setItem(self, furnitureMgrId, blob):
self.furnitureMgr = self.cr.doId2do[furnitureMgrId]
self.furnitureMgr.dfitems.append(self)
self.item = CatalogItem.getItem(blob, store=CatalogItem.Customization)
self.assign(self.loadModel())
interior = self.furnitureMgr.getInteriorObject()
self.reparentTo(interior.interior)
def loadModel(self):
return self.item.loadModel()
def startAdjustPosHpr(self):
if self.__adjustStarted:
return
self.__adjustStarted = 1
self.clearSmoothing()
taskMgr.remove(self.__taskName)
posHpr = self.__getPosHpr()
self.__oldPosHpr = posHpr
self.sendRequestPosHpr(0, *posHpr)
taskMgr.doMethodLater(self.__broadcastFrequency, self.__posHprBroadcast, self.__taskName)
def __posHprBroadcast(self, task):
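# rebroadcast the furniture's position/rotation every broadcast interval, but only when it has moved by at least 0.1 units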
posHpr = self.__getPosHpr()
if not self.__comparePosHpr(posHpr, self.__oldPosHpr, 0.1):
pass
else:
self.__oldPosHpr = posHpr
self.sendRequestPosHpr(0, *posHpr)
taskMgr.doMethodLater(self.__broadcastFrequency, self.__posHprBroadcast, self.__taskName)
return Task.done
def stopAdjustPosHpr(self):
if not self.__adjustStarted:
return
self.__adjustStarted = 0
taskMgr.remove(self.__taskName)
posHpr = self.__getPosHpr()
self.sendRequestPosHpr(1, *posHpr)
del self.__oldPosHpr
def sendRequestPosHpr(self, final, x, y, z, h, p, r):
t = globalClockDelta.getFrameNetworkTime()
self.sendUpdate('requestPosHpr', (final,
x,
y,
z,
h,
p,
r,
t))
def setMode(self, mode, avId):
if mode == HouseGlobals.FURNITURE_MODE_START:
if avId != base.localAvatar.getDoId():
self.startSmooth()
elif mode == HouseGlobals.FURNITURE_MODE_STOP:
if avId != base.localAvatar.getDoId():
self.stopSmooth()
elif mode == HouseGlobals.FURNITURE_MODE_OFF:
pass
else:
self.notify.warning('setMode: unknown mode: %s avId: %s' % (mode, avId))
def __getPosHpr(self):
if self.transmitRelativeTo == None:
pos = self.getPos()
hpr = self.getHpr()
else:
pos = self.getPos(self.transmitRelativeTo)
hpr = self.getHpr(self.transmitRelativeTo)
return (pos[0],
pos[1],
pos[2],
hpr[0],
hpr[1],
hpr[2])
def __comparePosHpr(self, a, b, threshold):
for i in xrange(len(a)):
if abs(a[i] - b[i]) >= threshold:
return 1
return 0
|
mit
| 3,282,787,121,327,398,400
| 33.294964
| 119
| 0.643382
| false
| 3.724219
| false
| false
| false
|
willi-kappler/Snowball_Python
|
modules/gimmick.py
|
1
|
13945
|
import random
import pygame
import gfxobject
class Gimmick:
"Class for the funny gimmicks. Note that it doesn't use any of the gfxobject classes"
def __init__(self, screen, level):
self.screen = screen
self.level = level
self.tux = gfxobject.GFXObject(screen, level, level.playerGfx, 0, 0)
self.firedevil = gfxobject.GFXObject(screen, level, level.firedevilGfx, 0, 0)
self.ghost = gfxobject.GFXObject(screen, level, level.ghostGfx, 0, 0)
self.skull = gfxobject.GFXObject(screen, level, level.skullGfx, 0, 0)
self.zombie = gfxobject.GFXObject(screen, level, level.zombieGfx, 0, 0)
self.doSequence = [None, self.seq1, self.seq2, self.seq3, self.seq4]
self.prepareSequence = [None, self.prepareSeq1, self.prepareSeq2, self.prepareSeq3, self.prepareSeq4]
self.sequence = 0
self.time = 0
def prepareSeq1(self):
self.tux.x = -32
self.tux.y = 416
self.tux.animList1 = [(10, 80), (11, 80), (12, 80), (13, 80)]
self.tux.animList2 = [(14,80)]
self.tux.animList = self.tux.animList1
self.tux.animFrame = 0
self.tux.mode = 0
self.tux.step = 20
self.firedevil.x = -96
self.firedevil.y = 416
self.firedevil.animList = [(5, 80), (6, 80), (7, 80), (8, 80)]
self.firedevil.animFrame = 0
self.firedevil.mode = 0
self.ground = [1,0,0,0,0,0,0]
def prepareSeq2(self):
self.skull.x = 512
self.skull.y = 416
self.skull.animList1 = [(0, 80), (1, 80), (2, 80), (3, 80)]
self.skull.animList2 = [(5, 80), (6, 80), (7, 80), (8, 80)]
self.skull.animList = self.skull.animList1
self.skull.animFrame = 0
self.skull.mode = 0
self.skull.step = 40
self.ghost.x = 640
self.ghost.y = 416
self.ghost.animList1 = [(0, 80), (1, 80), (2, 80), (3, 80)]
self.ghost.animList2 = [(5, 80), (6, 80), (7, 80), (8, 80)]
self.ghost.animList = self.ghost.animList1
self.ghost.animFrame = 0
self.ghost.mode = 0
self.ground = []
self.ground.append([self.level.greenBottle, self.level.doorClosed, 0, 0, 0, 0])
self.ground.append([2, 2, 2, 2, 2, 2])
def prepareSeq3(self):
self.skull.x = 544
self.skull.y = 416
self.skull.animList1 = [(0, 80), (1, 80), (2, 80), (3, 80)]
self.skull.animList2 = [(5, 80), (6, 80), (7, 80), (8, 80)]
self.skull.animList = self.skull.animList1
self.skull.animFrame = 0
self.skull.mode = 0
self.zombie.x = 0
self.zombie.y = 416
self.zombie.animList1 = [(0, 80), (1, 80), (2, 80), (3, 80)]
self.zombie.animList2 = [(5, 80), (6, 80), (7, 80), (8, 80)]
self.zombie.animList = self.zombie.animList2
self.zombie.animFrame = 0
self.zombie.mode = 0
self.leftGround = []
self.leftGround.append([1, 1, 1, self.level.spikeNo + 2, 0])
self.leftGround.append([0, 0, 0, self.level.doorOpened + 1, self.level.heartNo + 1])
self.leftGround.append([2, 2, 2, self.level.spikeNo + 1, 2])
self.ground = []
self.ground.append([0, 0, self.level.switchMin])
self.ground.append([2, 2, 2])
def prepareSeq4(self):
pass
def seq1(self): # tux and firedevil
if self.tux.mode == 0:
self.tux.x += 2
self.tux.step -= 1
if self.tux.step == 0:
self.tux.mode = 1
self.tux.animList = self.tux.animList2
self.tux.animFrame = 0
self.tux.step = 8
self.ground[(self.tux.x / 32) + 1] = 1 # put blocks on ground
self.firedevil.mode = 1
if self.firedevil.x > 32:
self.ground[(self.firedevil.x / 32) - 1] = 0 # take blocks from ground
if self.tux.x > 160:
self.tux.mode = 2
self.firedevil.mode = 1
self.tux.animList = [(0, 80)] # turn around
self.tux.animFrame = 0
self.tux.step = 32 # and wait
self.firedevil.animList = [(5, 80)]
self.firedevil.animFrame = 0
elif self.tux.mode == 1:
self.tux.step -= 1 # wait and bow
if self.tux.step == 0:
self.tux.mode = 0
self.tux.animList = self.tux.animList1 # move on
self.tux.animFrame = 0
self.tux.step = 16
self.firedevil.mode = 0
elif self.tux.mode == 2:
self.tux.step -= 1 # wait
if self.tux.step == 0:
self.tux.mode = 3
self.tux.step = 32
elif self.tux.mode == 3:
self.screen.blit(self.level.frontGfx[self.level.heartNo], (140, 400)) # show heart
self.tux.step -= 1 # and wait
if self.tux.step == 0:
self.tux.mode = 4
self.tux.animList = [(0, 80), (1, 80), (2, 80), (3, 80)]
self.tux.animFrame = 0
self.firedevil.mode = 2
self.firedevil.animList = [(0, 80), (1, 80), (2, 80), (3, 80)]
self.firedevil.animFrame = 0
elif self.tux.mode == 4:
self.tux.x -= 6 # you know what you want.... go and get it!
if self.tux.x > 0:
self.ground[(self.tux.x / 32) + 1] = 0 # remove blocks
else:
self.sequence = 0
self.time = pygame.time.get_ticks()
self.tux.go()
if self.firedevil.mode == 0:
self.firedevil.x += 2
elif self.firedevil.mode == 2:
self.firedevil.x -= 6 # run for your life!
if self.firedevil.x > 32:
self.ground[(self.firedevil.x / 32) - 1] = 1 # put blocks
self.firedevil.go()
for i in range(6):
if self.ground[i] == 1:
self.screen.blit(self.level.frontGfx[1], (i*32, 448))
def seq2(self): # skull and ghost
for i in range(6):
if self.ground[0][i] > 0:
self.screen.blit(self.level.frontGfx[self.ground[0][i]], (448 + (i*32), 416))
if self.ground[1][i] > 0:
self.screen.blit(self.level.frontGfx[self.ground[1][i]], (448 + (i*32), 448))
if self.skull.mode == 1:
self.skull.step -= 1 # wait in front of the door
if self.skull.step == 0:
self.skull.mode = 2
self.skull.animList = self.skull.animList2 # turn around
self.skull.animFrame = 0
elif self.skull.mode == 2:
self.skull.x += 2 # move to ghost
if self.skull.x >= 580:
self.skull.mode = 3
self.skull.step = 40
elif self.skull.mode == 3:
self.skull.step -= 1 # babble a lot of meaningless stuff to ghost
if self.skull.step == 0:
self.skull.mode = 0 # wait
self.skull.animList = [(1, 80)] # turn around
self.skull.animFrame = 0
self.ghost.mode = 2
elif self.skull.mode == 4:
self.skull.step -= 1 # babble to ghost again...
if self.skull.step == 0:
self.skull.mode = 0 # wait
self.skull.animList = [(1, 80)]
self.skull.animFrame = 0
self.ghost.mode = 4
self.ghost.animList = self.ghost.animList1
self.ghost.animFrame = 0
elif self.skull.mode == 5:
self.skull.x -= 2
if self.skull.x <= 540:
self.ground[0][3] = 0
self.skull.mode = 0
self.skull.go()
if self.ghost.mode == 0:
self.ghost.x -= 2 # sneek in
if self.ghost.x <= 608:
self.ghost.mode = 1
self.skull.mode = 1
elif self.ghost.mode == 2:
self.ghost.x -= 2 # move to door
if self.ghost.x <= 512:
self.ghost.mode = 3 # wait
self.skull.step = 30
elif self.ghost.mode == 3:
self.skull.step -= 1
if self.skull.step == 0:
self.ghost.mode = 1 # wait
self.ghost.animList = self.ghost.animList2 # turn around
self.ghost.animFrame = 0
self.skull.step = 30
self.skull.mode = 4
self.skull.animList = self.skull.animList1
self.skull.animFrame = 0
elif self.ghost.mode == 4:
self.ghost.x -= 2
if self.ghost.x <= 448:
self.ghost.mode = 5
self.skull.step = 30
elif self.ghost.mode == 5:
self.skull.step -= 1
if self.skull.step == 0:
self.ground[0][0] = 0
self.ghost.mode = 6
self.ghost.animList = self.ghost.animList2
self.ghost.animFrame = 0
self.skull.animList = self.skull.animList1
self.skull.animFrame = 0
elif self.ghost.mode == 6:
self.ghost.x += 2
if self.ghost.x >= 548:
self.ground[0][3] = self.level.greenBottle
self.ghost.mode = 7
self.skull.mode = 5
elif self.ghost.mode == 7:
self.ghost.x += 2
if self.ghost.x >= 640:
self.sequence = 0
self.time = pygame.time.get_ticks()
self.ghost.go()
def seq3(self): # zombie and skull
for i in range(5):
if self.leftGround[0][i] > 0:
self.screen.blit(self.level.frontGfx[self.leftGround[0][i]], (i*32, 384))
if self.leftGround[1][i] > 0:
self.screen.blit(self.level.frontGfx[self.leftGround[1][i]], (i*32, 416))
if self.leftGround[2][i] > 0:
self.screen.blit(self.level.frontGfx[self.leftGround[2][i]], (i*32, 448))
for i in range(3):
if self.ground[0][i] > 0:
self.screen.blit(self.level.frontGfx[self.ground[0][i]], (544 + (i*32), 416))
if self.ground[1][i] > 0:
self.screen.blit(self.level.frontGfx[self.ground[1][i]], (544 + (i*32), 448))
if self.skull.mode == 1: # fast! gotta get to the switch! the stupid zombie is coming...
self.skull.x += 2
if self.skull.x >= 580:
self.skull.mode = 2
self.skull.animList = self.skull.animList1
self.skull.animFrame = 0
self.leftGround[1][3] = self.level.redOn
if self.skull.mode == 2: # go back and enjoy the show
self.skull.x -= 2
if self.skull.x <= 544:
self.skull.mode = 0 # wait
if self.skull.mode == 3: # one more time...
self.skull.x += 2
if self.skull.x >= 580:
self.skull.mode = 2
self.skull.animList = self.skull.animList1
self.skull.animFrame = 0
self.leftGround[1][3] = self.level.doorOpened + 1
self.skull.go()
if self.zombie.mode == 0: # nice shiny coin! zombie want coin! zombie must have coin!
self.zombie.x += 1
if self.zombie.x == 32:
self.skull.mode = 1
self.skull.animList = self.skull.animList2
self.skull.animFrame = 0
elif self.zombie.x == 64:
self.zombie.mode = 1
self.zombie.animList = self.zombie.animList1
self.zombie.animFrame = 0
elif self.zombie.mode == 1: # arrgh! turn around and move back... zombie no coin...
self.zombie.x -= 1
if self.zombie.x == 32:
self.skull.mode = 3
self.skull.animList = self.skull.animList2
self.skull.animFrame = 0
elif self.zombie.x == 0:
self.zombie.mode = 2
self.zombie.animList = self.zombie.animList2
self.zombie.animFrame = 0
elif self.zombie.mode == 2: # coin there again! zombie want coin!
self.zombie.x += 1
if self.zombie.x == 32:
self.skull.mode = 1
self.skull.animList = self.skull.animList2
self.skull.animFrame = 0
elif self.zombie.x == 64:
self.zombie.mode = 3
self.zombie.animList = self.zombie.animList1
self.zombie.animFrame = 0
elif self.zombie.mode == 3: # zombie go home... zombie no want play...
self.zombie.x -= 1
if self.zombie.x == 32:
self.zombie.mode = 4
self.zombie.animList = [(5, 80)]
self.zombie.animFrame = 0
self.zombie.step = 30
elif self.zombie.mode == 4: # coin ?? coin ?? no coin....
self.zombie.step -= 1
if self.zombie.step == 0:
self.zombie.mode = 5
self.zombie.animList = self.zombie.animList1
self.zombie.animFrame = 0
elif self.zombie.mode == 5: # zombie away...
self.zombie.x -= 1
if self.zombie.x == -16:
self.sequence = 0
self.time = pygame.time.get_ticks()
self.zombie.go()
def seq4(self):
pass
def reset(self):
self.sequence = 0
self.time = pygame.time.get_ticks()
def go(self):
if self.sequence == 0:
if pygame.time.get_ticks() > self.time + 5000:
self.time = pygame.time.get_ticks()
self.sequence = random.randint(0, 3)
if self.sequence > 0:
self.prepareSequence[self.sequence]()
else:
self.doSequence[self.sequence]()
|
gpl-2.0
| -7,936,916,952,836,410,000
| 38.616477
| 109
| 0.502617
| false
| 3.223532
| false
| false
| false
|
googleapis/googleapis-gen
|
google/cloud/websecurityscanner/v1/websecurityscanner-v1-py/google/cloud/websecurityscanner_v1/types/finding_type_stats.py
|
1
|
1388
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.cloud.websecurityscanner.v1',
manifest={
'FindingTypeStats',
},
)
class FindingTypeStats(proto.Message):
r"""A FindingTypeStats resource represents stats regarding a
specific FindingType of Findings under a given ScanRun.
Attributes:
finding_type (str):
Output only. The finding type associated with
the stats.
finding_count (int):
Output only. The count of findings belonging
to this finding type.
"""
finding_type = proto.Field(
proto.STRING,
number=1,
)
finding_count = proto.Field(
proto.INT32,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
apache-2.0
| -8,043,455,129,311,050,000
| 26.76
| 74
| 0.668588
| false
| 4.106509
| false
| false
| false
|
robotika/husky
|
ros/tcpros.py
|
1
|
3927
|
"""
Parsing TCPROS communication protocol
usage:
./tcpros <log file to replay>
"""
import socket
import struct
import sys
import datetime
def prefix4BytesLen( s ):
"adding ROS length"
return struct.pack("I", len(s)) + s
def splitLenStr( data ):
ret = []
while len(data) >= 4:
size = struct.unpack( "I", data[:4] )[0]
data = data[4:]
ret.append( data[:size] )
data = data[size:]
return ret
class LoggedStream:
def __init__( self, readFn=None, writeFn=None, prefix="" ):
self.readFn = readFn
self.writeFn = writeFn
dt = datetime.datetime.now()
self.filename = prefix + dt.strftime("%y%m%d_%H%M%S.log")
self.logFile = open( "logs/" + self.filename, "wb" )
print "LogIt:", self.filename
self.buf = ""
def readMsg( self ):
try:
data = self.readFn( 4096 )
except socket.timeout as e:
assert False, e # it should contain partial data
except socket.error as (errno, errStr):
assert errno in [10035,11], (errno, errStr)
# Windows 'A non-blocking socket operation could not be completed immediately'
# Linux (11, 'Resource temporarily unavailable')
data = ""
self.logFile.write( data )
self.logFile.flush()
self.buf += data
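# TCPROS frames every message with a 4-byte length prefix; only return a message once a full frame is buffered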
if len(self.buf) >= 4:
num = struct.unpack("I", self.buf[:4])[0]
if len(self.buf) >= 4 + num:
data = self.buf[4:4+num]
self.buf = self.buf[4+num:]
return data
return None
def writeMsg( self, msg ):
data = prefix4BytesLen( msg )
self.logFile.write( data )
self.logFile.flush()
self.writeFn( data )
class ReplayLoggedStream:
def __init__( self, filename, assertWrite ):
self.filename = filename
self.logFile = open( self.filename, "rb" )
print "ReplayLog:", self.filename
self.assertWrite = assertWrite
def readMsg( self ):
data = self.logFile.read( 4 )
if len(data) >= 4:
num = struct.unpack("I", data[:4])[0]
return self.logFile.read( num )
return None
def writeMsg( self, msg ):
data = prefix4BytesLen( msg )
ref = self.logFile.read( len(data) )
if self.assertWrite:
assert data == ref, (ref,data)
class Tcpros:
"TCPROS communication protocol"
def __init__( self, readFn=None, readMsgFn=None, verbose=False ):
self.readFn = readFn
self.readMsgFn = readMsgFn
self.topicType = None
self.verbose = verbose
def readMsg( self ):
"skip very first message - topic description"
if self.topicType == None:
m = self._readMsg()
if m != None:
self.topicType = splitLenStr(m)
if self.verbose:
for s in self.topicType:
print s
return self._readMsg()
return None
return self._readMsg()
def _readMsg( self ):
if self.readMsgFn:
return self.readMsgFn()
data = self.readFn(4)
if len(data) == 0:
return None
size = struct.unpack("I", data)[0]
return self.readFn( size )
if __name__ == "__main__":
from msgs import *
if len(sys.argv) < 2:
print __doc__
sys.exit(1)
t = Tcpros( open(sys.argv[1], "rb").read )
while 1:
m = t.readMsg()
if m == None:
break
# print t.parseImu(m)
# print t.parseEncoders(m)
# print t.parsePower(m)
# print parseString(m)
# print parseJoy(m)
print parseSafety(m)
print "--------------"
#-------------------------------------------------------------------
# vim: expandtab sw=4 ts=4
|
mit
| 264,870,536,879,751,900
| 27.664234
| 93
| 0.520499
| false
| 3.768714
| false
| false
| false
|
pseudobeard/teambalancer
|
legacy/getter.py
|
1
|
1244
|
import json
import requests
with open('properties.json') as data_file:
data = json.load(data_file)
jwtToken = data["jwtToken"]
id = data["id"]
ITEM_NAME = data["item_name"]
headers = {"authorization" : "Bearer " + jwtToken}
baseurl = "https://api.streamelements.com/kappa/v1/store/"
end = "/redemptions?limit=100&pending=true"
class Getter:
def __init__(self):
return
def getJSON(self):
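# fetch up to 100 pending store redemptions for this channel from the StreamElements API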
res = requests.get(baseurl + id + end, headers=headers)
data = json.loads(res.text)
return data
def getViewerGameParticipants(self):
battletags = []
json = self.getJSON()
redemptions = json.get("docs")
for redemption in redemptions: # Iterate through redemptions
item = redemption.get("item") # Get item
if item is not None:
itemName = item.get("name")
if itemName == ITEM_NAME: # If it is a viewer ticket, add the battletag to the list
inputs = redemption.get("input")
battletags.append(inputs[0])
return battletags
if __name__=="__main__":
g = Getter()
battletags = g.getViewerGameParticipants()
for battletag in battletags:
print(battletag)
|
gpl-3.0
| -2,647,808,785,494,130,000
| 26.666667
| 99
| 0.605305
| false
| 3.54416
| false
| false
| false
|
rmulton/lawen
|
webservice_caller/GoogleAPICaller.py
|
1
|
2949
|
import json
import re
from bs4 import BeautifulSoup
from model.Transport.Walk import Walk
from model.Transport.PublicTransport import PublicTransport
from model.Transport.Drive import Drive
from model.Transport.Bicycle import Bicycle
from model.Possibilities import Possibilities
from webservice_caller.TransportAPICaller import TransportAPICaller
from webservice_caller.call_url import call_url, APICallError
class GoogleAPICaller(TransportAPICaller):
'''
Class that handles calling google api to compute itiniraries
'''
_url = 'https://maps.googleapis.com/maps/api/directions/json?'
_key = 'AIzaSyCqgwlzgUDYYF7xnePerJZaapgUWmyGYjc'
def __init__ (self, request):
'''
Create the different parameters that we will need for the API url
'''
self._origin = request.from_x, request.from_y
self._destination = request.to_x, request.to_y
self._modes = {'driving':Drive,'walking':Walk,'bicycling':Bicycle,'transit':PublicTransport}
@property
def modes(self):
return self._modes
def get_times(self):
'''
Get the different times related to the travel modes and returns
a list of objects corresponding to each travel mode'
'''
times = {}
for mode, mode_class in self._modes.items():
url_final = GoogleAPICaller._url + "origin=" + ",".join(str (e) for e in self._origin) + "&destination=" + ",".join(str(f) for f in self._destination) + "&mode=" + mode + "&key=" + GoogleAPICaller._key
response = call_url(url_final)
data = json.loads(response.content)
try:
travel_time = data["routes"][0]["legs"][0]["duration"]["value"]
except IndexError:
raise APICallError
except KeyError:
raise APICallError
times[mode] = travel_time
return times
def get_itineraries(self):
'''
Get the different itineraries related to the travel modes
'''
itineraries = {}
for mode, mode_class in self._modes.items():
url_final = GoogleAPICaller._url + "origin=" + ",".join(str (e) for e in self._origin) + "&destination=" + ",".join(str(f) for f in self._destination) + "&mode=" + mode + "&key=" + GoogleAPICaller._key
response = call_url(url_final)
data = json.loads(response.content)
try:
instruction = data["routes"][0]["legs"][0]["steps"]
except IndexError:
raise APICallError
except KeyError:
raise APICallError
itinerary = ""
for i in range(len(instruction)):
itinerary += instruction[i]["html_instructions"] + ", "
clean_itinerary = BeautifulSoup(itinerary,"html.parser").text
itineraries[mode] = clean_itinerary
return itineraries
|
mit
| 4,741,626,998,261,590,000
| 39.958333
| 213
| 0.60902
| false
| 4.039726
| false
| false
| false
|
kevintee/Predicting-Gene-Networks
|
results/goatools-master/scripts/map_to_slim.py
|
1
|
4362
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
import os
import os.path as op
import sys
sys.path.insert(0, op.join(op.dirname(__file__), ".."))
from goatools.obo_parser import GODag
from goatools.mapslim import mapslim
# copied from find_enrichment.py
# TODO: put this method into the library, copying is BAD practise
def read_associations(assoc_fn):
assoc = {}
for row in open(assoc_fn):
atoms = row.split()
if len(atoms) == 2:
a, b = atoms
elif len(atoms) > 2 and row.count('\t') == 1:
a, b = row.split("\t")
else:
continue
b = set(b.split(";"))
assoc[a] = b
return assoc
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("%prog [options] go_obo_file goslim_obo_file")
p.add_option("--term", dest="term", help="a term (association id) to map "
"to slim IDs. This can not be used together with "
"--association_file", action="store", type="string",
default=None)
p.add_option("--association_file", dest="ass_file_name", action="store",
help="the file of protein products and their associations "
"to be mapped to GO slim terms. This can not be used "
"together with --term", type="string", default=None)
p.add_option("--slim_out", dest="slim_out", action="store", type="string",
default="direct", help="One of `direct` or `all`. Defines "
"whether the output should contain all slim terms (all "
"ancestors) or only direct slim terms (only direct "
"ancestors)")
opts, args = p.parse_args()
# check for correct number of arguments
if len(args) != 2:
p.print_help()
sys.exit(1)
obo_file = args[0]
assert os.path.exists(obo_file), "file %s not found!" % obo_file
slim_obo_file = args[1]
assert os.path.exists(slim_obo_file), "file %s not found!" % slim_obo_file
# check that either --term or --association_file is set
if (opts.term is None and opts.ass_file_name is None) \
or ((opts.term is not None) and (opts.ass_file_name is not None)):
p.print_help()
sys.exit(1)
# check that slim_out is either "direct" or "all" and set according flag
only_direct = None
if opts.slim_out == "direct":
only_direct = True
elif opts.slim_out == "all":
only_direct = False
else:
p.print_help()
sys.exit(1)
# load DAGs
go_dag = GODag(obo_file)
goslim_dag = GODag(slim_obo_file)
# in case a single term is given as input:
if opts.term:
if opts.term not in go_dag:
print(("term %s not found!" % opts.term), file=sys.stderr)
sys.exit(1)
direct_anc, all_anc = mapslim(opts.term, go_dag, goslim_dag)
# output either all or only direct slims, depending on user command
if only_direct:
slim_terms_str = ";".join(direct_anc)
else:
slim_terms_str = ";".join(all_anc)
print(slim_terms_str)
# in case a association file is given as input
if opts.ass_file_name:
assert os.path.exists(opts.ass_file_name), ("file %s not found!"
% opts.ass_file_name)
assocs = read_associations(opts.ass_file_name)
for protein_product, go_terms in assocs.items():
all_direct_anc = set()
all_covered_anc = set()
all_all_anc = set()
for go_term in go_terms:
if go_term not in go_dag:
continue
direct_anc, all_anc = mapslim(go_term, go_dag, goslim_dag)
all_all_anc |= all_anc
# collect all covered ancestors, so the direct ancestors
# can be calculated afterwards
all_covered_anc |= (all_anc - direct_anc)
all_direct_anc = all_all_anc - all_covered_anc
# output either all or only direct, depending on user command
if only_direct:
slim_terms_str = ";".join(all_direct_anc)
else:
slim_terms_str = ";".join(all_all_anc)
print((protein_product + "\t" + slim_terms_str))
|
mit
| 2,198,930,277,739,661,600
| 36.282051
| 78
| 0.560064
| false
| 3.631973
| false
| false
| false
|
polypmer/obligarcy
|
obligarcy/urls.py
|
1
|
1546
|
from django.conf.urls import url
from . import views
from django.conf.urls.static import static, settings
urlpatterns = [
# ex: /oblicarcy/
url(r'^$', views.index, name='index'),
url(r'^firehose/$', views.firehose, name='firehose'),
url(r'^profile/$', views.profile, name='profile'),
# ex: /obligarcy/user/5/
url(r'^user/([0-9]+)/$', views.show_prof, name='user'),
url(r'^follow/$', views.follow, name='follow'),
url(r'^update/$', views.update_profile, name='update'),
# ex: /obligarcy/user
#url(r'^user/$', views.profile, name='profile'),
# ex: /oblicarcy/submissions/5/
url(r'^submission/([0-9a-z]+)/$', views.show_sub, name='submission'),
url(r'^submit/([0-9a-z]+)/([0-9]+)/$', views.submit, name='submit'),
url(r'^upload/([0-9a-z]+)/([0-9]+)/$', views.submit_upload, name='upload'),
# url(r'^submit/([0-9a-z]+)/([0-9]+)/$', views.submit, name='submit'),
# ex: /oblicarcy/contracts/5/
url(r'^contract/([0-9a-z]+)/$', views.show_con, name='contract'),
url(r'^challenge/$', views.challenge, name='challenge'),
url(r'^sign/([0-9a-z]+)/$', views.sign_con, name='sign'),
url(r'^active/([0-9]+)/$', views.show_active, name='active'),
# ex: /oblicarcy/login/
url(r'^login/$', views.user_login, name='login'),
url(r'^logout/$', views.user_logout, name='logout'),
url(r'^register/$', views.register, name='register'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
gpl-3.0
| -9,180,719,467,244,064,000
| 40.783784
| 130
| 0.609314
| false
| 2.939163
| false
| true
| false
|
xinl/lifepadbox
|
lp/time.py
|
1
|
1825
|
import datetime
class TZOffset(datetime.tzinfo):
def __init__(self, offset_string):
# validity of offset_string is already taken care of by Setting.put() so we just trust it here.
self.offset_string = offset_string
self._h = int(self.offset_string[1:3])
self._m = int(self.offset_string[3:5])
if self.offset_string[0] == "-":
self._h = - self._h
self._m = - self._m
def utcoffset(self, dt): return datetime.timedelta(hours = self._h, minutes = self._m)
def dst(self, dt): return datetime.timedelta(0)
def tzname(self, dt): return self.offset_string
#UTC = TZOffset("+0000")
def str2datetime(time_str, time_zone="+0000"):
""" Convert string (format: YYYY-MM-DD HH:MM:SS) into datetime object. """
# For some unknown reason, datetime.strptime() refuse to work.
ts = time_str.split(' ')
ts[0] = ts[0].split('-')
ts[1] = ts[1].split(':')
time_object = datetime.datetime(int(ts[0][0]), int(ts[0][1]), int(ts[0][2]), int(ts[1][0]), int(ts[1][1]), int(ts[1][2]), 000000, TZOffset(time_zone))
#time_object = datetime.datetime.strptime(time_string, '%Y-%m-%d %H:%M:%S')
#time_object.tzinfo = TZOffset(time_zone)
return time_object
def datetime2str(time_obj):
""" Convert datetime object to string (format: YYYY-MM-DD HH:MM:SS). """
#time_str = time_obj.strftime("%Y-%m-%d %H:%M:%S")
time_str = "-".join([str(time_obj.year), str(time_obj.month), str(time_obj.day)]) + " " + ":".join([str(time_obj.hour), str(time_obj.minute), str(time_obj.second)])
return time_str
def changetz(time_object, timezone_string):
if time_object.tzinfo == None:
time_object = time_object.replace(tzinfo=TZOffset("+0000"))
return time_object.astimezone(TZOffset(timezone_string))
|
bsd-2-clause
| 3,487,160,411,433,715,700
| 41.465116
| 168
| 0.61863
| false
| 3.135739
| false
| false
| false
|
tuwmft/MatterBot
|
mattermost_bot/plugins/mlpl/Game.py
|
1
|
9936
|
from threading import Timer
import random
import string
class Game():
IDDLE = 0
START_GAME = 1
START_ROUND = 2
DURING_ROUND = 3
END_ROUND = 4
POINTS_PER_SCORE = [
1,
2,
3,
5,
7,
10,
14,
20,
]
MLPL_BONUS = 10
MESSAGES = {
'start_round' : [
'ok faut troué le mot avec sa les gars : {}',
'voila les lettr qu''on vuet : {}',
'c sa les lettre maietenant : {}',
'on trouve le mot ki contient sa : {}',
],
'end_round' : [
'ct sa le mot qu''orn voulez : {}',
'le mot gaggnant : {}',
'le mot queest le meileur : {}',
'c sa qui gagen : {}',
],
'prepare_next_round' : [
'allé on se prepar',
'sa va continué, on est pret la',
'oké la suite mentienant',
'bon sa continu',
],
'best_proposition' : [
'POUUUUAH allé {} il trovue le max de {} letre',
'ALLLEZZZ {} il met les {} leterr !!!',
'WOOOOOUH {} il a trouver les {} lettre',
'JAVOUUUU bien jour {} il a fait {} letre !!',
],
'good_proposition' : [
'c pa mal sa {}. {} lettres',
'jaim bien ta porpositon {}. Sa fai {} lettres',
'alé bien ouej {} !! sa fé {} lettere cousin',
'pouuaaah commen il déchire {}, il a trouver {} letre',
],
'better_proposition' : [
'{} il nik {} lol. {} letre maintenat',
'{} passe devan {} avek {} letre',
'ouuuuuuuhhhhaaa alé {} a niker {} avec {} leterte',
],
'better_auto_proposition' : [
'{} se bat luimeme et fait {} letre !',
],
'the_winner_is' : [
'c {} qui a gagner, ac {} letr. {} point maggle',
'{} et le ganian. {} retre. Bi1 jouer couz. {} en plus',
'{} a fé {} letre et a gagner ce roundd. {} ppin en plu pour toi',
],
'no_winner' : [
'person a trouver lol',
'pa de gagnant. vous ete nul ou koi',
],
'i_did_not_really_understand_sorry' : [
'kwa',
'hein',
'kétuti',
'g pa compri',
'koi',
],
}
DICO = [
'a',
'b',
'et',
'chibre',
'fesse',
'caca',
'acac',
]
def __init__(self, id, send_message):
self.id = id
self.send_message = send_message
self.timers = []
self.current_letters = []
self.scores = {}
self.load_dico()
def listen(self, message):
if self.status == Game.DURING_ROUND:
self.handle_proposition(message)
def start(self):
self.send_message("c parti pour le jeu dans {}".format(self.id))
self.status = Game.START_GAME
self.delayed_action(3, self.start_round)
self.load_scores()
def stop(self):
self.status = Game.IDDLE
self.send_message("a ok c torminé alors")
self.clear_timers()
self.save_scores()
self.show_scores()
def start_round(self):
letters = []
for x in range(8):
letters.append(string.ascii_lowercase[random.randint(0, 25)])
message = self.get_random_message('start_round')
self.send_message(message.format(', '.join(letters).upper()))
self.current_letters = letters
self.current_best_words = self.find_best_words(letters)
self.current_best_proposition = ()
self.status = Game.DURING_ROUND
self.delayed_action(30, self.end_round)
def end_round(self, mlpl=False):
message = self.get_random_message('end_round')
best_words = self.find_best_words(self.current_letters)
if best_words:
self.send_message(message.format('`' + '` ou `'.join(best_words).upper() + '`'))
else:
self.send_message(message.format(' RIEN DUTOUT lol. CT pas facile la javou'))
if self.current_best_proposition:
winner = self.current_best_proposition[0]
score = self.current_best_proposition[1]
message = self.get_random_message('the_winner_is')
points = self.award_player(winner, score, mlpl)
self.send_message(message.format(winner, score, points))
else:
message = self.get_random_message('no_winner')
self.send_message(message)
self.status = Game.END_ROUND
self.delayed_action(3, self.prepare_next_round)
self.save_scores()
def load_scores(self):
try:
f = open('./mattermost_bot/plugins/mlpl/scores', 'r+')
for line in f:
line_data = line.split(':')
self.scores[line_data[0]] = int(line_data[1])
f.close()
print('scores sarzés')
except IOError as e:
print('err : impossible de charger les scores')
print(str(e))
def save_scores(self):
try:
f = open('./mattermost_bot/plugins/mlpl/scores', 'w+')
for name, score in self.scores.items():
f.write('{}:{}:\n'.format(name, score))
f.close()
print('scores enrizistrés')
except IOError:
print('err : impossible de sauvegarder les scores')
def award_player(self, name, score, mlpl=False):
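# points are looked up from POINTS_PER_SCORE by word length; finding one of the best possible words adds the MLPL bonus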
points = Game.POINTS_PER_SCORE[score - 1]
if mlpl:
points += Game.MLPL_BONUS
if name in self.scores:
self.scores[name] += points
else:
self.scores[name] = points
return points
def prepare_next_round(self):
message = self.get_random_message('prepare_next_round')
self.send_message(message)
self.status = Game.START_ROUND
self.current_best_proposition = ()
self.delayed_action(2, self.start_round)
def delayed_action(self, delay, action, args=[]):
timer = Timer(delay, action, args)
timer.start()
self.timers.append(timer)
def handle_proposition(self, message):
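# a proposition only counts if it exists in the dictionary and uses the drawn letters; the longest one so far leads the round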
proposition = message.get_message()
sender = message.get_username()
if not self.word_exists(proposition):
return
if not self.is_word_made_of_current_letters(proposition):
return
score = len(proposition)
if self.current_best_words:
best_len = len(self.current_best_words[0])
if score == best_len:
message = self.get_random_message('best_proposition')
self.send_message(message.format(sender, score))
self.current_best_proposition = (sender, score)
self.clear_timers()
self.end_round(mlpl=True)
if not self.current_best_proposition:
message = self.get_random_message('good_proposition')
self.send_message(message.format(sender, score))
self.current_best_proposition = (sender, score)
else:
last_score = self.current_best_proposition[1]
last_sender = self.current_best_proposition[0]
if last_score < score:
if last_sender == sender:
message = self.get_random_message('better_auto_proposition')
self.send_message(message.format(sender, score))
else:
message = self.get_random_message('better_proposition')
self.send_message(message.format(sender, last_sender, score))
self.current_best_proposition = (sender, score)
def show_scores(self):
if not self.scores:
self.send_message('pa de score encor')
return
self.send_message(
'les scores : \n{}'.format(
'\n'.join(' {} : {}'.format(n, s) for n, s in self.scores.items())
)
)
def load_dico(self):
self.dico = []
try:
f = open('./mattermost_bot/plugins/mlpl/dico.txt', 'r')
for line in f:
cleaned_line = line.replace('\n', '').replace('\r', '').lower()
self.dico.append(cleaned_line)
f.close()
except IOError:
            print('err: could not load the dictionary')
        print('dictionary loaded')
def get_dico(self):
return self.dico
def word_exists(self, word):
return word.lower() in self.get_dico()
def find_best_words(self, letters):
best_words = []
for word in self.get_dico():
word_ok = self.is_word_made_of_letters(word, letters)
if word_ok:
word_len = len(word)
if best_words:
best_word_len = len(best_words[0])
if word_len == best_word_len:
best_words.append(word)
if word_len > best_word_len:
best_words = [word]
else:
best_words = [word]
return best_words
def get_random_message(self, message_category):
messages = Game.MESSAGES[message_category]
return messages[random.randint(0, len(messages) - 1)]
def clear_timers(self):
for timer in self.timers:
timer.cancel()
def is_word_made_of_letters(self, proposition, letters):
word_ok = True
check_letters = letters[:]
for letter in proposition.lower():
if letter not in check_letters:
word_ok = False
break
check_letters.remove(letter)
return word_ok
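    # Illustrative sketch (example draw assumed, not part of the original
    # code): each letter of a proposition consumes one tile from the draw,
    # so repeated letters need repeated tiles.
    #   draw = ['p', 'o', 'm', 'e', 'a', 'b', 'c', 'd']
    #   is_word_made_of_letters('pome', draw)   -> True
    #   is_word_made_of_letters('pomme', draw)  -> False (only one 'm' drawn)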
def is_word_made_of_current_letters(self, proposition):
return self.is_word_made_of_letters(proposition, self.current_letters)
|
mit
| 1,998,505,489,977,923,300
| 28.88253
| 92
| 0.519605
| false
| 3.679896
| false
| false
| false
|
aveao/AveBot
|
cogs/stockstream.py
|
1
|
1897
|
import discord
from discord.ext import commands
import secrets
class Stockstream:
def __init__(self, bot):
self.bot = bot
@commands.command()
async def copypasta(self, ctx, ticker: str):
"""Generates a copypasta for StockStream using the given ticker."""
copypasta_list = ["Kreygasm MUST Kreygasm BUY Kreygasm {} Kreygasm THIS Kreygasm ROUND Kreygasm",
"FutureMan BUY FutureMan {} FutureMan FOR FutureMan A FutureMan BRIGHTER FutureMan FUTURE FutureMan",
"Clappy Lemme buy a {0} before I send you a {0} Clappy",
"GivePLZ TRAIN TO PROFIT TOWN TakeNRG BUY {}! GivePLZ BUY {} TakeNRG",
"PogChamp {} PogChamp IS PogChamp OUR PogChamp LAST PogChamp HOPE PogChamp"]
to_post = f"Copypasta ready: `{secrets.choice(copypasta_list).format(ticker.upper())}`"
await ctx.send(to_post)
@commands.command()
async def copypastasell(self, ctx, ticker: str):
"""Generates a copypasta for StockStream using the given ticker."""
copypasta_list = ["Kreygasm MUST Kreygasm SELL Kreygasm {} Kreygasm THIS Kreygasm ROUND Kreygasm",
"Kreygasm TIME Kreygasm TO Kreygasm CASH Kreygasm IN Kreygasm {} Kreygasm",
"FutureMan SELL FutureMan {} FutureMan FOR FutureMan A FutureMan BRIGHTER FutureMan FUTURE FutureMan",
"Clappy Lemme sell a {0} before I send you a {0} Clappy",
"GivePLZ TRAIN TO PROFIT TOWN TakeNRG SELL {}! GivePLZ SELL {} TakeNRG",
"SELLING PogChamp {} PogChamp IS PogChamp OUR PogChamp LAST PogChamp HOPE PogChamp"]
to_post = f"Copypasta ready: `{secrets.choice(copypasta_list).format(ticker.upper())}`"
await ctx.send(to_post)
def setup(bot):
bot.add_cog(Stockstream(bot))
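# Illustrative output (ticker and command prefix assumed, not part of the
# original cog): for "!copypasta amd", secrets.choice() might pick the first
# template, and the bot would reply:
#   Copypasta ready: `Kreygasm MUST Kreygasm BUY Kreygasm AMD Kreygasm THIS Kreygasm ROUND Kreygasm`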
|
mit
| 6,933,392,645,307,308,000
| 53.2
| 128
| 0.627306
| false
| 3.345679
| false
| false
| false
|
ttrifonov/horizon
|
horizon/horizon/dashboards/nova/access_and_security/security_groups/tests.py
|
1
|
10057
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.conf import settings
from django.core.urlresolvers import reverse
from novaclient import exceptions as novaclient_exceptions
from novaclient.v1_1 import security_group_rules as nova_rules
from mox import IsA
from horizon import api
from horizon import test
from .tables import SecurityGroupsTable, RulesTable
SECGROUP_ID = '2'
INDEX_URL = reverse('horizon:nova:access_and_security:index')
SG_CREATE_URL = \
reverse('horizon:nova:access_and_security:security_groups:create')
SG_EDIT_RULE_URL = \
reverse('horizon:nova:access_and_security:security_groups:edit_rules',
args=[SECGROUP_ID])
def strip_absolute_base(uri):
return uri.split(settings.TESTSERVER, 1)[-1]
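# Rough sketch of what strip_absolute_base does (TESTSERVER value assumed):
# with settings.TESTSERVER == 'http://testserver',
#   strip_absolute_base('http://testserver/nova/access_and_security/')
# returns '/nova/access_and_security/'.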
class SecurityGroupsViewTests(test.BaseViewTests):
def setUp(self):
super(SecurityGroupsViewTests, self).setUp()
sg1 = api.SecurityGroup(None)
sg1.id = 1
sg1.name = 'default'
sg2 = api.SecurityGroup(None)
sg2.id = 2
sg2.name = 'group_2'
rule = {'id': 1,
'ip_protocol': u"tcp",
'from_port': "80",
'to_port': "80",
'parent_group_id': "2",
'ip_range': {'cidr': "0.0.0.0/32"}}
manager = nova_rules.SecurityGroupRuleManager
rule_obj = nova_rules.SecurityGroupRule(manager, rule)
self.rules = [rule_obj]
sg1.rules = self.rules
sg2.rules = self.rules
self.security_groups = (sg1, sg2)
def test_create_security_groups_get(self):
res = self.client.get(SG_CREATE_URL)
self.assertTemplateUsed(res,
'nova/access_and_security/security_groups/create.html')
def test_create_security_groups_post(self):
SECGROUP_NAME = 'fakegroup'
SECGROUP_DESC = 'fakegroup_desc'
new_group = self.mox.CreateMock(api.SecurityGroup)
new_group.name = SECGROUP_NAME
formData = {'method': 'CreateGroup',
'tenant_id': self.TEST_TENANT,
'name': SECGROUP_NAME,
'description': SECGROUP_DESC,
}
self.mox.StubOutWithMock(api, 'security_group_create')
api.security_group_create(IsA(http.HttpRequest),
SECGROUP_NAME, SECGROUP_DESC).AndReturn(new_group)
self.mox.ReplayAll()
res = self.client.post(SG_CREATE_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_create_security_groups_post_exception(self):
SECGROUP_NAME = 'fakegroup'
SECGROUP_DESC = 'fakegroup_desc'
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
formData = {'method': 'CreateGroup',
'tenant_id': self.TEST_TENANT,
'name': SECGROUP_NAME,
'description': SECGROUP_DESC,
}
self.mox.StubOutWithMock(api, 'security_group_create')
api.security_group_create(IsA(http.HttpRequest),
SECGROUP_NAME, SECGROUP_DESC).AndRaise(exception)
self.mox.ReplayAll()
res = self.client.post(SG_CREATE_URL, formData)
self.assertTemplateUsed(res,
'nova/access_and_security/security_groups/create.html')
def test_edit_rules_get(self):
self.mox.StubOutWithMock(api, 'security_group_get')
api.security_group_get(IsA(http.HttpRequest), SECGROUP_ID).AndReturn(
self.security_groups[1])
self.mox.ReplayAll()
res = self.client.get(SG_EDIT_RULE_URL)
self.assertTemplateUsed(res,
'nova/access_and_security/security_groups/edit_rules.html')
        self.assertEqual(res.context['security_group'].name,
                         self.security_groups[1].name)
def test_edit_rules_get_exception(self):
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
self.mox.StubOutWithMock(api, 'security_group_get')
api.security_group_get(IsA(http.HttpRequest), SECGROUP_ID) \
.AndRaise(exception)
self.mox.ReplayAll()
res = self.client.get(SG_EDIT_RULE_URL)
self.assertRedirects(res, INDEX_URL)
def test_edit_rules_add_rule(self):
RULE_ID = '1'
FROM_PORT = '-1'
TO_PORT = '-1'
IP_PROTOCOL = 'icmp'
CIDR = '0.0.0.0/0'
new_rule = self.mox.CreateMock(api.SecurityGroup)
new_rule.from_port = FROM_PORT
new_rule.to_port = TO_PORT
new_rule.ip_protocol = IP_PROTOCOL
new_rule.cidr = CIDR
new_rule.security_group_id = SECGROUP_ID
new_rule.id = RULE_ID
formData = {'method': 'AddRule',
'tenant_id': self.TEST_TENANT,
'security_group_id': SECGROUP_ID,
'from_port': FROM_PORT,
'to_port': TO_PORT,
'ip_protocol': IP_PROTOCOL,
'cidr': CIDR}
self.mox.StubOutWithMock(api, 'security_group_rule_create')
api.security_group_rule_create(IsA(http.HttpRequest),
SECGROUP_ID, IP_PROTOCOL, FROM_PORT, TO_PORT, CIDR)\
.AndReturn(new_rule)
self.mox.ReplayAll()
res = self.client.post(SG_EDIT_RULE_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_edit_rules_add_rule_exception(self):
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
FROM_PORT = '-1'
TO_PORT = '-1'
IP_PROTOCOL = 'icmp'
CIDR = '0.0.0.0/0'
formData = {'method': 'AddRule',
'tenant_id': self.TEST_TENANT,
'security_group_id': SECGROUP_ID,
'from_port': FROM_PORT,
'to_port': TO_PORT,
'ip_protocol': IP_PROTOCOL,
'cidr': CIDR}
self.mox.StubOutWithMock(api, 'security_group_rule_create')
api.security_group_rule_create(IsA(http.HttpRequest),
SECGROUP_ID, IP_PROTOCOL, FROM_PORT,
TO_PORT, CIDR).AndRaise(exception)
self.mox.ReplayAll()
res = self.client.post(SG_EDIT_RULE_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_edit_rules_delete_rule(self):
RULE_ID = 1
self.mox.StubOutWithMock(api, 'security_group_rule_delete')
api.security_group_rule_delete(IsA(http.HttpRequest), RULE_ID)
self.mox.ReplayAll()
form_data = {"action": "rules__delete__%s" % RULE_ID}
req = self.factory.post(SG_EDIT_RULE_URL, form_data)
table = RulesTable(req, self.rules)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
def test_edit_rules_delete_rule_exception(self):
RULE_ID = 1
self.mox.StubOutWithMock(api, 'security_group_rule_delete')
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
api.security_group_rule_delete(IsA(http.HttpRequest), RULE_ID) \
.AndRaise(exception)
self.mox.ReplayAll()
form_data = {"action": "rules__delete__%s" % RULE_ID}
req = self.factory.post(SG_EDIT_RULE_URL, form_data)
table = RulesTable(req, self.rules)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
def test_delete_group(self):
self.mox.StubOutWithMock(api, 'security_group_delete')
api.security_group_delete(IsA(http.HttpRequest), 2)
self.mox.ReplayAll()
form_data = {"action": "security_groups__delete__%s" % '2'}
req = self.factory.post(INDEX_URL, form_data)
table = SecurityGroupsTable(req, self.security_groups)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
def test_delete_group_exception(self):
self.mox.StubOutWithMock(api, 'security_group_delete')
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
api.security_group_delete(IsA(http.HttpRequest), 2).\
AndRaise(exception)
self.mox.ReplayAll()
form_data = {"action": "security_groups__delete__%s" % '2'}
req = self.factory.post(INDEX_URL, form_data)
table = SecurityGroupsTable(req, self.security_groups)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
|
apache-2.0
| -533,934,509,314,937,400
| 35.046595
| 79
| 0.586159
| false
| 3.95167
| true
| false
| false
|
zaina/nova
|
nova/virt/libvirt/volume.py
|
1
|
68849
|
# Copyright 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
import errno
import glob
import os
import platform
import re
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import strutils
import six
from six.moves import urllib
import six.moves.urllib.parse as urlparse
from nova.compute import arch
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import paths
from nova.storage import linuxscsi
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import quobyte
from nova.virt.libvirt import remotefs
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.IntOpt('num_iscsi_scan_tries',
default=5,
help='Number of times to rescan iSCSI target to find volume'),
cfg.IntOpt('num_iser_scan_tries',
default=5,
help='Number of times to rescan iSER target to find volume'),
cfg.StrOpt('rbd_user',
help='The RADOS client name for accessing rbd volumes'),
cfg.StrOpt('rbd_secret_uuid',
               help='The libvirt UUID of the secret for the rbd_user '
                    'volumes'),
cfg.StrOpt('nfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the NFS volume is mounted on the'
' compute node'),
cfg.StrOpt('nfs_mount_options',
               help='Mount options passed to the NFS client. See the nfs '
                    'man page for details'),
cfg.StrOpt('smbfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the SMBFS shares are mounted on the '
'compute node'),
cfg.StrOpt('smbfs_mount_options',
default='',
help='Mount options passed to the SMBFS client. See '
'mount.cifs man page for details. Note that the '
'libvirt-qemu uid and gid must be specified.'),
cfg.IntOpt('num_aoe_discover_tries',
default=3,
help='Number of times to rediscover AoE target to find volume'),
cfg.StrOpt('glusterfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the glusterfs volume is mounted on the '
'compute node'),
cfg.BoolOpt('iscsi_use_multipath',
default=False,
help='Use multipath connection of the iSCSI volume'),
cfg.BoolOpt('iser_use_multipath',
default=False,
help='Use multipath connection of the iSER volume'),
cfg.StrOpt('scality_sofs_config',
help='Path or URL to Scality SOFS configuration file'),
cfg.StrOpt('scality_sofs_mount_point',
default='$state_path/scality',
help='Base dir where Scality SOFS shall be mounted'),
cfg.ListOpt('qemu_allowed_storage_drivers',
default=[],
help='Protocols listed here will be accessed directly '
'from QEMU. Currently supported protocols: [gluster]'),
cfg.StrOpt('quobyte_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the Quobyte volume is mounted on the '
'compute node'),
cfg.StrOpt('quobyte_client_cfg',
help='Path to a Quobyte Client configuration file.'),
cfg.StrOpt('iscsi_iface',
deprecated_name='iscsi_transport',
help='The iSCSI transport iface to use to connect to target in '
'case offload support is desired. Default format is of '
'the form <transport_name>.<hwaddress> where '
'<transport_name> is one of (be2iscsi, bnx2i, cxgb3i, '
                    'cxgb4i, qla4xxx, ocs) and <hwaddress> is the MAC address '
'of the interface and can be generated via the '
'iscsiadm -m iface command. Do not confuse the '
'iscsi_iface parameter to be provided here with the '
'actual transport name.'),
# iser is also supported, but use LibvirtISERVolumeDriver
# instead
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
class LibvirtBaseVolumeDriver(object):
"""Base class for volume drivers."""
def __init__(self, connection, is_block_dev):
self.connection = connection
self.is_block_dev = is_block_dev
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = vconfig.LibvirtConfigGuestDisk()
conf.driver_name = libvirt_utils.pick_disk_driver_name(
self.connection._host.get_version(),
self.is_block_dev
)
conf.source_device = disk_info['type']
conf.driver_format = "raw"
conf.driver_cache = "none"
conf.target_dev = disk_info['dev']
conf.target_bus = disk_info['bus']
conf.serial = connection_info.get('serial')
# Support for block size tuning
data = {}
if 'data' in connection_info:
data = connection_info['data']
if 'logical_block_size' in data:
conf.logical_block_size = data['logical_block_size']
if 'physical_block_size' in data:
conf.physical_block_size = data['physical_block_size']
# Extract rate_limit control parameters
if 'qos_specs' in data and data['qos_specs']:
tune_opts = ['total_bytes_sec', 'read_bytes_sec',
'write_bytes_sec', 'total_iops_sec',
'read_iops_sec', 'write_iops_sec']
specs = data['qos_specs']
if isinstance(specs, dict):
for k, v in six.iteritems(specs):
if k in tune_opts:
new_key = 'disk_' + k
setattr(conf, new_key, v)
else:
LOG.warn(_LW('Unknown content in connection_info/'
'qos_specs: %s'), specs)
# Extract access_mode control parameters
if 'access_mode' in data and data['access_mode']:
access_mode = data['access_mode']
if access_mode in ('ro', 'rw'):
conf.readonly = access_mode == 'ro'
else:
LOG.error(_LE('Unknown content in '
'connection_info/access_mode: %s'),
access_mode)
raise exception.InvalidVolumeAccessMode(
access_mode=access_mode)
return conf
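    # Illustrative input (shape assumed for this sketch): a connection_info
    # 'data' dict such as
    #   {'logical_block_size': '4096', 'access_mode': 'rw',
    #    'qos_specs': {'total_bytes_sec': '102400'}}
    # ends up as conf.logical_block_size, conf.readonly = False and a
    # conf.disk_total_bytes_sec attribute on the generated disk config.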
def _get_secret_uuid(self, conf, password=None):
secret = self.connection._host.find_secret(conf.source_protocol,
conf.source_name)
if secret is None:
secret = self.connection._host.create_secret(conf.source_protocol,
conf.source_name,
password)
return secret.UUIDString()
def _delete_secret_by_name(self, connection_info):
source_protocol = connection_info['driver_volume_type']
netdisk_properties = connection_info['data']
if source_protocol == 'rbd':
return
elif source_protocol == 'iscsi':
usage_type = 'iscsi'
usage_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
self.connection._host.delete_secret(usage_type, usage_name)
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
pass
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
"""Class for volumes backed by local file."""
def __init__(self, connection):
super(LibvirtVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach fake volumes to libvirt."""
def __init__(self, connection):
super(LibvirtFakeVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtFakeVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "network"
conf.source_protocol = "fake"
conf.source_name = "fake"
return conf
class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtNetVolumeDriver,
self).__init__(connection, is_block_dev=False)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNetVolumeDriver,
self).get_config(connection_info, disk_info)
netdisk_properties = connection_info['data']
conf.source_type = "network"
conf.source_protocol = connection_info['driver_volume_type']
conf.source_name = netdisk_properties.get('name')
conf.source_hosts = netdisk_properties.get('hosts', [])
conf.source_ports = netdisk_properties.get('ports', [])
auth_enabled = netdisk_properties.get('auth_enabled')
if (conf.source_protocol == 'rbd' and
CONF.libvirt.rbd_secret_uuid):
conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
auth_enabled = True # Force authentication locally
if CONF.libvirt.rbd_user:
conf.auth_username = CONF.libvirt.rbd_user
if conf.source_protocol == 'iscsi':
try:
conf.source_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
target_portal = netdisk_properties['target_portal']
except KeyError:
raise exception.NovaException(_("Invalid volume source data"))
ip, port = utils.parse_server_string(target_portal)
if ip == '' or port == '':
raise exception.NovaException(_("Invalid target_lun"))
conf.source_hosts = [ip]
conf.source_ports = [port]
if netdisk_properties.get('auth_method') == 'CHAP':
auth_enabled = True
conf.auth_secret_type = 'iscsi'
password = netdisk_properties.get('auth_password')
conf.auth_secret_uuid = self._get_secret_uuid(conf, password)
if auth_enabled:
conf.auth_username = (conf.auth_username or
netdisk_properties['auth_username'])
conf.auth_secret_type = (conf.auth_secret_type or
netdisk_properties['secret_type'])
conf.auth_secret_uuid = (conf.auth_secret_uuid or
netdisk_properties['secret_uuid'])
return conf
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
super(LibvirtNetVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
self._delete_secret_by_name(connection_info)
class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
supported_transports = ['be2iscsi', 'bnx2i', 'cxgb3i',
'cxgb4i', 'qla4xxx', 'ocs']
def __init__(self, connection):
super(LibvirtISCSIVolumeDriver, self).__init__(connection,
is_block_dev=True)
self.num_scan_tries = CONF.libvirt.num_iscsi_scan_tries
self.use_multipath = CONF.libvirt.iscsi_use_multipath
if CONF.libvirt.iscsi_iface:
self.transport = CONF.libvirt.iscsi_iface
else:
self.transport = 'default'
def _get_transport(self):
if self._validate_transport(self.transport):
return self.transport
else:
return 'default'
def _validate_transport(self, transport_iface):
"""Check that given iscsi_iface uses only supported transports
Accepted transport names for provided iface param are
        be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx and ocs. iSER uses its
own separate driver. Note the difference between transport and
iface; unlike iscsi_tcp/iser, this is not one and the same for
offloaded transports, where the default format is
transport_name.hwaddress
"""
# We can support iser here as well, but currently reject it as the
# separate iser driver has not yet been deprecated.
if transport_iface == 'default':
return True
# Will return (6) if iscsi_iface file was not found, or (2) if iscsid
# could not be contacted
out = self._run_iscsiadm_bare(['-m',
'iface',
'-I',
transport_iface],
check_exit_code=[0, 2, 6])[0] or ""
LOG.debug("iscsiadm %(iface)s configuration: stdout=%(out)s",
{'iface': transport_iface, 'out': out})
for data in [line.split() for line in out.splitlines()]:
if data[0] == 'iface.transport_name':
if data[2] in self.supported_transports:
return True
LOG.warn(_LW("No useable transport found for iscsi iface %s. "
"Falling back to default transport"),
transport_iface)
return False
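    # Illustrative values (assumed): an offloaded iface name usually looks
    # like 'bnx2i.00:05:b5:d2:a0:c2', and the iscsiadm line inspected above
    # looks like 'iface.transport_name = bnx2i', whose third token is the
    # transport that must appear in supported_transports.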
def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = utils.execute('iscsiadm', '-m', 'node', '-T',
iscsi_properties['target_iqn'],
'-p', iscsi_properties['target_portal'],
*iscsi_command, run_as_root=True,
check_exit_code=check_exit_code)
msg = ('iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s' %
{'command': iscsi_command, 'out': out, 'err': err})
# NOTE(bpokorny): iscsi_command can contain passwords so we need to
# sanitize the password in the message.
LOG.debug(strutils.mask_password(msg))
return (out, err)
def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
**kwargs):
iscsi_command = ('--op', 'update', '-n', property_key,
'-v', property_value)
return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
def _get_target_portals_from_iscsiadm_output(self, output):
# return both portals and iqns
#
# as we are parsing a command line utility, allow for the
# possibility that additional debug data is spewed in the
# stream, and only grab actual ip / iqn lines.
targets = []
for data in [line.split() for line in output.splitlines()]:
if len(data) == 2 and data[1].startswith('iqn.'):
targets.append(data)
return targets
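    # Illustrative parse (portal/iqn values assumed): a discovery line such as
    #   '10.0.2.15:3260,1 iqn.2010-10.org.openstack:volume-1'
    # becomes the entry ['10.0.2.15:3260,1', 'iqn.2010-10.org.openstack:volume-1'],
    # while lines of any other shape (debug noise) are dropped.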
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtISCSIVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
@utils.synchronized('connect_volume')
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
iscsi_properties = connection_info['data']
# multipath installed, discovering other targets if available
# multipath should be configured on the nova-compute node,
# in order to fit storage vendor
if self.use_multipath:
out = self._run_iscsiadm_discover(iscsi_properties)
# There are two types of iSCSI multipath devices. One which shares
# the same iqn between multiple portals, and the other which use
# different iqns on different portals. Try to identify the type by
# checking the iscsiadm output if the iqn is used by multiple
# portals. If it is, it's the former, so use the supplied iqn.
# Otherwise, it's the latter, so try the ip,iqn combinations to
# find the targets which constitutes the multipath device.
ips_iqns = self._get_target_portals_from_iscsiadm_output(out)
same_portal = False
all_portals = set()
match_portals = set()
for ip, iqn in ips_iqns:
all_portals.add(ip)
if iqn == iscsi_properties['target_iqn']:
match_portals.add(ip)
if len(all_portals) == len(match_portals):
same_portal = True
for ip, iqn in ips_iqns:
props = iscsi_properties.copy()
props['target_portal'] = ip.split(",")[0]
if not same_portal:
props['target_iqn'] = iqn
self._connect_to_iscsi_portal(props)
self._rescan_iscsi()
else:
self._connect_to_iscsi_portal(iscsi_properties)
# Detect new/resized LUNs for existing sessions
self._run_iscsiadm(iscsi_properties, ("--rescan",))
host_device = self._get_host_device(iscsi_properties)
# The /dev/disk/by-path/... node is not always present immediately
# TODO(justinsb): This retry-with-delay is a pattern, move to utils?
tries = 0
disk_dev = disk_info['dev']
# Check host_device only when transport is used, since otherwise it is
# directly derived from properties. Only needed for unit tests
while ((self._get_transport() != "default" and not host_device)
or not os.path.exists(host_device)):
if tries >= self.num_scan_tries:
raise exception.NovaException(_("iSCSI device not found at %s")
% (host_device))
LOG.warn(_LW("ISCSI volume not yet found at: %(disk_dev)s. "
"Will rescan & retry. Try number: %(tries)s"),
{'disk_dev': disk_dev, 'tries': tries})
# The rescan isn't documented as being necessary(?), but it helps
self._run_iscsiadm(iscsi_properties, ("--rescan",))
# For offloaded open-iscsi transports, host_device cannot be
# guessed unlike iscsi_tcp where it can be obtained from
# properties, so try and get it again.
if not host_device and self._get_transport() != "default":
host_device = self._get_host_device(iscsi_properties)
tries = tries + 1
if not host_device or not os.path.exists(host_device):
time.sleep(tries ** 2)
if tries != 0:
LOG.debug("Found iSCSI node %(disk_dev)s "
"(after %(tries)s rescans)",
{'disk_dev': disk_dev,
'tries': tries})
if self.use_multipath:
# we use the multipath device instead of the single path device
self._rescan_multipath()
multipath_device = self._get_multipath_device_name(host_device)
if multipath_device is not None:
host_device = multipath_device
connection_info['data']['multipath_id'] = \
multipath_device.split('/')[-1]
connection_info['data']['device_path'] = host_device
def _run_iscsiadm_discover(self, iscsi_properties):
def run_iscsiadm_update_discoverydb():
return utils.execute(
'iscsiadm',
'-m', 'discoverydb',
'-t', 'sendtargets',
'-p', iscsi_properties['target_portal'],
'--op', 'update',
'-n', "discovery.sendtargets.auth.authmethod",
'-v', iscsi_properties['discovery_auth_method'],
'-n', "discovery.sendtargets.auth.username",
'-v', iscsi_properties['discovery_auth_username'],
'-n', "discovery.sendtargets.auth.password",
'-v', iscsi_properties['discovery_auth_password'],
run_as_root=True)
out = None
if iscsi_properties.get('discovery_auth_method'):
try:
run_iscsiadm_update_discoverydb()
except processutils.ProcessExecutionError as exc:
# iscsiadm returns 6 for "db record not found"
if exc.exit_code == 6:
(out, err) = utils.execute(
'iscsiadm',
'-m', 'discoverydb',
'-t', 'sendtargets',
'-p', iscsi_properties['target_portal'],
'--op', 'new',
run_as_root=True)
run_iscsiadm_update_discoverydb()
else:
raise
out = self._run_iscsiadm_bare(
['-m',
'discoverydb',
'-t',
'sendtargets',
'-p',
iscsi_properties['target_portal'],
'--discover'],
check_exit_code=[0, 255])[0] or ""
else:
out = self._run_iscsiadm_bare(
['-m',
'discovery',
'-t',
'sendtargets',
'-p',
iscsi_properties['target_portal']],
check_exit_code=[0, 255])[0] or ""
return out
@utils.synchronized('connect_volume')
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
iscsi_properties = connection_info['data']
host_device = self._get_host_device(iscsi_properties)
multipath_device = None
if self.use_multipath:
if 'multipath_id' in iscsi_properties:
multipath_device = ('/dev/mapper/%s' %
iscsi_properties['multipath_id'])
else:
multipath_device = self._get_multipath_device_name(host_device)
super(LibvirtISCSIVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
if self.use_multipath and multipath_device:
return self._disconnect_volume_multipath_iscsi(iscsi_properties,
multipath_device)
# NOTE(vish): Only disconnect from the target if no luns from the
# target are in use.
device_byname = ("ip-%s-iscsi-%s-lun-" %
(iscsi_properties['target_portal'],
iscsi_properties['target_iqn']))
devices = self.connection._get_all_block_devices()
devices = [dev for dev in devices if (device_byname in dev
and
dev.startswith(
'/dev/disk/by-path/'))]
if not devices:
self._disconnect_from_iscsi_portal(iscsi_properties)
elif host_device not in devices:
# Delete device if LUN is not in use by another instance
self._delete_device(host_device)
def _delete_device(self, device_path):
device_name = os.path.basename(os.path.realpath(device_path))
delete_control = '/sys/block/' + device_name + '/device/delete'
if os.path.exists(delete_control):
# Copy '1' from stdin to the device delete control file
utils.execute('cp', '/dev/stdin', delete_control,
process_input='1', run_as_root=True)
else:
LOG.warn(_LW("Unable to delete volume device %s"), device_name)
def _remove_multipath_device_descriptor(self, disk_descriptor):
disk_descriptor = disk_descriptor.replace('/dev/mapper/', '')
try:
self._run_multipath(['-f', disk_descriptor],
check_exit_code=[0, 1])
except processutils.ProcessExecutionError as exc:
# Because not all cinder drivers need to remove the dev mapper,
# here just logs a warning to avoid affecting those drivers in
# exceptional cases.
LOG.warn(_LW('Failed to remove multipath device descriptor '
'%(dev_mapper)s. Exception message: %(msg)s')
% {'dev_mapper': disk_descriptor,
'msg': exc.message})
def _disconnect_volume_multipath_iscsi(self, iscsi_properties,
multipath_device):
self._rescan_multipath()
block_devices = self.connection._get_all_block_devices()
devices = []
for dev in block_devices:
if "/mapper/" in dev:
devices.append(dev)
else:
mpdev = self._get_multipath_device_name(dev)
if mpdev:
devices.append(mpdev)
# Do a discovery to find all targets.
# Targets for multiple paths for the same multipath device
# may not be the same.
out = self._run_iscsiadm_discover(iscsi_properties)
# Extract targets for the current multipath device.
ips_iqns = []
entries = self._get_iscsi_devices()
for ip, iqn in self._get_target_portals_from_iscsiadm_output(out):
ip_iqn = "%s-iscsi-%s" % (ip.split(",")[0], iqn)
for entry in entries:
entry_ip_iqn = entry.split("-lun-")[0]
if entry_ip_iqn[:3] == "ip-":
entry_ip_iqn = entry_ip_iqn[3:]
elif entry_ip_iqn[:4] == "pci-":
# Look at an offset of len('pci-0000:00:00.0')
offset = entry_ip_iqn.find("ip-", 16, 21)
entry_ip_iqn = entry_ip_iqn[(offset + 3):]
if (ip_iqn != entry_ip_iqn):
continue
entry_real_path = os.path.realpath("/dev/disk/by-path/%s" %
entry)
entry_mpdev = self._get_multipath_device_name(entry_real_path)
if entry_mpdev == multipath_device:
ips_iqns.append([ip, iqn])
break
if not devices:
# disconnect if no other multipath devices
self._disconnect_mpath(iscsi_properties, ips_iqns)
return
# Get a target for all other multipath devices
other_iqns = [self._get_multipath_iqn(device)
for device in devices]
# Get all the targets for the current multipath device
current_iqns = [iqn for ip, iqn in ips_iqns]
in_use = False
for current in current_iqns:
if current in other_iqns:
in_use = True
break
# If no other multipath device attached has the same iqn
# as the current device
if not in_use:
# disconnect if no other multipath devices with same iqn
self._disconnect_mpath(iscsi_properties, ips_iqns)
return
elif multipath_device not in devices:
# delete the devices associated w/ the unused multipath
self._delete_mpath(iscsi_properties, multipath_device, ips_iqns)
# else do not disconnect iscsi portals,
# as they are used for other luns,
# just remove multipath mapping device descriptor
self._remove_multipath_device_descriptor(multipath_device)
return
def _connect_to_iscsi_portal(self, iscsi_properties):
# NOTE(vish): If we are on the same host as nova volume, the
# discovery makes the target so we don't need to
# run --op new. Therefore, we check to see if the
# target exists, and if we get 255 (Not Found), then
# we run --op new. This will also happen if another
# volume is using the same target.
try:
self._run_iscsiadm(iscsi_properties, ())
except processutils.ProcessExecutionError as exc:
# iscsiadm returns 21 for "No records found" after version 2.0-871
if exc.exit_code in [21, 255]:
self._reconnect(iscsi_properties)
else:
raise
if iscsi_properties.get('auth_method'):
self._iscsiadm_update(iscsi_properties,
"node.session.auth.authmethod",
iscsi_properties['auth_method'])
self._iscsiadm_update(iscsi_properties,
"node.session.auth.username",
iscsi_properties['auth_username'])
self._iscsiadm_update(iscsi_properties,
"node.session.auth.password",
iscsi_properties['auth_password'])
# duplicate logins crash iscsiadm after load,
# so we scan active sessions to see if the node is logged in.
out = self._run_iscsiadm_bare(["-m", "session"],
run_as_root=True,
check_exit_code=[0, 1, 21])[0] or ""
portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
for p in out.splitlines() if p.startswith("tcp:")]
stripped_portal = iscsi_properties['target_portal'].split(",")[0]
if len(portals) == 0 or len([s for s in portals
if stripped_portal ==
s['portal'].split(",")[0]
and
s['iqn'] ==
iscsi_properties['target_iqn']]
) == 0:
try:
self._run_iscsiadm(iscsi_properties,
("--login",),
check_exit_code=[0, 255])
except processutils.ProcessExecutionError as err:
# as this might be one of many paths,
# only set successful logins to startup automatically
if err.exit_code in [15]:
self._iscsiadm_update(iscsi_properties,
"node.startup",
"automatic")
return
self._iscsiadm_update(iscsi_properties,
"node.startup",
"automatic")
def _disconnect_from_iscsi_portal(self, iscsi_properties):
self._iscsiadm_update(iscsi_properties, "node.startup", "manual",
check_exit_code=[0, 21, 255])
self._run_iscsiadm(iscsi_properties, ("--logout",),
check_exit_code=[0, 21, 255])
self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
check_exit_code=[0, 21, 255])
def _get_multipath_device_name(self, single_path_device):
device = os.path.realpath(single_path_device)
out = self._run_multipath(['-ll',
device],
check_exit_code=[0, 1])[0]
mpath_line = [line for line in out.splitlines()
if "scsi_id" not in line] # ignore udev errors
if len(mpath_line) > 0 and len(mpath_line[0]) > 0:
return "/dev/mapper/%s" % mpath_line[0].split(" ")[0]
return None
def _get_iscsi_devices(self):
try:
devices = list(os.walk('/dev/disk/by-path'))[0][-1]
except IndexError:
return []
iscsi_devs = []
for entry in devices:
if (entry.startswith("ip-") or
(entry.startswith('pci-') and 'ip-' in entry)):
iscsi_devs.append(entry)
return iscsi_devs
def _delete_mpath(self, iscsi_properties, multipath_device, ips_iqns):
entries = self._get_iscsi_devices()
# Loop through ips_iqns to construct all paths
iqn_luns = []
for ip, iqn in ips_iqns:
iqn_lun = '%s-lun-%s' % (iqn,
iscsi_properties.get('target_lun', 0))
iqn_luns.append(iqn_lun)
for dev in ['/dev/disk/by-path/%s' % dev for dev in entries]:
for iqn_lun in iqn_luns:
if iqn_lun in dev:
self._delete_device(dev)
self._rescan_multipath()
def _disconnect_mpath(self, iscsi_properties, ips_iqns):
for ip, iqn in ips_iqns:
props = iscsi_properties.copy()
props['target_portal'] = ip
props['target_iqn'] = iqn
self._disconnect_from_iscsi_portal(props)
self._rescan_multipath()
def _get_multipath_iqn(self, multipath_device):
entries = self._get_iscsi_devices()
for entry in entries:
entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
entry_multipath = self._get_multipath_device_name(entry_real_path)
if entry_multipath == multipath_device:
return entry.split("iscsi-")[1].split("-lun")[0]
return None
def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = utils.execute('iscsiadm',
*iscsi_command,
run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
{'command': iscsi_command, 'out': out, 'err': err})
return (out, err)
def _run_multipath(self, multipath_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = utils.execute('multipath',
*multipath_command,
run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug("multipath %(command)s: stdout=%(out)s stderr=%(err)s",
{'command': multipath_command, 'out': out, 'err': err})
return (out, err)
def _rescan_iscsi(self):
self._run_iscsiadm_bare(('-m', 'node', '--rescan'),
check_exit_code=[0, 1, 21, 255])
self._run_iscsiadm_bare(('-m', 'session', '--rescan'),
check_exit_code=[0, 1, 21, 255])
def _rescan_multipath(self):
self._run_multipath(['-r'], check_exit_code=[0, 1, 21])
def _get_host_device(self, transport_properties):
"""Find device path in devtemfs."""
device = ("ip-%s-iscsi-%s-lun-%s" %
(transport_properties['target_portal'],
transport_properties['target_iqn'],
transport_properties.get('target_lun', 0)))
if self._get_transport() == "default":
return ("/dev/disk/by-path/%s" % device)
else:
host_device = None
look_for_device = glob.glob('/dev/disk/by-path/*%s' % device)
if look_for_device:
host_device = look_for_device[0]
return host_device
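    # Illustrative path (assumed values): with the default transport this is
    # '/dev/disk/by-path/ip-10.0.2.15:3260-iscsi-iqn.2010-10.org.openstack:volume-1-lun-1';
    # offloaded transports are matched via the glob above because the node
    # name then carries an extra prefix (e.g. 'pci-0000:05:00.3-ip-...').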
def _reconnect(self, iscsi_properties):
# Note: iscsiadm does not support changing iface.iscsi_ifacename
# via --op update, so we do this at creation time
self._run_iscsiadm(iscsi_properties,
('--interface', self._get_transport(),
'--op', 'new'))
class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtISERVolumeDriver, self).__init__(connection)
self.num_scan_tries = CONF.libvirt.num_iser_scan_tries
self.use_multipath = CONF.libvirt.iser_use_multipath
def _get_transport(self):
return 'iser'
def _get_multipath_iqn(self, multipath_device):
entries = self._get_iscsi_devices()
for entry in entries:
entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
entry_multipath = self._get_multipath_device_name(entry_real_path)
if entry_multipath == multipath_device:
return entry.split("iser-")[1].split("-lun")[0]
return None
class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for NFS."""
def __init__(self, connection):
"""Create back-end to nfs."""
super(LibvirtNFSVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_device_path(self, connection_info):
path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(connection_info['data']['export']))
path = os.path.join(path, connection_info['data']['name'])
return path
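    # Illustrative result (assumed values): with nfs_mount_point_base
    # '/var/lib/nova/mnt', export '192.168.1.1:/srv/nfs' and volume name
    # 'volume-1', this returns
    # '/var/lib/nova/mnt/<hash-of-export>/volume-1', where the hash comes
    # from utils.get_hash_str().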
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNFSVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
return conf
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
options = connection_info['data'].get('options')
self._ensure_mounted(connection_info['data']['export'], options)
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
export = connection_info['data']['export']
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(export))
try:
utils.execute('umount', mount_path, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ('device is busy' in exc.message or
'target is busy' in exc.message):
LOG.debug("The NFS share %s is still in use.", export)
else:
LOG.exception(_LE("Couldn't unmount the NFS share %s"), export)
def _ensure_mounted(self, nfs_export, options=None):
"""@type nfs_export: string
@type options: string
"""
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(nfs_export))
if not libvirt_utils.is_mounted(mount_path, nfs_export):
self._mount_nfs(mount_path, nfs_export, options, ensure=True)
return mount_path
def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False):
"""Mount nfs export to mount path."""
utils.execute('mkdir', '-p', mount_path)
# Construct the NFS mount command.
nfs_cmd = ['mount', '-t', 'nfs']
if CONF.libvirt.nfs_mount_options is not None:
nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options])
if options:
nfs_cmd.extend(options.split(' '))
nfs_cmd.extend([nfs_share, mount_path])
try:
utils.execute(*nfs_cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.message:
LOG.warn(_LW("%s is already mounted"), nfs_share)
else:
raise
class LibvirtSMBFSVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for SMBFS."""
def __init__(self, connection):
super(LibvirtSMBFSVolumeDriver,
self).__init__(connection, is_block_dev=False)
self.username_regex = re.compile(
r"(user(?:name)?)=(?:[^ ,]+\\)?([^ ,]+)")
def _get_device_path(self, connection_info):
smbfs_share = connection_info['data']['export']
mount_path = self._get_mount_path(smbfs_share)
volume_path = os.path.join(mount_path,
connection_info['data']['name'])
return volume_path
def _get_mount_path(self, smbfs_share):
mount_path = os.path.join(CONF.libvirt.smbfs_mount_point_base,
utils.get_hash_str(smbfs_share))
return mount_path
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtSMBFSVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = 'file'
conf.driver_cache = 'writethrough'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
return conf
def connect_volume(self, connection_info, disk_info):
"""Connect the volume."""
smbfs_share = connection_info['data']['export']
mount_path = self._get_mount_path(smbfs_share)
if not libvirt_utils.is_mounted(mount_path, smbfs_share):
mount_options = self._parse_mount_options(connection_info)
remotefs.mount_share(mount_path, smbfs_share,
export_type='cifs', options=mount_options)
device_path = self._get_device_path(connection_info)
connection_info['data']['device_path'] = device_path
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
smbfs_share = connection_info['data']['export']
mount_path = self._get_mount_path(smbfs_share)
remotefs.unmount_share(mount_path, smbfs_share)
def _parse_mount_options(self, connection_info):
mount_options = " ".join(
[connection_info['data'].get('options') or '',
CONF.libvirt.smbfs_mount_options])
if not self.username_regex.findall(mount_options):
mount_options = mount_options + ' -o username=guest'
else:
# Remove the Domain Name from user name
mount_options = self.username_regex.sub(r'\1=\2', mount_options)
return mount_options.strip(", ").split(' ')
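    # Illustrative behaviour (option strings assumed): 'username=DOMAIN\user'
    # is rewritten to 'username=user' by the regex above, and when neither the
    # connection options nor smbfs_mount_options name a user,
    # '-o username=guest' is appended before the string is split into a list.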
class LibvirtAOEVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach AoE volumes to libvirt."""
def __init__(self, connection):
super(LibvirtAOEVolumeDriver,
self).__init__(connection, is_block_dev=True)
def _aoe_discover(self):
"""Call aoe-discover (aoe-tools) AoE Discover."""
(out, err) = utils.execute('aoe-discover',
run_as_root=True, check_exit_code=0)
return (out, err)
def _aoe_revalidate(self, aoedev):
"""Revalidate the LUN Geometry (When an AoE ID is reused)."""
(out, err) = utils.execute('aoe-revalidate', aoedev,
run_as_root=True, check_exit_code=0)
return (out, err)
def _get_device_path(self, connection_info):
shelf = connection_info['data']['target_shelf']
lun = connection_info['data']['target_lun']
aoedev = 'e%s.%s' % (shelf, lun)
aoedevpath = '/dev/etherd/%s' % (aoedev)
return aoedevpath
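    # Illustrative mapping (assumed values): target_shelf='1' and
    # target_lun='2' give aoedev 'e1.2' and the path '/dev/etherd/e1.2'.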
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtAOEVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
def connect_volume(self, connection_info, mount_device):
shelf = connection_info['data']['target_shelf']
lun = connection_info['data']['target_lun']
aoedev = 'e%s.%s' % (shelf, lun)
aoedevpath = '/dev/etherd/%s' % (aoedev)
if os.path.exists(aoedevpath):
# NOTE(jbr_): If aoedevpath already exists, revalidate the LUN.
self._aoe_revalidate(aoedev)
else:
# NOTE(jbr_): If aoedevpath does not exist, do a discover.
self._aoe_discover()
# NOTE(jbr_): Device path is not always present immediately
def _wait_for_device_discovery(aoedevpath, mount_device):
tries = self.tries
if os.path.exists(aoedevpath):
raise loopingcall.LoopingCallDone()
if self.tries >= CONF.libvirt.num_aoe_discover_tries:
raise exception.NovaException(_("AoE device not found at %s") %
(aoedevpath))
LOG.warn(_LW("AoE volume not yet found at: %(aoedevpath)s. "
"Try number: %(tries)s"),
{'aoedevpath': aoedevpath, 'tries': tries})
self._aoe_discover()
self.tries = self.tries + 1
self.tries = 0
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_device_discovery, aoedevpath, mount_device)
timer.start(interval=2).wait()
tries = self.tries
if tries != 0:
LOG.debug("Found AoE device %(aoedevpath)s "
"(after %(tries)s rediscover)",
{'aoedevpath': aoedevpath,
'tries': tries})
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
class LibvirtGlusterfsVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for GlusterFS."""
def __init__(self, connection):
"""Create back-end to glusterfs."""
super(LibvirtGlusterfsVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_device_path(self, connection_info):
path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
utils.get_hash_str(connection_info['data']['export']))
path = os.path.join(path, connection_info['data']['name'])
return path
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtGlusterfsVolumeDriver,
self).get_config(connection_info, disk_info)
data = connection_info['data']
if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
vol_name = data['export'].split('/')[1]
source_host = data['export'].split('/')[0][:-1]
conf.source_ports = ['24007']
conf.source_type = 'network'
conf.source_protocol = 'gluster'
conf.source_hosts = [source_host]
conf.source_name = '%s/%s' % (vol_name, data['name'])
else:
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
return conf
def connect_volume(self, connection_info, mount_device):
data = connection_info['data']
if 'gluster' not in CONF.libvirt.qemu_allowed_storage_drivers:
self._ensure_mounted(data['export'], data.get('options'))
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
return
export = connection_info['data']['export']
mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
utils.get_hash_str(export))
try:
utils.execute('umount', mount_path, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if 'target is busy' in exc.message:
LOG.debug("The GlusterFS share %s is still in use.", export)
else:
LOG.exception(_LE("Couldn't unmount the GlusterFS share %s"),
export)
def _ensure_mounted(self, glusterfs_export, options=None):
"""@type glusterfs_export: string
@type options: string
"""
mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
utils.get_hash_str(glusterfs_export))
if not libvirt_utils.is_mounted(mount_path, glusterfs_export):
self._mount_glusterfs(mount_path, glusterfs_export,
options, ensure=True)
return mount_path
def _mount_glusterfs(self, mount_path, glusterfs_share,
options=None, ensure=False):
"""Mount glusterfs export to mount path."""
utils.execute('mkdir', '-p', mount_path)
gluster_cmd = ['mount', '-t', 'glusterfs']
if options is not None:
gluster_cmd.extend(options.split(' '))
gluster_cmd.extend([glusterfs_share, mount_path])
try:
utils.execute(*gluster_cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.message:
LOG.warn(_LW("%s is already mounted"), glusterfs_share)
else:
raise
class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Fibre Channel Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtFibreChannelVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_pci_num(self, hba):
# NOTE(walter-boring)
# device path is in format of
# /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2
# sometimes an extra entry exists before the host2 value
# we always want the value prior to the host2 value
pci_num = None
if hba is not None:
if "device_path" in hba:
index = 0
device_path = hba['device_path'].split('/')
for value in device_path:
if value.startswith('host'):
break
index = index + 1
if index > 0:
pci_num = device_path[index - 1]
return pci_num
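    # Worked example using the path from the NOTE above: splitting
    # '/sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2'
    # stops at the first 'host*' component, so the value just before it,
    # '0000:05:00.3', is returned as the PCI number.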
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtFibreChannelVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
def _get_lun_string_for_s390(self, lun):
target_lun = 0
if lun < 256:
target_lun = "0x00%02x000000000000" % lun
elif lun <= 0xffffffff:
target_lun = "0x%08x00000000" % lun
        return target_lun
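    # Worked examples of the encoding above (values assumed):
    #   lun 1   -> '0x0001000000000000'   ("0x00%02x000000000000" % 1)
    #   lun 300 -> '0x0000012c00000000'   ("0x%08x00000000" % 300)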
def _get_device_file_path_s390(self, pci_num, target_wwn, lun):
"""Returns device file path"""
# NOTE the format of device file paths depends on the system
# architecture. Most architectures use a PCI based format.
# Systems following the S390, or S390x architecture use a format
# which is based upon the inherent channel architecture (ccw).
host_device = ("/dev/disk/by-path/ccw-%s-zfcp-%s:%s" %
(pci_num,
target_wwn,
lun))
return host_device
def _remove_lun_from_s390(self, connection_info):
"""Rempove lun from s390 configuration"""
# If LUN scanning is turned off on systems following the s390, or
# s390x architecture LUNs need to be removed from the configuration
# using the unit_remove call. The unit_remove call needs to be issued
# for each (virtual) HBA and target_port.
fc_properties = connection_info['data']
lun = int(fc_properties.get('target_lun', 0))
target_lun = self._get_lun_string_for_s390(lun)
ports = fc_properties['target_wwn']
for device_num, target_wwn in self._get_possible_devices(ports):
libvirt_utils.perform_unit_remove_for_s390(device_num,
target_wwn,
target_lun)
def _get_possible_devices(self, wwnports):
"""Compute the possible valid fiber channel device options.
:param wwnports: possible wwn addresses. Can either be string
or list of strings.
:returns: list of (pci_id, wwn) tuples
Given one or more wwn (mac addresses for fiber channel) ports
do the matrix math to figure out a set of pci device, wwn
tuples that are potentially valid (they won't all be). This
provides a search space for the device connection.
"""
# the wwn (think mac addresses for fiber channel devices) can
# either be a single value or a list. Normalize it to a list
# for further operations.
wwns = []
if isinstance(wwnports, list):
for wwn in wwnports:
wwns.append(str(wwn))
elif isinstance(wwnports, six.string_types):
wwns.append(str(wwnports))
raw_devices = []
hbas = libvirt_utils.get_fc_hbas_info()
for hba in hbas:
pci_num = self._get_pci_num(hba)
if pci_num is not None:
for wwn in wwns:
target_wwn = "0x%s" % wwn.lower()
raw_devices.append((pci_num, target_wwn))
return raw_devices
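    # Illustrative matrix (assumed values): wwnports=['50014380242B9751'] and
    # one HBA whose PCI number resolves to '0000:05:00.3' yield
    # [('0000:05:00.3', '0x50014380242b9751')]; with several HBAs and wwns the
    # list holds every (pci_num, wwn) combination.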
@utils.synchronized('connect_volume')
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
fc_properties = connection_info['data']
mount_device = disk_info["dev"]
possible_devs = self._get_possible_devices(fc_properties['target_wwn'])
# map the raw device possibilities to possible host device paths
host_devices = []
for device in possible_devs:
pci_num, target_wwn = device
if platform.machine() in (arch.S390, arch.S390X):
target_lun = self._get_lun_string_for_s390(
fc_properties.get('target_lun', 0))
host_device = self._get_device_file_path_s390(
pci_num,
target_wwn,
target_lun)
libvirt_utils.perform_unit_add_for_s390(
pci_num, target_wwn, target_lun)
else:
host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
(pci_num,
target_wwn,
fc_properties.get('target_lun', 0)))
host_devices.append(host_device)
if len(host_devices) == 0:
# this is empty because we don't have any FC HBAs
msg = _("We are unable to locate any Fibre Channel devices")
raise exception.NovaException(msg)
# The /dev/disk/by-path/... node is not always present immediately
# We only need to find the first device. Once we see the first device
# multipath will have any others.
def _wait_for_device_discovery(host_devices, mount_device):
tries = self.tries
for device in host_devices:
LOG.debug("Looking for Fibre Channel dev %(device)s",
{'device': device})
if os.path.exists(device):
self.host_device = device
# get the /dev/sdX device. This is used
# to find the multipath device.
self.device_name = os.path.realpath(device)
raise loopingcall.LoopingCallDone()
if self.tries >= CONF.libvirt.num_iscsi_scan_tries:
msg = _("Fibre Channel device not found.")
raise exception.NovaException(msg)
LOG.warn(_LW("Fibre volume not yet found at: %(mount_device)s. "
"Will rescan & retry. Try number: %(tries)s"),
{'mount_device': mount_device, 'tries': tries})
linuxscsi.rescan_hosts(libvirt_utils.get_fc_hbas_info())
self.tries = self.tries + 1
self.host_device = None
self.device_name = None
self.tries = 0
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_device_discovery, host_devices, mount_device)
timer.start(interval=2).wait()
tries = self.tries
if self.host_device is not None and self.device_name is not None:
LOG.debug("Found Fibre Channel volume %(mount_device)s "
"(after %(tries)s rescans)",
{'mount_device': mount_device,
'tries': tries})
# see if the new drive is part of a multipath
# device. If so, we'll use the multipath device.
mdev_info = linuxscsi.find_multipath_device(self.device_name)
if mdev_info is not None:
LOG.debug("Multipath device discovered %(device)s",
{'device': mdev_info['device']})
device_path = mdev_info['device']
connection_info['data']['device_path'] = device_path
connection_info['data']['devices'] = mdev_info['devices']
connection_info['data']['multipath_id'] = mdev_info['id']
else:
# we didn't find a multipath device.
# so we assume the kernel only sees 1 device
device_path = self.host_device
device_info = linuxscsi.get_device_info(self.device_name)
connection_info['data']['device_path'] = device_path
connection_info['data']['devices'] = [device_info]
@utils.synchronized('connect_volume')
def disconnect_volume(self, connection_info, mount_device):
"""Detach the volume from instance_name."""
super(LibvirtFibreChannelVolumeDriver,
self).disconnect_volume(connection_info, mount_device)
# If this is a multipath device, we need to search again
# and make sure we remove all the devices. Some of them
# might not have shown up at attach time.
if 'multipath_id' in connection_info['data']:
multipath_id = connection_info['data']['multipath_id']
mdev_info = linuxscsi.find_multipath_device(multipath_id)
devices = mdev_info['devices'] if mdev_info else []
LOG.debug("devices to remove = %s", devices)
else:
# only needed when multipath-tools is not working properly
devices = connection_info['data'].get('devices', [])
LOG.warn(_LW("multipath-tools probably work improperly. "
"devices to remove = %s.") % devices)
# There may have been more than 1 device mounted
# by the kernel for this volume. We have to remove
# all of them
for device in devices:
linuxscsi.remove_device(device)
if platform.machine() in (arch.S390, arch.S390X):
self._remove_lun_from_s390(connection_info)
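# For reference (derived from connect_volume above; the values are only
# examples): after a successful multipath attach, connection_info['data']
# carries entries of the form
#   {'device_path': '/dev/mapper/36001...', 'devices': [...], 'multipath_id': '36001...'}
# which is exactly what disconnect_volume relies on to locate every path it
# has to flush and remove.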
class LibvirtScalityVolumeDriver(LibvirtBaseVolumeDriver):
"""Scality SOFS Nova driver. Provide hypervisors with access
to sparse files on SOFS.
"""
def __init__(self, connection):
"""Create back-end to SOFS and check connection."""
super(LibvirtScalityVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_device_path(self, connection_info):
path = os.path.join(CONF.libvirt.scality_sofs_mount_point,
connection_info['data']['sofs_path'])
return path
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtScalityVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
# The default driver cache policy is 'none', and this causes
# qemu/kvm to open the volume file with O_DIRECT, which is
# rejected by FUSE (on kernels older than 3.3). Scality SOFS
# is FUSE based, so we must provide a more sensible default.
conf.driver_cache = 'writethrough'
return conf
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
self._check_prerequisites()
self._mount_sofs()
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
def _check_prerequisites(self):
"""Sanity checks before attempting to mount SOFS."""
# config is mandatory
config = CONF.libvirt.scality_sofs_config
if not config:
msg = _("Value required for 'scality_sofs_config'")
LOG.warn(msg)
raise exception.NovaException(msg)
# config can be a file path or a URL, check it
if urlparse.urlparse(config).scheme == '':
# turn local path into URL
config = 'file://%s' % config
try:
urllib.request.urlopen(config, timeout=5).close()
except urllib.error.URLError as e:
msg = _("Cannot access 'scality_sofs_config': %s") % e
LOG.warn(msg)
raise exception.NovaException(msg)
# mount.sofs must be installed
if not os.access('/sbin/mount.sofs', os.X_OK):
msg = _("Cannot execute /sbin/mount.sofs")
LOG.warn(msg)
raise exception.NovaException(msg)
def _mount_sofs(self):
config = CONF.libvirt.scality_sofs_config
mount_path = CONF.libvirt.scality_sofs_mount_point
sysdir = os.path.join(mount_path, 'sys')
if not os.path.isdir(mount_path):
utils.execute('mkdir', '-p', mount_path)
if not os.path.isdir(sysdir):
utils.execute('mount', '-t', 'sofs', config, mount_path,
run_as_root=True)
if not os.path.isdir(sysdir):
msg = _("Cannot mount Scality SOFS, check syslog for errors")
LOG.warn(msg)
raise exception.NovaException(msg)
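# Roughly equivalent shell steps for the mount performed above (the paths are
# hypothetical examples; the real values come from
# CONF.libvirt.scality_sofs_config and CONF.libvirt.scality_sofs_mount_point):
#   mkdir -p /mnt/sofs
#   mount -t sofs /etc/sofs.conf /mnt/sofs
# The driver then requires <mount_point>/sys to exist before trusting the mount.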
class LibvirtGPFSVolumeDriver(LibvirtBaseVolumeDriver):
"""Class for volumes backed by gpfs volume."""
def __init__(self, connection):
super(LibvirtGPFSVolumeDriver,
self).__init__(connection, is_block_dev=False)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtGPFSVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "file"
conf.source_path = connection_info['data']['device_path']
return conf
class LibvirtQuobyteVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for Quobyte."""
def __init__(self, connection):
"""Create back-end to Quobyte."""
super(LibvirtQuobyteVolumeDriver,
self).__init__(connection, is_block_dev=False)
def get_config(self, connection_info, disk_info):
conf = super(LibvirtQuobyteVolumeDriver,
self).get_config(connection_info, disk_info)
data = connection_info['data']
conf.source_protocol = quobyte.SOURCE_PROTOCOL
conf.source_type = quobyte.SOURCE_TYPE
conf.driver_cache = quobyte.DRIVER_CACHE
conf.driver_io = quobyte.DRIVER_IO
conf.driver_format = data.get('format', 'raw')
quobyte_volume = self._normalize_url(data['export'])
path = os.path.join(self._get_mount_point_for_share(quobyte_volume),
data['name'])
conf.source_path = path
return conf
@utils.synchronized('connect_volume')
def connect_volume(self, connection_info, disk_info):
"""Connect the volume."""
data = connection_info['data']
quobyte_volume = self._normalize_url(data['export'])
mount_path = self._get_mount_point_for_share(quobyte_volume)
mounted = libvirt_utils.is_mounted(mount_path,
quobyte.SOURCE_PROTOCOL
+ '@' + quobyte_volume)
if mounted:
try:
os.stat(mount_path)
except OSError as exc:
if exc.errno == errno.ENOTCONN:
mounted = False
LOG.info(_LI('Fixing previous mount %s which was not'
' unmounted correctly.'), mount_path)
quobyte.umount_volume(mount_path)
if not mounted:
quobyte.mount_volume(quobyte_volume,
mount_path,
CONF.libvirt.quobyte_client_cfg)
quobyte.validate_volume(mount_path)
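# Note on the stale-mount check above: a FUSE mount whose client process has
# died typically makes os.stat() on the mount point fail with errno.ENOTCONN
# ("Transport endpoint is not connected"). A minimal standalone version of the
# same test would be:
#   try:
#       os.stat(mount_path)
#   except OSError as exc:
#       stale = (exc.errno == errno.ENOTCONN)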
@utils.synchronized('connect_volume')
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
quobyte_volume = self._normalize_url(connection_info['data']['export'])
mount_path = self._get_mount_point_for_share(quobyte_volume)
if libvirt_utils.is_mounted(mount_path, 'quobyte@' + quobyte_volume):
quobyte.umount_volume(mount_path)
else:
LOG.info(_LI("Trying to disconnected unmounted volume at %s"),
mount_path)
def _normalize_url(self, export):
protocol = quobyte.SOURCE_PROTOCOL + "://"
if export.startswith(protocol):
export = export[len(protocol):]
return export
def _get_mount_point_for_share(self, quobyte_volume):
"""Return mount point for Quobyte volume.
:param quobyte_volume: Example: storage-host/openstack-volumes
"""
return os.path.join(CONF.libvirt.quobyte_mount_point_base,
utils.get_hash_str(quobyte_volume))
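# Example of the layout produced by _get_mount_point_for_share above (the base
# directory is CONF.libvirt.quobyte_mount_point_base; the volume name is just
# the example from the docstring):
#   'storage-host/openstack-volumes'  ->  <mount_point_base>/<hash of the share string>
# so every Quobyte share gets its own stable mount directory.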
|
apache-2.0
| 2,811,650,872,835,121,700
| 42.219711
| 79
| 0.554402
| false
| 4.223088
| true
| false
| false
|
FOSSRIT/Nova
|
controllers/extras.py
|
1
|
79892
|
# Copyright (C) 2008 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# Distributed under the terms of the GNU Lesser General Public License
# http://www.gnu.org/copyleft/lesser.html
from networkx.classes.multigraph import MultiGraph
from networkx.classes.multidigraph import MultiDiGraph
from networkx.exception import NetworkXException, NetworkXError
import networkx.convert as convert
class UbiGraph(MultiGraph):
"""
Base classes for interaction between NetworkX and Ubigraph.
These classes allow drawing with Ubigraph and all of the NetworkX functions.
Examples
--------
(start Ubigraph server)
>>> import networkx
>>> G=nx.UbiGraph()
>>> G.add_edge('a','b',color='#0000ff') # blue edge between 'a' and 'b'
>>> G=nx.UbiGraph(networkx.cycle_graph(5)) # cycle of length 5
See the examples
https://networkx.lanl.gov/browser/networkx/trunk/doc/examples/ubigraph
UbiGraph
--------
NetworkX compatible graph class. Allows self loops and multiple edges.
Extends the NetworkX MultiGraph class.
UbiDiGraph
--------
NetworkX compatible digraph class. Allows self loops and multiple edges.
Extends NetworkX MultiDiGraph class.
Ubigraph attributes
--------------------
In addition to all of the XGraph and XDiGraph methods and NetworkX functions
this class also provides methods to set node and edge attributes and styles.
Node and edge attributes:
>>> G=nx.UbiGraph()
>>> G.add_node('a',shape='torus')
>>> G.add_edge('a','b',style='dashed')
>>> G.set_node_attr('a',color='#0000ff') # node a blue
>>> G.set_node_attr(color='#00ffff') # all nodes cyan
Node and edge styles:
>>> G=nx.UbiGraph(nx.cycle_graph(5)) # cycle of length 5
>>> redtorus=G.new_node_style(color='#ff0000',shape='torus')
>>> G.set_node_attr(style=redtorus) # all nodes to redtorus style
"""
def __init__(self, data=None, name='',
selfloops=False,
multiedges=False,
ubigraph_server= 'http://127.0.0.1:20738/RPC2',
clear=True,
nextid=0):
import xmlrpclib
try:
server_url = ubigraph_server
self.server = xmlrpclib.Server(server_url)
self.ubigraph = self.server.ubigraph
if clear:
self.ubigraph.clear()
except:
raise IOError("No Ubigraph server found")
# default node and edge styles
self.ubigraph.set_vertex_style_attribute(0, "color", "#ff0000")
self.ubigraph.set_vertex_style_attribute(0, "shape", "sphere")
self.ubigraph.set_vertex_style_attribute(0, "size", "0.7")
self.ubigraph.set_edge_style_attribute(0, "color", "#ffffff")
self.ubigraph.set_edge_style_attribute(0, "width", "2.0")
self.use_splines=False
self.use_node_labels=False
self.use_edge_labels=False
# keep a mapping from nodes to ubigraph ids
self.nodeid={}
self.nextid=nextid
self.idnode={}
self.adj={} # adjacency list
self.selfloops=selfloops
self.multiedges=multiedges
if data is not None:
self=convert.from_whatever(data,create_using=self)
self.name=name
def add_node(self, n,**kwds):
if n not in self:
MultiGraph.add_node(self,n)
self.nodeid[n]=self.nextid
self.idnode[self.nextid]=n
self.nextid+=1
self.ubigraph.new_vertex_w_id(self.nodeid[n])
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
# support toggling node labels
if self.use_node_labels:
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(n))
def add_nodes_from(self, nlist,**kwds):
for n in nlist:
self.add_node(n,**kwds)
def delete_node(self,n):
if n in self:
MultiGraph.delete_node(self,n)
self.ubigraph.remove_vertex(self.nodeid[n])
id=self.nodeid[n]
del self.nodeid[n]
del self.idnode[id]
def delete_nodes_from(self,nlist):
for n in nlist:
self.delete_node(n)
def add_edge(self, u, v=None, x=None, **kwds):
if v is None: # add_edge was called as add_edge(e), with e=(u,v,x)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v)
u,v=u # x=None
# if edge exists, quietly return if multiple edges are not allowed
if not self.multiedges and self.has_edge(u,v):
return
# add nodes
self.add_node(u)
self.add_node(v)
# self loop? quietly return if not allowed
if not self.selfloops and u==v:
return
# create ubigraph edge
# build dictionary with edge id and user data to use as edge data
e=self.ubigraph.new_edge(self.nodeid[u],self.nodeid[v])
edata={'id':e,'data':x}
if self.multiedges: # add x to the end of the list of objects
# that defines the edges between u and v
self.adj[u][v]=self.adj[u].get(v,[])+ [edata]
if u!=v:
self.adj[v][u]=self.adj[v].get(u,[])+ [edata]
else: # x is the new object assigned to single edge between u and v
self.adj[u][v]=edata
if u!=v:
self.adj[v][u]=edata # a copy would be required to avoid
# modifying both at the same time
# when doing a delete_edge
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(e,k,v)
# support toggling edge labels
if self.use_edge_labels:
self.ubigraph.set_edge_attribute(e,'label',str(x))
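# Sketch of the bookkeeping done above (values are illustrative): after
# G.add_edge('a', 'b', color='#0000ff') the adjacency entry holds the Ubigraph
# edge id together with the user data,
#   self.adj['a']['b'] == {'id': <ubigraph edge id>, 'data': None}
# and with multiedges=True the same slot holds a list of such dictionaries
# instead, one per parallel edge.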
def add_edges_from(self, ebunch,**kwds):
for e in ebunch:
self.add_edge(e,**kwds)
def delete_edge(self, u, v=None, x=None):
if v is None: # was called as delete_edge(e)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v), x unspecified, set to None
u,v=u # x=None
try:
xdata=x['data']
except:
xdata=x
if self.multiedges:
if (self.adj.has_key(u) and self.adj[u].has_key(v)):
x=None
for edata in self.adj[u][v]:
if xdata == edata['data']:
x=edata # (u,v,edata) is an edge
eid=edata['id']
if x is None:
return # no edge
# remove the edge item from list
self.adj[u][v].remove(x)
# and if not self loop remove v->u entry
if u!=v:
self.adj[v][u].remove(x)
# if last edge between u and v was deleted, remove all trace
if len(self.adj[u][v])==0:
del self.adj[u][v]
# and if not self loop remove v->u entry
if u!=v:
del self.adj[v][u]
self.ubigraph.remove_edge(eid)
else: # delete single edge
if self.has_neighbor(u,v):
eid=self.get_edge(u,v)['id']
self.ubigraph.remove_edge(eid)
del self.adj[u][v]
if u!=v:
del self.adj[v][u]
def delete_edges_from(self, ebunch):
for e in ebunch:
self.delete_edge(e)
def clear(self):
if len(self)>0:
MultiGraph.clear(self)
self.ubigraph.clear()
self.nodeid={}
self.nextid=0
# node and edge attrs
def set_node_attr(self,nbunch=None,style=None,**kwds):
bunch=self.nbunch_iter(nbunch)
for n in bunch:
if style is None:
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
else:
self.ubigraph.change_vertex_style(self.nodeid[n],style)
def set_edge_attr(self,ebunch=None,style=None,**kwds):
if ebunch is None:
bunch=self.edges(data=True)
else:
try:
self.has_edge(ebunch)
bunch=[ebunch]
except:
bunch=list(ebunch)
for (u,v,d) in bunch:
if style is None:
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(d['id'],k,v)
else:
ret=self.ubigraph.change_edge_style(d['id'],style)
# node and edge styles
def new_node_style(self,style=0,**kwds):
style=self.ubigraph.new_vertex_style(style)
for (k,v) in kwds.items():
self.ubigraph.set_vertex_style_attribute(style,k,v)
return style
def new_edge_style(self,style=0,**kwds):
style=self.ubigraph.new_edge_style(style)
for (k,v) in kwds.items():
self.ubigraph.set_edge_style_attribute(style,k,v)
return style
# ubigraph helper methods
# an interface to the internal ubigraph methods that do this
# would make this simpler
def splines(self):
"""Toggle spline edges.
"""
if self.use_splines==True:
self.set_edge_attr(spline='false')
self.use_splines=False
else:
self.set_edge_attr(spline='true')
self.use_splines=True
def node_labels(self,nbunch=None,labels=None):
"""Toggle node labels.
"""
bunch=list(self.nbunch_iter(nbunch))
if self.use_node_labels==True:
labels=dict(zip(bunch,['']*len(bunch)))
self.use_node_labels=False
else:
if labels is None:
labels=dict(zip(bunch,bunch))
self.use_node_labels=True
for n,label in labels.items():
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(label))
def edge_labels(self,ebunch=None,labels=None):
"""Toggle edge labels.
"""
if ebunch is None:
bunch=self.edges(data=True)
else:
try:
self.has_edge(ebunch)
bunch=[ebunch]
except:
bunch=list(ebunch)
if self.use_edge_labels==True:
labels=dict([(d['id'],'') for u,v,d in bunch])
self.use_edge_labels=False
else:
if labels is None:
labels=dict([(d['id'],str(d['data'])) for u,v,d in bunch if d['data'] is not None])
self.use_edge_labels=True
for eid,label in labels.items():
self.ubigraph.set_edge_attribute(eid,'label',label)
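# Minimal usage sketch for the helpers above (assumes a running Ubigraph
# server at the default RPC endpoint; purely illustrative):
#   G = UbiGraph(ubigraph_server='http://127.0.0.1:20738/RPC2')
#   G.add_edge('a', 'b')
#   G.splines()        # toggle curved edges on
#   G.node_labels()    # show node names as labels
#   G.edge_labels()    # show edge data as labels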
class UbiDiGraph(UbiGraph,MultiDiGraph):
def __init__(self, data=None, name='',
selfloops=False,
multiedges=False,
ubigraph_server= 'http://127.0.0.1:20738/RPC2',
clear=True):
self.pred={} # predecessor
self.succ={}
UbiGraph.__init__(self,
data=data,name=name,
selfloops=selfloops,
multiedges=multiedges,
ubigraph_server=ubigraph_server,
clear=clear)
self.ubigraph.set_edge_style_attribute(0, "arrow", "true")
self.adj=self.succ # successor is same as adj for digraph
def add_node(self, n,**kwds):
if n not in self:
MultiDiGraph.add_node(self,n)
self.nodeid[n]=self.nextid
self.nextid+=1
self.ubigraph.new_vertex_w_id(self.nodeid[n])
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
# support toggling node labels
if self.use_node_labels:
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(n))
def delete_node(self,n):
if n in self:
MultiDiGraph.delete_node(self,n)
self.ubigraph.remove_vertex(self.nodeid[n])
def add_edge(self, u, v=None, x=None, **kwds):
if v is None: # add_edge was called as add_edge(e), with e a tuple
if len(u)==3: #case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v)
u,v=u # x=None
# if edge exists, quietly return if multiple edges are not allowed
if not self.multiedges and self.has_edge(u,v,x):
return
# add nodes
self.add_node(u)
self.add_node(v)
# self loop? quietly return if not allowed
if not self.selfloops and u==v:
return
# create ubigraph edge
# build dictionary with edge id and user data to use as edge data
e=self.ubigraph.new_edge(self.nodeid[u],self.nodeid[v])
edata={'id':e,'data':x}
if self.multiedges: # append x to the end of the list of objects
# that defines the edges between u and v
self.succ[u][v]=self.succ[u].get(v,[])+ [edata]
self.pred[v][u]=self.pred[v].get(u,[])+ [edata]
else: # x is the new object assigned to single edge between u and v
self.succ[u][v]=edata
self.pred[v][u]=edata # note that the same object is referred to
# from both succ and pred
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(e,k,v)
# support toggling edge labels
if self.use_edge_labels:
self.ubigraph.set_edge_attribute(e,'label',str(x))
def delete_edge(self, u, v=None, x=None):
if v is None: # was called as delete_edge(e)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v), x unspecified, set to None
u,v=u # x=None
try:
xdata=x['data']
except:
xdata=x
if self.multiedges: # multiedges are stored as a list
if (self.succ.has_key(u) and self.succ[u].has_key(v)):
x=None
for edata in self.succ[u][v]:
if xdata == edata['data']:
x=edata # (u,v,edata) is an edge
eid=edata['id']
if x is None:
return # no edge
self.succ[u][v].remove(x) # remove the edge item from list
self.pred[v][u].remove(x)
if len(self.succ[u][v])==0: # if last edge between u and v
del self.succ[u][v] # was deleted, remove all trace
del self.pred[v][u]
self.ubigraph.remove_edge(eid)
else: # delete single edge
if self.has_successor(u,v):
eid=self.get_edge(u,v)['id']
self.ubigraph.remove_edge(eid)
del self.succ[u][v]
del self.pred[v][u]
return
def clear(self):
if len(self)>0:
MultiDiGraph.clear(self)
self.ubigraph.clear()
self.nodeid={}
self.nextid=0
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
except:
bunch=list(ebunch)
if self.use_edge_labels==True:
labels=dict([(d['id'],'') for u,v,d in bunch])
self.use_edge_labels=False
else:
if labels is None:
labels=dict([(d['id'],str(d['data'])) for u,v,d in bunch if d['data'] is not None])
self.use_edge_labels=True
for eid,label in labels.items():
self.ubigraph.set_edge_attribute(eid,'label',label)
class UbiDiGraph(UbiGraph,MultiDiGraph):
def __init__(self, data=None, name='',
selfloops=False,
multiedges=False,
ubigraph_server= 'http://127.0.0.1:20738/RPC2',
clear=True):
self.pred={} # predecessor
self.succ={}
UbiGraph.__init__(self,
data=data,name=name,
selfloops=selfloops,
multiedges=multiedges,
ubigraph_server=ubigraph_server,
clear=clear)
self.ubigraph.set_edge_style_attribute(0, "arrow", "true")
self.adj=self.succ # successor is same as adj for digraph
def add_node(self, n,**kwds):
if n not in self:
MultiDiGraph.add_node(self,n)
self.nodeid[n]=self.nextid
self.nextid+=1
self.ubigraph.new_vertex_w_id(self.nodeid[n])
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
# support toggling node labels
if self.use_node_labels:
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(n))
def delete_node(self,n):
if n in self:
MultiDiGraph.delete_node(self,n)
self.ubigraph.remove_vertex(self.nodeid[n])
def add_edge(self, u, v=None, x=None, **kwds):
if v is None: # add_edge was called as add_edge(e), with e a tuple
if len(u)==3: #case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v)
u,v=u # x=None
# if edge exists, quietly return if multiple edges are not allowed
if not self.multiedges and self.has_edge(u,v,x):
return
# add nodes
self.add_node(u)
self.add_node(v)
# self loop? quietly return if not allowed
if not self.selfloops and u==v:
return
# create ubigraph edge
# build dictionary with edge id and user data to use as edge data
e=self.ubigraph.new_edge(self.nodeid[u],self.nodeid[v])
edata={'id':e,'data':x}
if self.multiedges: # append x to the end of the list of objects
# that defines the edges between u and v
self.succ[u][v]=self.succ[u].get(v,[])+ [edata]
self.pred[v][u]=self.pred[v].get(u,[])+ [edata]
else: # x is the new object assigned to single edge between u and v
self.succ[u][v]=edata
self.pred[v][u]=edata # note that the same object is referred to
# from both succ and pred
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(e,k,v)
# support toggling edge labels
if self.use_edge_labels:
self.ubigraph.set_edge_attribute(e,'label',str(x))
def delete_edge(self, u, v=None, x=None):
if v is None: # was called as delete_edge(e)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v), x unspecified, set to None
u,v=u # x=None
try:
xdata=x['data']
except:
xdata=x
if self.multiedges: # multiedges are stored as a list
if (self.succ.has_key(u) and self.succ[u].has_key(v)):
x=None
for edata in self.succ[u][v]:
if xdata == edata['data']:
x=edata # (u,v,edata) is an edge
eid=edata['id']
if x is None:
return # no edge
self.succ[u][v].remove(x) # remove the edge item from list
self.pred[v][u].remove(x)
if len(self.succ[u][v])==0: # if last edge between u and v
del self.succ[u][v] # was deleted, remove all trace
del self.pred[v][u]
self.ubigraph.remove_edge(eid)
else: # delete single edge
if self.has_successor(u,v):
eid=self.get_edge(u,v)['id']
self.ubigraph.remove_edge(eid)
del self.succ[u][v]
del self.pred[v][u]
return
def clear(self):
if len(self)>0:
MultiDiGraph.clear(self)
self.ubigraph.clear()
self.nodeid={}
self.nextid=0
import networkx as nx
TYPE_COLORS = ["#ff0000", "#ffff00", "#00ff00", "#ffffff", "#ffffff", "#ff0000"]
TYPE_SHAPES = ["octahedron", "sphere", "icosahedron"]
class graph:
def __init__(self):
self.__graph = nx.Graph()
self.__node_updated = []
def connect_ubigraph(self, server=None):
try:
self.__graph = nx.UbiGraph(self.__graph, ubigraph_server=server)
except:
print """
It looks like you are using a version of networkx that has removed
support for ubigraph. I will attempt to load a copy of the old
class.
"""
self.__graph = UbiGraph(self.__graph, ubigraph_server=server)
self.__graph.node_labels()
def add_edit(self, page, id, name):
# Add Page
self.__graph.add_node( page,color=TYPE_COLORS[id], shape=TYPE_SHAPES[0] )
self.__graph.set_node_attr( page, label=name )
def add_edge( self, a, b ):
self.__graph.add_edge(a, b)
def run_graph():
the_graph = graph()
the_graph.connect_ubigraph( "http://localhost:20738/RPC2" )
#populate Nodes
for node in db(db.node.id > 0).select():
the_graph.add_edit( node.url, node.type.id - 1, node.name )
for link in db(db.linkTable.id > 0).select():
the_graph.add_edge(link.nodeId.url, link.linkId.url)
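# Editor's note (not part of the original source): `db`, `db.node` and
# `db.linkTable` used in run_graph() above are assumed to be supplied by the
# enclosing web2py application's models; they are not defined in this file.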
|
gpl-3.0
| -6,707,146,173,462,512,000
| 33.615251
| 101
| 0.528664
| false
| 3.580193
| false
| false
| false
|
isotoma/precog
|
setup.py
|
1
|
1182
|
# Copyright 2016 Isotoma Limited
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
setup(
name='precog',
version='0.0.2',
author='Isotoma Limited',
author_email='support@isotoma.com',
description='Git-hooks for flake8, isort and eslint',
url='https://github.com/isotoma/precog',
packages=find_packages(),
test_suite='tests',
install_requires=[
# These can probably be relaxed.
'isort>=4.2.2',
'flake8>=2.4.1',
],
tests_require=['mock'],
license="Apache Software License",
entry_points='''
[console_scripts]
precog = precog.install:install_git_hook
'''
)
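# Editor's note (not part of the original file): a hedged sketch of how the
# console_scripts entry point declared above is typically exercised after
# installation (the behaviour of the `precog` command is assumed from the
# declaration, not verified here).
#
#   $ pip install .
#   $ precog   # runs precog.install:install_git_hook in the current repository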
|
apache-2.0
| 5,478,780,143,963,121,000
| 30.105263
| 74
| 0.692047
| false
| 3.764331
| false
| false
| false
|
IronLanguages/ironpython3
|
Src/StdLib/Lib/socket.py
|
1
|
20953
|
# Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
IntEnum constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Integer constants:
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io
from enum import IntEnum
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["fromfd", "getfqdn", "create_connection",
"AddressFamily", "SocketKind"]
__all__.extend(os._get_exports_list(_socket))
# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for
# nicer string representations.
# Note that _socket only knows about the integer values. The public interface
# in this module understands the enums and translates them back from integers
# where needed (e.g. .family property of a socket object).
IntEnum._convert(
'AddressFamily',
__name__,
lambda C: C.isupper() and C.startswith('AF_'))
IntEnum._convert(
'SocketKind',
__name__,
lambda C: C.isupper() and C.startswith('SOCK_'))
_LOCALHOST = '127.0.0.1'
_LOCALHOST_V6 = '::1'
def _intenum_converter(value, enum_klass):
"""Convert a numeric family value to an IntEnum member.
If it's not a known member, return the numeric value itself.
"""
try:
return enum_klass(value)
except ValueError:
return value
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
class socket(_socket.socket):
"""A subclass of _socket.socket adding the makefile() method."""
__slots__ = ["__weakref__", "_io_refs", "_closed"]
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
# For user code address family and type values are IntEnum members, but
# for the underlying _socket.socket they're just integers. The
# constructor of _socket.socket converts the given argument to an
# integer automatically.
_socket.socket.__init__(self, family, type, proto, fileno)
self._io_refs = 0
self._closed = False
def __enter__(self):
return self
def __exit__(self, *args):
if not self._closed:
self.close()
def __repr__(self):
"""Wrap __repr__() to reveal the real class name and socket
address(es).
"""
closed = getattr(self, '_closed', False)
s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \
% (self.__class__.__module__,
self.__class__.__name__,
" [closed]" if closed else "",
self.fileno(),
self.family,
self.type,
self.proto)
if not closed:
try:
laddr = self.getsockname()
if laddr:
s += ", laddr=%s" % str(laddr)
except error:
pass
try:
raddr = self.getpeername()
if raddr:
s += ", raddr=%s" % str(raddr)
except error:
pass
s += '>'
return s
def __getstate__(self):
raise TypeError("Cannot serialize socket object")
def dup(self):
"""dup() -> socket object
Duplicate the socket. Return a new socket object connected to the same
system resource. The new socket is non-inheritable.
"""
fd = dup(self.fileno())
sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
sock.settimeout(self.gettimeout())
return sock
def accept(self):
"""accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket
representing the connection, and the address of the client.
For IP sockets, the address info is a pair (hostaddr, port).
"""
fd, addr = self._accept()
# If our type has the SOCK_NONBLOCK flag, we shouldn't pass it onto the
# new socket. We do not currently allow passing SOCK_NONBLOCK to
# accept4, so the returned socket is always blocking.
type = self.type & ~globals().get("SOCK_NONBLOCK", 0)
sock = socket(self.family, type, self.proto, fileno=fd)
# Issue #7995: if no default timeout is set and the listening
# socket had a (non-zero) timeout, force the new socket in blocking
# mode to override platform-specific socket flags inheritance.
if getdefaulttimeout() is None and self.gettimeout():
sock.setblocking(True)
return sock, addr
def makefile(self, mode="r", buffering=None, *,
encoding=None, errors=None, newline=None):
"""makefile(...) -> an I/O stream connected to the socket
The arguments are as for io.open() after the filename,
except the only mode characters supported are 'r', 'w' and 'b'.
The semantics are similar too. (XXX refactor to share code?)
"""
if not set(mode) <= {"r", "w", "b"}:
raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._io_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
def _decref_socketios(self):
if self._io_refs > 0:
self._io_refs -= 1
if self._closed:
self.close()
def _real_close(self, _ss=_socket.socket):
# This function should not reference any globals. See issue #808164.
_ss.close(self)
def close(self):
# This function should not reference any globals. See issue #808164.
self._closed = True
if self._io_refs <= 0:
self._real_close()
def detach(self):
"""detach() -> file descriptor
Close the socket object without closing the underlying file descriptor.
The object cannot be used after this call, but the file descriptor
can be reused for other purposes. The file descriptor is returned.
"""
self._closed = True
return super().detach()
@property
def family(self):
"""Read-only access to the address family for this socket.
"""
return _intenum_converter(super().family, AddressFamily)
@property
def type(self):
"""Read-only access to the socket type.
"""
return _intenum_converter(super().type, SocketKind)
if os.name == 'nt':
def get_inheritable(self):
return os.get_handle_inheritable(self.fileno())
def set_inheritable(self, inheritable):
os.set_handle_inheritable(self.fileno(), inheritable)
else:
def get_inheritable(self):
return os.get_inheritable(self.fileno())
def set_inheritable(self, inheritable):
os.set_inheritable(self.fileno(), inheritable)
get_inheritable.__doc__ = "Get the inheritable flag of the socket"
set_inheritable.__doc__ = "Set the inheritable flag of the socket"
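# Editor's note (not part of the original module): a hedged sketch of the
# makefile() wrapper defined above, assuming `conn` is an already connected
# socket object.
#
#     f = conn.makefile('rb')   # buffered binary reader over the socket
#     line = f.readline()       # reads from the underlying connection
#     f.close()                 # does not close the socket unless conn.close()
#                               # has also been called (see SocketIO.close)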
def fromfd(fd, family, type, proto=0):
""" fromfd(fd, family, type[, proto]) -> socket object
Create a socket object from a duplicate of the given file
descriptor. The remaining arguments are the same as for socket().
"""
nfd = dup(fd)
return socket(family, type, proto, nfd)
if hasattr(_socket.socket, "share"):
def fromshare(info):
""" fromshare(info) -> socket object
Create a socket object from the bytes object returned by
socket.share(pid).
"""
return socket(0, 0, 0, info)
__all__.append("fromshare")
if hasattr(_socket, "socketpair"):
def socketpair(family=None, type=SOCK_STREAM, proto=0):
"""socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is
AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
"""
if family is None:
try:
family = AF_UNIX
except NameError:
family = AF_INET
a, b = _socket.socketpair(family, type, proto)
a = socket(family, type, proto, a.detach())
b = socket(family, type, proto, b.detach())
return a, b
else:
# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain.
def socketpair(family=AF_INET, type=SOCK_STREAM, proto=0):
if family == AF_INET:
host = _LOCALHOST
elif family == AF_INET6:
host = _LOCALHOST_V6
else:
raise ValueError("Only AF_INET and AF_INET6 socket address families "
"are supported")
if type != SOCK_STREAM:
raise ValueError("Only SOCK_STREAM socket type is supported")
if proto != 0:
raise ValueError("Only protocol zero is supported")
# We create a connected TCP socket. Note the trick with
# setblocking(False) that prevents us from having to create a thread.
lsock = socket(family, type, proto)
try:
lsock.bind((host, 0))
lsock.listen()
# On IPv6, ignore flow_info and scope_id
addr, port = lsock.getsockname()[:2]
csock = socket(family, type, proto)
try:
csock.setblocking(False)
try:
csock.connect((addr, port))
except (BlockingIOError, InterruptedError):
pass
csock.setblocking(True)
ssock, _ = lsock.accept()
except:
csock.close()
raise
finally:
lsock.close()
return (ssock, csock)
__all__.append("socketpair")
socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is AF_UNIX
if defined on the platform; otherwise, the default is AF_INET.
"""
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
"""Raw I/O implementation for stream sockets.
This class supports the makefile() method on sockets. It provides
the raw I/O interface on top of a socket object.
"""
# One might wonder why not let FileIO do the job instead. There are two
# main reasons why FileIO is not adapted:
# - it wouldn't work under Windows (where you can't used read() and
# write() on a socket handle)
# - it wouldn't work with socket timeouts (FileIO would ignore the
# timeout and consider the socket non-blocking)
# XXX More docs
def __init__(self, sock, mode):
if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
raise ValueError("invalid mode: %r" % mode)
io.RawIOBase.__init__(self)
self._sock = sock
if "b" not in mode:
mode += "b"
self._mode = mode
self._reading = "r" in mode
self._writing = "w" in mode
self._timeout_occurred = False
def readinto(self, b):
"""Read up to len(b) bytes into the writable buffer *b* and return
the number of bytes read. If the socket is non-blocking and no bytes
are available, None is returned.
If *b* is non-empty, a 0 return value indicates that the connection
was shutdown at the other end.
"""
self._checkClosed()
self._checkReadable()
if self._timeout_occurred:
raise OSError("cannot read from timed out object")
while True:
try:
return self._sock.recv_into(b)
except timeout:
self._timeout_occurred = True
raise
except InterruptedError:
continue
except error as e:
if e.args[0] in _blocking_errnos:
return None
raise
def write(self, b):
"""Write the given bytes or bytearray object *b* to the socket
and return the number of bytes written. This can be less than
len(b) if not all data could be written. If the socket is
non-blocking and no bytes could be written None is returned.
"""
self._checkClosed()
self._checkWritable()
try:
return self._sock.send(b)
except error as e:
# XXX what about EINTR?
if e.args[0] in _blocking_errnos:
return None
raise
def readable(self):
"""True if the SocketIO is open for reading.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._reading
def writable(self):
"""True if the SocketIO is open for writing.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._writing
def seekable(self):
"""True if the SocketIO is open for seeking.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return super().seekable()
def fileno(self):
"""Return the file descriptor of the underlying socket.
"""
self._checkClosed()
return self._sock.fileno()
@property
def name(self):
if not self.closed:
return self.fileno()
else:
return -1
@property
def mode(self):
return self._mode
def close(self):
"""Close the SocketIO object. This doesn't close the underlying
socket, except if all references to it have disappeared.
"""
if self.closed:
return
io.RawIOBase.close(self)
self._sock._decref_socketios()
self._sock = None
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
"""Resolve host and port into list of address info entries.
Translate the host/port argument into a sequence of 5-tuples that contain
all the necessary arguments for creating a socket connected to that service.
host is a domain name, a string representation of an IPv4/v6 address or
None. port is a string service name such as 'http', a numeric port number or
None. By passing None as the value of host and port, you can pass NULL to
the underlying C API.
The family, type and proto arguments can be optionally specified in order to
narrow the list of addresses returned. Passing zero as a value for each of
these arguments selects the full range of results.
"""
# We override this function since we want to translate the numeric family
# and socket type values to enum constants.
addrlist = []
for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
af, socktype, proto, canonname, sa = res
addrlist.append((_intenum_converter(af, AddressFamily),
_intenum_converter(socktype, SocketKind),
proto, canonname, sa))
return addrlist
|
apache-2.0
| 2,936,274,193,800,808,000
| 34.393581
| 95
| 0.606214
| false
| 4.173904
| false
| false
| false
|
mastorak/udolc
|
udolc/UdolcWindow.py
|
1
|
4139
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2013 <Konstantinos Mastorakis> <mastorak at gmail dot com>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import os,stat
import gettext
from gettext import gettext as _
gettext.textdomain('udolc')
from gi.repository import Gtk # pylint: disable=E0611
import logging
logger = logging.getLogger('udolc')
from udolc_lib import Window
from udolc.AboutUdolcDialog import AboutUdolcDialog
from udolc.InfoDialog import InfoDialog
from udolc.InvalidAttributesDialog import InvalidAttributesDialog
# See udolc_lib.Window.py for more details about how this class works
class UdolcWindow(Window):
__gtype_name__ = "UdolcWindow"
def finish_initializing(self, builder): # pylint: disable=E1002
"""Set up the main window"""
super(UdolcWindow, self).finish_initializing(builder)
self.AboutDialog = AboutUdolcDialog
statusIcon = Gtk.StatusIcon()
statusIcon.set_from_file('data/media/udolc.svg')
statusIcon.set_visible(True)
# Get widgets
self.saveBtn=self.builder.get_object("saveBtn")
self.resetBtn=self.builder.get_object("resetBtn")
self.nameField=self.builder.get_object("nameField")
self.commentField=self.builder.get_object("commentField")
self.execField=self.builder.get_object("execField")
self.iconSelect=self.builder.get_object("iconSelect")
self.terminalCheckbox=self.builder.get_object("terminalCheckbox")
self.typeCombo=self.builder.get_object("typeCombo")
#Initialise widgets
self.iconSelect.set_filename("/usr/share/udolc/media/default_icon.png")
self.typeCombo.set_active(0)
def on_saveBtn_clicked(self,widget):
print "Saving laucher"
name=self.nameField.get_text()
comment=self.commentField.get_text()
if comment=="":
comment=name
executable=self.execField.get_text()
icon=self.iconSelect.get_filename()
launcherType=self.typeCombo.get_active_text()
terminalCheck=self.terminalCheckbox.get_active()
isTerminal="false"
if terminalCheck:
isTerminal="true"
if name=="" or executable=="":
print "Invalid Arguments"
error=InvalidAttributesDialog()
error.show()
return
else:
homeDir=os.getenv("HOME")
copyDir=homeDir+"/.local/share/applications/"
fileName=copyDir+name+".desktop"
f = open(fileName, 'w')
f.write("[Desktop Entry]\n")
f.write("Type=Application\n")
f.write("Name="+name+"\n")
f.write("Comment="+comment+"\n")
f.write("Exec="+executable+"\n")
f.write("Icon="+icon+"\n")
f.write("Terminal="+isTerminal+"\n")
f.write("Categories="+launcherType+";\n")
f.close()
os.chmod(fileName, stat.S_IRWXU)
info=InfoDialog()
os.system("xdg-open "+copyDir)
info.show()
def on_resetBtn_clicked(self,widget):
self.nameField.set_text("")
self.commentField.set_text("")
self.execField.set_text("")
self.iconSelect.set_filename("/usr/share/udolc/media/default_icon.png")
self.typeCombo.set_active(0)
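# Editor's note (not part of the original source): for a hypothetical launcher
# named "MyTool" with executable "/usr/bin/mytool", an empty comment field, the
# default icon and the terminal checkbox off, on_saveBtn_clicked() above would
# write ~/.local/share/applications/MyTool.desktop roughly as:
#
#   [Desktop Entry]
#   Type=Application
#   Name=MyTool
#   Comment=MyTool
#   Exec=/usr/bin/mytool
#   Icon=/usr/share/udolc/media/default_icon.png
#   Terminal=false
#   Categories=Utility;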
|
gpl-3.0
| -7,600,585,048,314,815,000
| 35.955357
| 79
| 0.62648
| false
| 3.857409
| false
| false
| false
|
martin-green/skojjt
|
imports.py
|
1
|
3816
|
# -*- coding: utf-8 -*-
import time
from data import Semester, TaskProgress, UserPrefs
from dataimport import RunScoutnetImport
from google.appengine.ext import deferred, ndb
from flask import Blueprint, render_template, request, make_response, redirect
import_page = Blueprint('import_page', __name__, template_folder='templates')
@import_page.route('/', methods = ['POST', 'GET'])
def import_():
user = UserPrefs.current()
if not user.canImport():
return "denied", 403
breadcrumbs = [{'link':'/', 'text':'Hem'},
{'link':'/import', 'text':'Import'}]
currentSemester = Semester.getOrCreateCurrent()
semesters=[currentSemester]
semesters.extend(Semester.query(Semester.key!=currentSemester.key))
if request.method != 'POST':
return render_template('updatefromscoutnetform.html', heading="Import", breadcrumbs=breadcrumbs, user=user, semesters=semesters)
api_key = request.form.get('apikey').strip()
groupid = request.form.get('groupid').strip()
semester_key=ndb.Key(urlsafe=request.form.get('semester'))
return startAsyncImport(api_key, groupid, semester_key, user, request)
progress = Blueprint('progress_page', 'progress', template_folder='templates')
@progress.route('/<progress_url>')
@progress.route('/<progress_url>/')
@progress.route('/<progress_url>/<update>')
@progress.route('/<progress_url>/<update>/')
def importProgress(progress_url, update=None):
if update is not None:
taskProgress = None
for i in range(1, 2):
taskProgress = ndb.Key(urlsafe=progress_url).get()
if taskProgress is not None:
break
time.sleep(1)
if taskProgress is not None:
s = taskProgress.toJson()
else:
s = '{"messages": ["Error: Hittar inte uppgiften"], "failed": "true", "running": "false"}'
response = make_response(s)
response.headers['Content-Type'] = 'application/json'
return response
breadcrumbs = [{'link':'/', 'text':'Hem'}, {'link':'/import', 'text':'Import'}]
return render_template('importresult.html', tabletitle="Importresultat", rowtitle='Result', breadcrumbs=breadcrumbs)
def startAsyncImport(api_key, groupid, semester_key, user, request):
"""
:type api_key: str
:type groupid: str
:type semester_key: google.appengine.ext.ndb.Key
:type user: data.UserPrefs
:type request: werkzeug.local.LocalProxy
:rtype werkzeug.wrappers.response.Response
"""
taskProgress = TaskProgress(name='Import', return_url=request.url)
taskProgress.put()
deferred.defer(importTask, api_key, groupid, semester_key, taskProgress.key, user.key)
return redirect('/progress/' + taskProgress.key.urlsafe())
def importTask(api_key, groupid, semester_key, taskProgress_key, user_key):
"""
:type api_key: str
:type groupid: str
:type semester_key: google.appengine.ext.ndb.Key
:type taskProgress_key: google.appengine.ext.ndb.Key
:type user_key: google.appengine.ext.ndb.Key
"""
semester = semester_key.get() # type: data.Semester
user = user_key.get() # type: data.UserPrefs
progress = None
for i in range(1, 3):
progress = taskProgress_key.get() # type: data.TaskProgress
if progress is not None:
break
time.sleep(1) # wait for the eventual consistency
try:
success = RunScoutnetImport(groupid, api_key, user, semester, progress)
if not success:
progress.info("Importen misslyckades")
progress.failed = True
else:
progress.info("Import klar")
except Exception as e: # catch all exceptions so that defer stops running it again (automatic retry)
progress.info("Importfel: " + str(e))
progress.done()
|
apache-2.0
| 3,902,891,039,809,497,600
| 38.340206
| 136
| 0.659329
| false
| 3.566355
| false
| false
| false
|
bsgbryan/Ardus
|
node_modules/microtime/build/c4che/default.cache.py
|
1
|
1405
|
AR = '/usr/bin/ar'
ARFLAGS = 'rcs'
CCFLAGS = ['-g']
CCFLAGS_MACBUNDLE = ['-fPIC']
CCFLAGS_NODE = ['-D_LARGEFILE_SOURCE', '-D_FILE_OFFSET_BITS=64']
CC_VERSION = ('4', '2', '1')
COMPILER_CXX = 'g++'
CPP = '/usr/bin/cpp'
CPPFLAGS_NODE = ['-D_GNU_SOURCE', '-DEV_MULTIPLICITY=0']
CPPPATH_NODE = '/usr/local/include/node'
CPPPATH_ST = '-I%s'
CXX = ['/usr/bin/g++']
CXXDEFINES_ST = '-D%s'
CXXFLAGS = ['-g']
CXXFLAGS_DEBUG = ['-g']
CXXFLAGS_NODE = ['-D_LARGEFILE_SOURCE', '-D_FILE_OFFSET_BITS=64']
CXXFLAGS_RELEASE = ['-O2']
CXXLNK_SRC_F = ''
CXXLNK_TGT_F = ['-o', '']
CXX_NAME = 'gcc'
CXX_SRC_F = ''
CXX_TGT_F = ['-c', '-o', '']
DEST_CPU = 'x86_64'
DEST_OS = 'darwin'
FULLSTATIC_MARKER = '-static'
LIBDIR = '/Users/maynardb/.node_libraries'
LIBPATH_NODE = '/usr/local/lib'
LIBPATH_ST = '-L%s'
LIB_ST = '-l%s'
LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
LINK_CXX = ['/usr/bin/g++']
NODE_PATH = '/Users/maynardb/.node_libraries'
PREFIX = '/usr/local'
PREFIX_NODE = '/usr/local'
RANLIB = '/usr/bin/ranlib'
RPATH_ST = '-Wl,-rpath,%s'
SHLIB_MARKER = ''
SONAME_ST = ''
STATICLIBPATH_ST = '-L%s'
STATICLIB_MARKER = ''
STATICLIB_ST = '-l%s'
macbundle_PATTERN = '%s.bundle'
program_PATTERN = '%s'
shlib_CXXFLAGS = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
shlib_LINKFLAGS = ['-dynamiclib']
shlib_PATTERN = 'lib%s.dylib'
staticlib_LINKFLAGS = []
staticlib_PATTERN = 'lib%s.a'
|
mit
| -3,397,733,078,171,443,700
| 28.270833
| 82
| 0.625623
| false
| 2.482332
| false
| true
| false
|
Elastica/kombu
|
kombu/clocks.py
|
1
|
4635
|
"""
kombu.clocks
============
Logical Clocks and Synchronization.
"""
from __future__ import absolute_import, unicode_literals
from threading import Lock
from itertools import islice
from operator import itemgetter
from .five import python_2_unicode_compatible, zip
__all__ = ['LamportClock', 'timetuple']
R_CLOCK = '_lamport(clock={0}, timestamp={1}, id={2} {3!r})'
@python_2_unicode_compatible
class timetuple(tuple):
"""Tuple of event clock information.
Can be used as part of a heap to keep events ordered.
:param clock: Event clock value.
:param timestamp: Event UNIX timestamp value.
:param id: Event host id (e.g. ``hostname:pid``).
:param obj: Optional obj to associate with this event.
"""
__slots__ = ()
def __new__(cls, clock, timestamp, id, obj=None):
return tuple.__new__(cls, (clock, timestamp, id, obj))
def __repr__(self):
return R_CLOCK.format(*self)
def __getnewargs__(self):
return tuple(self)
def __lt__(self, other):
# 0: clock 1: timestamp 3: process id
try:
A, B = self[0], other[0]
# uses logical clock value first
if A and B: # use logical clock if available
if A == B: # equal clocks use lower process id
return self[2] < other[2]
return A < B
return self[1] < other[1] # ... or use timestamp
except IndexError:
return NotImplemented
def __gt__(self, other):
return other < self
def __le__(self, other):
return not other < self
def __ge__(self, other):
return not self < other
clock = property(itemgetter(0))
timestamp = property(itemgetter(1))
id = property(itemgetter(2))
obj = property(itemgetter(3))
@python_2_unicode_compatible
class LamportClock(object):
"""Lamport's logical clock.
From Wikipedia:
A Lamport logical clock is a monotonically incrementing software counter
maintained in each process. It follows some simple rules:
* A process increments its counter before each event in that process;
* When a process sends a message, it includes its counter value with
the message;
* On receiving a message, the receiver process sets its counter to be
greater than the maximum of its own value and the received value
before it considers the message received.
Conceptually, this logical clock can be thought of as a clock that only
has meaning in relation to messages moving between processes. When a
process receives a message, it resynchronizes its logical clock with
the sender.
.. seealso::
* `Lamport timestamps`_
* `Lamports distributed mutex`_
.. _`Lamport Timestamps`: http://en.wikipedia.org/wiki/Lamport_timestamps
.. _`Lamports distributed mutex`: http://bit.ly/p99ybE
*Usage*
When sending a message use :meth:`forward` to increment the clock,
when receiving a message use :meth:`adjust` to sync with
the time stamp of the incoming message.
"""
#: The clocks current value.
value = 0
def __init__(self, initial_value=0, Lock=Lock):
self.value = initial_value
self.mutex = Lock()
def adjust(self, other):
with self.mutex:
value = self.value = max(self.value, other) + 1
return value
def forward(self):
with self.mutex:
self.value += 1
return self.value
def sort_heap(self, h):
"""List of tuples containing at least two elements, representing
an event, where the first element is the event's scalar clock value,
and the second element is the id of the process (usually
``"hostname:pid"``): ``sh([(clock, processid, ...?), (...)])``
The list must already be sorted, which is why we refer to it as a
heap.
The tuple will not be unpacked, so more than two elements can be
present.
Will return the latest event.
"""
if h[0][0] == h[1][0]:
same = []
for PN in zip(h, islice(h, 1, None)):
if PN[0][0] != PN[1][0]:
break # Prev and Next's clocks differ
same.append(PN[0])
# return first item sorted by process id
return sorted(same, key=lambda event: event[1])[0]
# clock values unique, return first item
return h[0]
def __str__(self):
return str(self.value)
def __repr__(self):
return '<LamportClock: {0.value}>'.format(self)
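# Editor's sketch (not part of the original module): minimal usage of
# LamportClock following the rules described in its docstring.
if __name__ == '__main__':  # pragma: no cover
    sender, receiver = LamportClock(), LamportClock()
    stamp = sender.forward()   # sender increments its counter before sending
    receiver.adjust(stamp)     # receiver syncs to max(local, received) + 1
    print('sender=%s receiver=%s' % (sender, receiver))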
|
bsd-3-clause
| 5,739,204,968,868,085,000
| 28.711538
| 77
| 0.604962
| false
| 4.090909
| false
| false
| false
|
miquelcampos/GEAR_mc
|
gear/xsi/rig/component/chain_01/guide.py
|
1
|
3991
|
'''
This file is part of GEAR.
GEAR is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.html>.
Author: Jeremie Passerin geerem@hotmail.com
Url: http://gear.jeremiepasserin.com
Date: 2010 / 11 / 15
'''
## @package gear.xsi.rig.component.chain_01.guide
# @author Jeremie Passerin
#
##########################################################
# GLOBAL
##########################################################
# gear
from gear.xsi import c
from gear.xsi.rig.component.guide import ComponentGuide
# guide info
AUTHOR = "Jeremie Passerin"
URL = "http://www.jeremiepasserin.com"
EMAIL = "geerem@hotmail.com"
VERSION = [1,0,1]
TYPE = "chain_01"
NAME = "chain"
DESCRIPTION = "Simple ik/fk chain"
##########################################################
# CLASS
##########################################################
class Guide(ComponentGuide):
compType = TYPE
compName = NAME
description = DESCRIPTION
author = AUTHOR
url = URL
email = EMAIL
version = VERSION
compatible = ["tail_01", "chain_cns_01"]
# =====================================================
##
# @param self
def postInit(self):
self.pick_transform = ["root", "#_loc"]
self.save_transform = ["root", "#_loc"]
self.save_blade = ["blade"]
self.addMinMax("#_loc", 1, -1)
# =====================================================
## Add more object to the object definition list.
# @param self
def addObjects(self):
self.root = self.addRoot()
self.locs = self.addLocMulti("#_loc", self.root)
self.blade = self.addBlade("blade", self.root, self.locs[0])
centers = [self.root]
centers.extend(self.locs)
self.dispcrv = self.addDispCurve("crv", centers)
# =====================================================
## Add more parameter to the parameter definition list.
# @param self
def addParameters(self):
self.pType = self.addParam("type", c.siInt4, 0, 0, None)
self.pBlend = self.addParam("blend", c.siInt4, 0, 0, 1)
self.pNeutralPose = self.addParam("neutralpose", c.siBool, False)
# =====================================================
## Add layout for new parameters.
# @param self
def addLayout(self):
# --------------------------------------------------
# Items
typeItems = ["fk only", 0,
"ik only", 1,
"ik / fk", 2]
blendItems = ["fk", 0,
"ik", 1]
# --------------------------------------------------
# Layout
tab = self.layout.addTab("Options")
group = tab.addGroup("Kinematic")
group.addEnumControl(self.pType.scriptName, typeItems, "Type", c.siControlCombo)
item = group.addItem(self.pNeutralPose.scriptName, "Set Neutral Pose on FK Controlers")
item.addCondition("PPG."+self.pType.scriptName+".Value != 1")
item = group.addEnumControl(self.pBlend.scriptName, blendItems, "Default blend", c.siControlCombo)
item.addCondition("PPG."+self.pType.scriptName+".Value == 2")
# =====================================================
## Add logic for new layout.
# @param self
def addLogic(self):
self.logic.addOnChangedRefresh(self.pType.scriptName)
|
lgpl-3.0
| 630,757,503,374,750,000
| 31.185484
| 106
| 0.527938
| false
| 3.998998
| false
| false
| false
|
google/skia
|
infra/bots/assets/cmake_mac/create.py
|
1
|
1130
|
#!/usr/bin/env python
#
# Copyright 2019 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create the asset."""
import argparse
import os
import subprocess
import sys
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
INFRA_BOTS_DIR = os.path.realpath(os.path.join(FILE_DIR, os.pardir, os.pardir))
sys.path.insert(0, INFRA_BOTS_DIR)
import utils
VERSION = '3.13.4'
URL = ('https://github.com/Kitware/CMake/releases/download/v%s/'
'cmake-%s-Darwin-x86_64.tar.gz') % (VERSION, VERSION)
def create_asset(target_dir):
"""Create the asset."""
with utils.tmp_dir():
subprocess.check_call(['wget', URL, '--output-document=cmake.tar.gz'])
subprocess.check_call(['tar', '--extract', '--gunzip', '--file',
'cmake.tar.gz', '--directory', target_dir,
'--strip-components', '1'])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--target_dir', '-t', required=True)
args = parser.parse_args()
create_asset(args.target_dir)
if __name__ == '__main__':
main()
|
bsd-3-clause
| -5,843,847,538,980,746,000
| 24.111111
| 79
| 0.635398
| false
| 3.237822
| false
| false
| false
|
shengqh/ngsperl
|
lib/QC/bamSoftClipPosition.py
|
1
|
3130
|
import pysam
import argparse
import sys
import logging
import os
parser = argparse.ArgumentParser(description="Build soft clip position distribution in BAM file.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
DEBUG=False
NOT_DEBUG = not DEBUG
parser.add_argument('-i', '--input', action='store', nargs='?', help='Input BAM file', required=NOT_DEBUG)
parser.add_argument('--min-mapq', action='store', nargs='?', type=int, default=10, help="Minimum mapping quality of read")
parser.add_argument('--binsize', action='store', nargs='?', type=int, default=1000, help="Bin size of position")
parser.add_argument('--min-depth', action='store', nargs='?', type=int, default=100, help="Minimum depth for output")
parser.add_argument('-o', '--output', action='store', nargs='?', help="Output soft clip distribution file name", required=NOT_DEBUG)
if NOT_DEBUG and len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if DEBUG:
args.input = "/scratch/cqs/shengq2/jennifer/20190906_lindsay_exomeseq_3772_hg38/softclip/P_175_06.indel.recal.TP53.bam"
args.output = "/scratch/cqs/shengq2/jennifer/20190906_lindsay_exomeseq_3772_hg38/softclip/P_175_06.softclip.position.tsv"
logger = logging.getLogger('bamSoftClipPosition')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
def filterReadQuality(read, min_mapq):
return(read.is_unmapped or read.mapping_quality < min_mapq or read.is_secondary or read.is_qcfail or read.is_duplicate or read.is_supplementary)
def hasSoftClip(read):
return("S" in read.cigarstring)
chrPositionMap = {}
processed = 0
logger.info("reading %s" % args.input)
with pysam.Samfile(args.input, "rb") as samfile:
for read in samfile.fetch(until_eof=True):
processed += 1
if processed % 1000000 == 0:
logger.info("processed %d" % processed)
#break
if filterReadQuality(read, args.min_mapq):
continue
if len(read.reference_name) > 5:
continue
if not read.reference_name in chrPositionMap:
chrPositionMap[read.reference_name] = {}
positionMap = chrPositionMap[read.reference_name]
position = int(read.reference_start / args.binsize)
if not position in positionMap:
positionMap[position] = [0, 0]
posvalues = positionMap[position]
if hasSoftClip(read):
posvalues[0] = posvalues[0] + 1
else:
posvalues[1] = posvalues[1] + 1
with open(args.output, "wt") as sw:
sw.write("Chr\tStartPosition\tSoftClipRead\tOtherRead\tSoftClipPerc\n")
for chr in chrPositionMap.keys():
positionMap = chrPositionMap[chr]
positions = sorted(positionMap.keys())
for pos in positions:
posvalues = positionMap[pos]
sread = posvalues[0]
oread = posvalues[1]
allread = sread + oread
if allread >= args.min_depth:
sw.write("%s\t%d\t%d\t%d\t%.2f\n" % (chr, pos * args.binsize, sread, oread, sread * 1.0 / allread) )
logger.info("done.")
|
apache-2.0
| 2,228,004,793,218,113,500
| 36.195122
| 146
| 0.675399
| false
| 3.243523
| false
| false
| false
|
msg/g2ools
|
nord/convert/lfo.py
|
1
|
10685
|
#
# lfo.py - Lfo tab conversion objects
#
# Copyright (c) 2006,2007 Matt Gerassimoff
#
# This file is part of g2ools.
#
# g2ools is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# g2ools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with g2ools; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from nord.utils import setv, getv, isnm1osc
from nord.nm1.colors import nm1conncolors
from nord.convert import Convert
from nord.convert.convert import handleoscmasterslv, handlekbt, doslvcables
from nord.convert.table import modtable
def handleslv(conv, ratemodin, ratemodparam):
nmm, g2m = conv.nmmodule, conv.g2module
nmmp, g2mp = nmm.params, g2m.params
slv, kbt = None, g2m.inputs.Rate
if len(nmm.outputs.Slv.cables):
oscmaster = conv.add_module('OscMaster')
setv(g2mp.Rate, 64)
setv(oscmaster.params.Kbt, 0) # Off
setv(oscmaster.params.FreqCoarse, getv(nmmp.Rate))
#setv(oscmaster.params.PitchMod, modtable[getv(nmmp.RateMod)][0])
conv.connect(oscmaster.outputs.Out, g2m.inputs.Rate)
ratemodin = oscmaster.inputs.PitchVar
ratemodparam = oscmaster.params.PitchMod
slv = g2m.inputs.Rate
kbt = oscmaster.inputs.Pitch
conv.kbt = oscmaster.params.Kbt
if getv(nmmp.Range) == 1: # Lo
slv = handleoscmasterslv(conv, oscmaster, 64, 40, 50, 103, 41, True)
else:
slv = handleoscmasterslv(conv, oscmaster, 76, 64, 52, 104, 35, False)
# add fine tuning
if len(nmm.inputs.Rate.cables):
mod = getv(nmmp.RateMod)
if mod == 0 or mod == 127:
setv(ratemodparam, mod)
else:
setv(ratemodparam, modtable[mod][0])
adj = conv.add_module('Mix2-1B', name='PitchAdj')
conv.connect(adj.outputs.Out, ratemodin)
conv.connect(adj.inputs.Chain, adj.inputs.In1)
conv.connect(adj.inputs.In1, adj.inputs.In2)
setv(adj.params.Inv1, 1)
setv(adj.params.Lev1, modtable[mod][1])
setv(adj.params.Lev2, modtable[mod][2])
ratemodin = adj.inputs.Chain
return ratemodin, ratemodparam, slv, kbt
def postmst(conv, mstindex):
nmm, g2m = conv.nmmodule, conv.g2module
nmmp, g2mp = nmm.params, g2m.params
mstin = nmm.inputs.Mst
if not len(mstin.cables):
return
if not mstin.net.output:
return
mstconv = mstin.net.output.module.conv
mst = mstconv.g2module
if hasattr(mst.params, 'PolyMono'):
setv(g2mp.PolyMono, getv(mst.params.PolyMono))
if hasattr(mst.params, 'Kbt') and hasattr(g2mp, 'Kbt'):
setv(g2mp.Kbt, getv(mst.params.Kbt))
if mstin.net.output.rate != nm1conncolors.slave:
oscc = conv.add_module('OscC', name='')
setv(oscc.params.FreqCoarse, 0)
setv(oscc.params.FmAmount, 79)
setv(oscc.params.Kbt, 0)
pout = conv.add_module('ZeroCnt', name='')
conv.connect(oscc.outputs.Out, pout.inputs.In)
conv.connect(pout.outputs.Out, g2m.inputs.Rate)
setv(g2mp.Range, 2)
conv.inputs[mstindex] = oscc.inputs.FmMod
return
if isnm1osc(mst):
setv(g2mp.Range, 2)
elif hasattr(mst.params, 'Range'):
setv(g2mp.Range, getv(mst.params.Range))
else:
setv(g2mp.Range, 1)
class ConvLFOA(Convert):
maing2module = 'LfoB'
parammap = ['Rate', 'Range', 'Waveform', 'RateMod', ['PolyMono', 'Mono'],
None, 'Phase', ['Active', 'Mute']]
inputmap = ['Rate', 'Rst']
outputmap = ['', 'Out'] # Slv
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
# handle special parameters
waveform = getv(nmmp.Waveform)
setv(g2mp.Waveform, [0, 1, 2, 2, 3][waveform])
if waveform != 3:
setv(g2mp.OutputType, 5) # BipInv
else:
# 180 phase
setv(g2mp.Phase, (range(64, 128)+range(64))[getv(nmmp.Phase)])
setv(g2mp.Active, 1-getv(nmmp.Mute))
self.kbt = g2m.params.Kbt
# update Rate input, Slv Output
ratemodin, rateparam, slv, kbt = handleslv(self,
g2m.inputs.RateVar, g2mp.RateMod)
self.inputs[0], self.outputs[0], kbt = ratemodin, slv, kbt
self.kbtout = handlekbt(self, kbt, 4, False)
def precables(self):
doslvcables(self)
class ConvLFOB(Convert):
maing2module = 'LfoShpA'
parammap = ['Rate', 'Range', 'Phase', 'RateMod', ['PolyMono', 'Mono'],
None, ['PhaseMod', 'PwMod'], ['Shape', 'Pw']]
inputmap = ['Rate', 'Rst', 'ShapeMod']
outputmap = ['Out', ''] # Slv
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
setv(g2mp.Waveform, 5)
setv(g2mp.OutputType, 5) # BipInv
setv(g2mp.PhaseMod, getv(nmmp.PwMod))
self.kbt = g2m.params.Kbt
ratemodin, rateparam, slv, kbt = handleslv(self,
g2m.inputs.RateVar, g2mp.RateMod)
self.inputs[0], self.outputs[1], kbt = ratemodin, slv, kbt
self.kbtout = handlekbt(self, kbt, 4, False)
def precables(self):
doslvcables(self)
class ConvLFOC(Convert):
maing2module = 'LfoA'
parammap = ['Rate', 'Range', 'Waveform', 'RateMod', ['PolyMono', 'Mono'],
['Active', 'Mute']]
inputmap = ['RateVar']
outputmap = ['Out', '']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
waveform = getv(nmmp.Waveform)
setv(g2mp.Waveform, [0, 1, 2, 2, 3][waveform])
if waveform != 3:
setv(g2mp.OutputType, 5) # BipInv
setv(g2mp.Active, 1-getv(nmmp.Mute))
self.kbt = g2m.params.Kbt
ratemodin, rateparam, slv, kbt = handleslv(self,
g2m.inputs.RateVar, g2mp.RateMod)
self.inputs[0], self.outputs[1], kbt = ratemodin, slv, kbt
def precables(self):
doslvcables(self)
class ConvLFOSlvA(Convert):
maing2module = 'LfoB'
parammap = ['Rate', 'Phase', 'Waveform', ['PolyMono', 'Mono'],
['Active', 'Mute']]
inputmap = ['Rate', 'Rst']
outputmap = ['Out']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
# handle special parameters
waveform = getv(nmmp.Waveform)
setv(g2mp.Waveform, [0, 1, 2, 2, 3][waveform])
if waveform != 3:
setv(g2mp.OutputType, 5) # BipInv
else:
# 180 phase
setv(g2mp.Phase, (range(64, 128)+range(64))[getv(nmmp.Phase)])
setv(g2mp.Active, 1-getv(nmmp.Mute))
postmst(self, 0)
class ConvLFOSlvB(Convert):
maing2module = 'LfoC'
waveform = 2
parammap = ['Rate']
inputmap = ['Rate']
outputmap = ['Out']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
# handle special parameters
g2m.modes.Waveform.value = self.waveform
if self.waveform != 2:
setv(g2mp.OutputType, 4) # Bip
else:
setv(g2mp.OutputType, 5) # BipInv
postmst(self, 0)
class ConvLFOSlvC(ConvLFOSlvB):
waveform = 0
#3phase thinks we may need this. I'm leaving it as a comment for now.
#def domodule(self):
# ConvLFOSlvB.domodule(self)
# setv(self.g2module.params.OutputType, 5) # BipInv
class ConvLFOSlvD(ConvLFOSlvB):
waveform = 3
class ConvLFOSlvE(ConvLFOSlvC):
waveform = 1
class ConvClkGen(Convert):
maing2module = 'ClkGen'
parammap = ['Rate', ['Active', 'On/Off']]
inputmap = ['Rst']
outputmap = ['1/96', '1/16', '', 'Sync']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
setv(g2mp.Active, getv(getattr(nmmp, 'On/Off')))
setv(g2mp.Source, 0) # Internal
if len(nmm.outputs.Sync.cables) != 0:
pulse = self.add_module('Pulse')
setv(pulse.params.Time, 32)
self.connect(g2m.outputs.ClkActive, pulse.inputs.In)
self.outputs[3] = pulse.outputs.Out
#handle Slv connections
if len(nmm.outputs.Slv.cables):
zerocnt = self.add_module('ZeroCnt', name='96th In')
oscmaster = self.add_module('OscMaster', name='26-241 BPM')
setv(oscmaster.params.FreqCoarse, 9) # -55 semi
setv(oscmaster.params.Kbt, 0) # off
self.connect(getattr(g2m.outputs, '1/96'), zerocnt.inputs.In)
self.connect(zerocnt.outputs.Out, oscmaster.inputs.Pitch)
self.outputs[2] = oscmaster.outputs.Out
class ConvClkRndGen(Convert):
maing2module = 'RndClkA'
parammap = [['PolyMono', 'Mono'], ['StepProb', 'Color']]
inputmap = ['Clk']
outputmap = ['Out']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
if getv(nmmp.Color) == 1:
setv(g2mp.StepProb, 43)
else:
setv(g2mp.StepProb, 127)
class ConvRndStepGen(ConvLFOSlvB):
waveform = 4
class ConvRandomGen(ConvLFOSlvB):
waveform = 5
class ConvRndPulseGen(Convert):
maing2module = 'RndTrig'
parammap = [['StepProb', 'Density']]
outputmap = ['Out']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
setv(g2mp.StepProb, 96)
lfoc = self.add_module('LfoC', name='Clk')
self.connect(lfoc.outputs.Out, g2m.inputs.Clk)
setv(lfoc.params.Rate, getv(nmmp.Density))
self.params[0] = lfoc.params.Rate
class ConvPatternGen(Convert):
maing2module = 'RndPattern'
parammap = [['PatternA', 'Pattern'], ['PatternB', 'Bank'],
['StepProb', 'LowDelta'], ['LoopCount', 'Step'],
None]
inputmap = ['Clk', 'Rst', 'A']
outputmap = ['Out']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
pattern = (getv(nmmp.Pattern) + 64) % 128
setv(g2mp.PatternA, pattern)
bank = (getv(nmmp.Bank) + 64) % 128
setv(g2mp.PatternB, bank)
# PatternA and PatternB receive same input
if len(getattr(nmm.inputs, 'Pattern&Bank').cables):
self.connect(g2m.inputs.A, g2m.inputs.B)
lowdelta = getv(nmmp.LowDelta)
if lowdelta:
notequant = self.add_module('NoteQuant')
self.connect(g2m.outputs.Out, notequant.inputs.In)
setv(notequant.params.Range, 77)
setv(notequant.params.Notes, 1)
self.outputs[0] = notequant.outputs.Out
stepprob, add = 55, 75
setv(g2mp.StepProb, 55)
else:
stepprob, add = 127, 74
setv(g2mp.StepProb, stepprob)
levadd = self.add_module('LevAdd')
self.connect(self.outputs[0], levadd.inputs.In)
setv(levadd.params.Level, add)
self.outputs[0] = levadd.outputs.Out
|
gpl-2.0
| -1,390,390,369,953,476,000
| 29.971014
| 76
| 0.656341
| false
| 2.7216
| false
| false
| false
|
HybridF5/jacket
|
jacket/api/compute/openstack/compute/limits.py
|
1
|
2400
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jacket.api.compute.openstack.compute.legacy_v2 import limits
from jacket.api.compute.openstack.compute.views import limits as limits_views
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi
from jacket.compute import quota
# NOTE(alex_xu): This is just for keeping backward compatibility with the v2
# endpoint in api-paste.ini. This will be removed after the v2 API code is
# deprecated in the future.
RateLimitingMiddleware = limits.RateLimitingMiddleware
QUOTAS = quota.QUOTAS
ALIAS = 'limits'
authorize = extensions.os_compute_authorizer(ALIAS)
class LimitsController(wsgi.Controller):
"""Controller for accessing limits in the OpenStack API."""
@extensions.expected_errors(())
def index(self, req):
"""Return all global and rate limit information."""
context = req.environ['compute.context']
authorize(context)
project_id = req.params.get('tenant_id', context.project_id)
quotas = QUOTAS.get_project_quotas(context, project_id,
usages=False)
abs_limits = {k: v['limit'] for k, v in quotas.items()}
rate_limits = req.environ.get("compute.limits", [])
builder = self._get_view_builder(req)
return builder.build(rate_limits, abs_limits)
def _get_view_builder(self, req):
return limits_views.ViewBuilderV21()
class Limits(extensions.V21APIExtensionBase):
"""Limits support."""
name = "Limits"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
LimitsController())]
return resource
def get_controller_extensions(self):
return []
|
apache-2.0
| 5,010,607,489,460,766,000
| 34.820896
| 78
| 0.68625
| false
| 4.088586
| false
| false
| false
|
Fokko/incubator-airflow
|
airflow/contrib/example_dags/example_emr_job_flow_automatic_steps.py
|
1
|
2363
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is an example DAG for an AWS EMR Pipeline with automatic steps.
"""
from datetime import timedelta
import airflow
from airflow import DAG
from airflow.contrib.operators.emr_create_job_flow_operator import EmrCreateJobFlowOperator
from airflow.contrib.sensors.emr_job_flow_sensor import EmrJobFlowSensor
DEFAULT_ARGS = {
'owner': 'Airflow',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(2),
'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False
}
SPARK_TEST_STEPS = [
{
'Name': 'calculate_pi',
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': [
'/usr/lib/spark/bin/run-example',
'SparkPi',
'10'
]
}
}
]
JOB_FLOW_OVERRIDES = {
'Name': 'PiCalc',
'Steps': SPARK_TEST_STEPS
}
with DAG(
dag_id='emr_job_flow_automatic_steps_dag',
default_args=DEFAULT_ARGS,
dagrun_timeout=timedelta(hours=2),
schedule_interval='0 3 * * *'
) as dag:
job_flow_creator = EmrCreateJobFlowOperator(
task_id='create_job_flow',
job_flow_overrides=JOB_FLOW_OVERRIDES,
aws_conn_id='aws_default',
emr_conn_id='emr_default'
)
job_sensor = EmrJobFlowSensor(
task_id='check_job_flow',
job_flow_id="{{ task_instance.xcom_pull(task_ids='create_job_flow', key='return_value') }}",
aws_conn_id='aws_default'
)
job_flow_creator >> job_sensor
|
apache-2.0
| -6,546,665,006,355,624,000
| 29.294872
| 100
| 0.658485
| false
| 3.511144
| false
| false
| false
|
elvandy/nltools
|
nltools/datasets.py
|
1
|
4739
|
'''
NeuroLearn datasets
===================
functions to help download datasets
'''
## Notes:
# Need to figure out how to speed up loading and resampling of data
__all__ = ['download_nifti',
'get_collection_image_metadata',
'download_collection',
'fetch_emotion_ratings',
'fetch_pain']
__author__ = ["Luke Chang"]
__license__ = "MIT"
import os
import pandas as pd
from nltools.data import Brain_Data
from nilearn.datasets.utils import _get_dataset_dir, _fetch_file
from pynv import Client
# Optional dependencies
try:
import requests
except ImportError:
pass
def download_nifti(url, data_dir=None):
    ''' Download an image to a NIfTI file.'''
local_filename = url.split('/')[-1]
if data_dir is not None:
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
local_filename = os.path.join(data_dir,local_filename)
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return local_filename
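# Illustrative sketch (not part of the original module): fetching a single
# NIfTI file with the helper above. The URL below is a placeholder, not a
# real image location.
def _example_download_nifti(data_dir='/tmp/nltools_data'):
    url = 'https://neurovault.org/media/images/504/example.nii.gz'  # hypothetical
    return download_nifti(url, data_dir=data_dir)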
def get_collection_image_metadata(collection=None, data_dir = None,
limit=10):
''' Get image metadata associated with collection
Args:
collection: (int) collection id
data_dir: (str) data directory
limit: (int) number of images to increment
Returns:
metadata: (pd.DataFrame) Dataframe with full image metadata from
collection
'''
if os.path.isfile(os.path.join(data_dir, 'metadata.csv')):
dat = pd.read_csv(os.path.join(data_dir, 'metadata.csv'))
else:
offset = 0
api = Client()
i = api.get_collection_images(collection_id=collection, limit=limit,offset=offset)
dat = pd.DataFrame(columns=i['results'][0].keys())
while int(offset) < int(i['count']):
for x in i['results']:
dat = dat.append(x, ignore_index=True)
offset = offset + limit
i = api.get_collection_images(collection_id=collection, limit=limit, offset=offset)
dat.to_csv(os.path.join(data_dir,'metadata.csv'), index=False)
return dat
def download_collection(collection=None, data_dir=None, overwrite=False,
resume=True, verbose=1):
''' Download images and metadata from Neurovault collection
Args:
collection: (int) collection id
data_dir: (str) data directory
Returns:
metadata: (pd.DataFrame) Dataframe with full image metadata from
collection
files: (list) list of files of downloaded collection
'''
if data_dir is None:
data_dir = _get_dataset_dir(str(collection), data_dir=data_dir,
verbose=verbose)
# Get collection Metadata
metadata = get_collection_image_metadata(collection=collection,
data_dir=data_dir)
# Get images
files = []
for f in metadata['file']:
files.append(_fetch_file(f, data_dir, resume=resume, verbose=verbose,
overwrite=overwrite))
return (metadata,files)
def fetch_pain(data_dir=None, resume=True, verbose=1):
    '''Download and load the pain dataset from Neurovault
Args:
data_dir: string, optional
Path of the data directory. Used to force data storage in a
specified location. Default: None
n_subjects: int, optional
Number of subjects, from 1 to 6.
NOTE: n_subjects is deprecated from 0.2.6 and will be removed in 0.3
Use `subjects` instead.
subjects : list or int, optional
Either a list of subjects or the number of subjects to load, from
1 to 6. By default, 2nd subject will be loaded. Empty list returns
no subject data
Returns:
'''
collection = 504
dataset_name = 'chang2015_pain'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
metadata, files = download_collection(collection=collection,
data_dir=data_dir, resume=resume,
verbose=verbose)
return Brain_Data(data=files, X=metadata)
def fetch_emotion_ratings(data_dir=None, resume=True, verbose=1):
    '''Download and load the emotion rating dataset from Neurovault
Args:
data_dir: (string, optional). Path of the data directory. Used to
force data storage in a specified location. Default: None
n_subjects: (int, optional) Number of subjects, from 1 to 6.
NOTE: n_subjects is deprecated from 0.2.6 and will be
removed in 0.3 Use `subjects` instead.
subjects : (list or int, optional) Either a list of subjects or the
number of subjects to load, from 1 to 6. By default,
2nd subject will be loaded. Empty list returns no subject
data
Returns:
'''
collection = 1964
dataset_name = 'chang2015_emotion_ratings'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
metadata, files = download_collection(collection=collection,
data_dir=data_dir, resume=resume,
verbose=verbose)
return Brain_Data(data=files, X=metadata)
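# Usage sketch (not part of the original module). The data directory is
# hypothetical; shape() and X are assumed from nltools' Brain_Data API.
def _example_fetch_usage(data_dir='/tmp/nltools_data'):
    pain = fetch_pain(data_dir=data_dir)               # Brain_Data, one row per image
    ratings = fetch_emotion_ratings(data_dir=data_dir)
    return pain.shape(), ratings.X.head()              # image matrix shape, metadata preview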
|
mit
| 7,979,483,591,160,874,000
| 28.993671
| 86
| 0.703313
| false
| 3.219429
| false
| false
| false
|
zuowang/Paddle
|
python/paddle/trainer/PyDataProvider2.py
|
1
|
13558
|
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle
import logging
import collections
import functools
import itertools
logging.basicConfig(
format="[%(levelname)s %(asctime)s %(filename)s:%(lineno)s]"
" %(message)s")
class SequenceType(object):
NO_SEQUENCE = 0
SEQUENCE = 1
SUB_SEQUENCE = 2
# TODO(yuyang18): Add string data type here.
class DataType(object):
Dense = 0
SparseNonValue = 1
SparseValue = 2
Index = 3
class CacheType(object):
NO_CACHE = 0 # No cache at all
# First pass, read data from python. And store them in memory. Read from
# memory during rest passes.
CACHE_PASS_IN_MEM = 1
class InputType(object):
__slots__ = ['dim', 'seq_type', 'type']
def __init__(self, dim, seq_type, tp):
self.dim = dim
self.seq_type = seq_type
self.type = tp
def dense_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
return InputType(dim, seq_type, DataType.Dense)
def sparse_non_value_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
return InputType(dim, seq_type, DataType.SparseNonValue)
def sparse_value_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
return InputType(dim, seq_type, DataType.SparseValue)
def index_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
return InputType(dim, seq_type, DataType.Index)
dense_vector = dense_slot
sparse_binary_vector = sparse_non_value_slot
sparse_vector = sparse_value_slot
integer_value = index_slot
def dense_vector_sequence(dim):
return dense_vector(dim, seq_type=SequenceType.SEQUENCE)
def dense_vector_sub_sequence(dim):
return dense_vector(dim, seq_type=SequenceType.SUB_SEQUENCE)
def sparse_binary_vector_sequence(dim):
return sparse_binary_vector(dim, seq_type=SequenceType.SEQUENCE)
def sparse_binary_vector_sub_sequence(dim):
return sparse_binary_vector(dim, seq_type=SequenceType.SUB_SEQUENCE)
def sparse_vector_sequence(dim):
return sparse_vector(dim, seq_type=SequenceType.SEQUENCE)
def sparse_vector_sub_sequence(dim):
return sparse_vector(dim, seq_type=SequenceType.SUB_SEQUENCE)
def integer_value_sequence(dim):
return integer_value(dim, seq_type=SequenceType.SEQUENCE)
def integer_value_sub_sequence(dim):
return integer_value(dim, seq_type=SequenceType.SUB_SEQUENCE)
def integer_sequence(dim):
return index_slot(dim, seq_type=SequenceType.SEQUENCE)
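# Illustrative sketch (not part of the original file): typical input_types
# declarations built from the helpers above. The dimensions (a 784-dim dense
# feature vector, a 10-class label, a 10000-word vocabulary) are made up.
def _example_input_types():
    image_types = [dense_vector(784), integer_value(10)]
    text_types = [integer_value_sequence(10000),  # word ids
                  integer_value(2)]               # binary label
    return image_types, text_types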
class SingleSlotWrapper(object):
def __init__(self, generator):
self.generator = generator
def __call__(self, obj, filename):
for item in self.generator(obj, filename):
if isinstance(item, dict):
yield item
else:
yield [item]
class InputOrderWrapper(object):
def __init__(self, generator, input_order):
self.generator = generator
self.input_order = input_order
def __call__(self, obj, filename):
for item in self.generator(obj, filename):
if isinstance(item, dict):
yield [item.get(input_name, None) for input_name in
self.input_order]
else:
yield item
class CheckWrapper(object):
def __init__(self, generator, input_types, check_fail_continue, logger):
self.generator = generator
self.input_types = input_types
self.check_fail_continue = check_fail_continue
self.logger = logger
def __call__(self, obj, filename):
for items in self.generator(obj, filename):
try:
assert len(items) == len(self.input_types)
assert len(filter(lambda x: x is None, items)) == 0
for item, input_type in itertools.izip(items, self.input_types):
callback = functools.partial(CheckWrapper.loop_callback,
input_type)
for _ in xrange(input_type.seq_type):
callback = functools.partial(CheckWrapper.loop_check,
callback)
callback(item)
yield items
except AssertionError as e:
self.logger.warning(
"Item (%s) is not fit the input type with error %s"
% (repr(item), repr(e)))
if self.check_fail_continue:
continue
else:
raise
@staticmethod
def loop_callback(input_type, each):
assert isinstance(input_type, InputType)
if input_type.type == DataType.Dense:
assert isinstance(each, collections.Sequence)
for d in each:
assert isinstance(d, float)
            assert len(each) == input_type.dim
elif input_type.type == DataType.Index:
assert isinstance(each, int)
assert each < input_type.dim
elif input_type.type == DataType.SparseNonValue \
or input_type.type == DataType.SparseValue:
assert isinstance(each, collections.Sequence)
sparse_id = set()
for k in each:
if input_type.type == DataType.SparseValue:
k, v = k
assert isinstance(v, float)
assert isinstance(k, int)
assert k < input_type.dim
sparse_id.add(k)
assert len(sparse_id) == len(each)
else:
raise RuntimeError("Not support input type")
@staticmethod
def loop_check(callback, item):
for each in item:
callback(each)
def provider(input_types=None, should_shuffle=None, pool_size=-1,
min_pool_size=-1,
can_over_batch_size=True,
calc_batch_size=None,
cache=CacheType.NO_CACHE,
check=False, check_fail_continue=False,
init_hook=None, **kwargs):
"""
Provider decorator. Use it to make a function into PyDataProvider2 object.
In this function, user only need to get each sample for some train/test
file.
The basic usage is:
.. code-block:: python
@provider(some data provider config here...)
def process(settings, file_name):
while not at end of file_name:
sample = readOneSampleFromFile(file_name)
yield sample.
The configuration of data provider should be setup by\:
:param input_types: Specify the input types, can also be set in init_hook.
It could be a list of InputType object. For example,
input_types=[dense_vector(9), integer_value(2)]. Or user
can set a dict of InputType object, which key is
data_layer's name. For example, input_types=\
{'img': img_features, 'label': label}. when using dict of
InputType, user could yield a dict of feature values, which
key is also data_layer's name.
:type input_types: list|tuple|dict
    :param should_shuffle: True if data should be shuffled. Passing None means
                           shuffle when training and do not shuffle when testing.
:type should_shuffle: bool
:param pool_size: Max number of sample in data pool.
:type pool_size: int
    :param min_pool_size: Set the minimal number of samples in the data pool.
                          PaddlePaddle will randomly pick samples from the pool,
                          so min_pool_size affects the randomization of the data.
:type min_pool_size: int
:param can_over_batch_size: True if paddle can return a mini-batch larger
                                than batch size in settings. It is useful when
                                each sample's batch size is customized.
                                It is very dangerous to set it to false and use
                                calc_batch_size together. Default is true.
:type can_over_batch_size: bool
:param calc_batch_size: a method to calculate each sample's batch size.
                            By default each sample's batch size is 1, but you
                            can customize each sample's batch size.
:type calc_batch_size: callable
:param cache: Cache strategy of Data Provider. Default is CacheType.NO_CACHE
:type cache: int
    :param init_hook: Initialize hook. Useful when the data provider needs to
                      load some external data, such as a dictionary. The parameter is
(settings, file_list, \*\*kwargs).
- settings. It is the global settings object. User can set
settings.input_types here.
                      - file_list. All file names passed to the data provider.
- is_train. Is this data provider used for training or not.
- kwargs. Other keyword arguments passed from
trainer_config's args parameter.
:type init_hook: callable
    :param check: Check that the yielded data format is the same as input_types.
                  Enabling this will make the data providing process slow, but it
                  is very useful for debugging. Default is disabled.
:type check: bool
    :param check_fail_continue: Whether to continue training when a check fails.
                                Wrongly formatted data is simply dropped when it
                                is True. Has no effect when check is set to False.
:type check_fail_continue: bool
"""
def __wrapper__(generator):
class DataProvider(object):
def __init__(self, file_list, **kwargs):
self.logger = logging.getLogger("")
self.logger.setLevel(logging.INFO)
self.input_types = None
if 'slots' in kwargs:
self.logger.warning('setting slots value is deprecated, '
'please use input_types instead.')
self.slots = kwargs['slots']
self.slots = input_types
self.should_shuffle = should_shuffle
true_table = [1, 't', 'true', 'on']
false_table = [0, 'f', 'false', 'off']
if not isinstance(self.should_shuffle, bool) and \
self.should_shuffle is not None:
if isinstance(self.should_shuffle, basestring):
self.should_shuffle = self.should_shuffle.lower()
if self.should_shuffle in true_table:
self.should_shuffle = True
elif self.should_shuffle in false_table:
self.should_shuffle = False
else:
self.logger.warning(
"Could not recognize should_shuffle (%s), "
"just use default value of should_shuffle."
" Please set should_shuffle to bool value or "
"something in %s" % (
repr(self.should_shuffle),
repr(true_table + false_table)))
self.should_shuffle = None
self.pool_size = pool_size
self.can_over_batch_size = can_over_batch_size
self.calc_batch_size = calc_batch_size
self.file_list = file_list
self.generator = generator
self.cache = cache
self.min_pool_size = min_pool_size
self.input_order = kwargs['input_order']
self.check = check
if init_hook is not None:
init_hook(self, file_list=file_list, **kwargs)
if self.input_types is not None:
self.slots = self.input_types
assert self.slots is not None
assert self.generator is not None
use_dynamic_order = False
if isinstance(self.slots, dict): # reorder input_types
self.slots = [self.slots[ipt] for ipt in self.input_order]
use_dynamic_order = True
if len(self.slots) == 1:
self.generator = SingleSlotWrapper(self.generator)
if use_dynamic_order:
self.generator = InputOrderWrapper(self.generator,
self.input_order)
if self.check:
self.generator = CheckWrapper(self.generator,
self.slots,
check_fail_continue,
self.logger)
return DataProvider
return __wrapper__
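# Usage sketch (not part of the original file): a minimal data provider built
# with the decorator above. The assumed file format -- one "label;f1 f2 ... f9"
# line per sample -- is hypothetical.
@provider(input_types=[dense_vector(9), integer_value(2)],
          cache=CacheType.CACHE_PASS_IN_MEM)
def _example_process(settings, file_name):
    with open(file_name) as f:
        for line in f:
            label, features = line.strip().split(';')
            yield [float(x) for x in features.split()], int(label)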
def deserialize_args(args):
"""
Internal use only.
:param args:
:return:
"""
return cPickle.loads(args)
|
apache-2.0
| -1,903,039,986,429,761,800
| 35.544474
| 83
| 0.567119
| false
| 4.495358
| false
| false
| false
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/SCOP/__init__.py
|
1
|
31882
|
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# Modifications Copyright 2004/2005 James Casbon. All rights Reserved.
# Modifications Copyright 2010 Jeffrey Finkelstein. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Changes made by James Casbon:
# - New Astral class
# - SQL functionality for both Scop and Astral classes
# - All sunids are int not strings
#
# Code written by Jeffrey Chang to access SCOP over the internet, which
# was previously in Bio.WWW.SCOP, has now been merged into this module.
""" SCOP: Structural Classification of Proteins.
The SCOP database aims to provide a manually constructed classification of
all known protein structures into a hierarchy, the main levels of which
are family, superfamily and fold.
* "SCOP":http://scop.mrc-lmb.cam.ac.uk/scop/
* "Introduction":http://scop.mrc-lmb.cam.ac.uk/scop/intro.html
* "SCOP parsable files":http://scop.mrc-lmb.cam.ac.uk/scop/parse/
The Scop object in this module represents the entire SCOP classification. It
can be built from the three SCOP parsable files, modified if so desired, and
converted back to the same file formats. A single SCOP domain (represented
by the Domain class) can be obtained from Scop using the domain's SCOP
identifier (sid).
nodeCodeDict -- A mapping between known 2 letter node codes and a longer
description. The known node types are 'cl' (class), 'cf'
(fold), 'sf' (superfamily), 'fa' (family), 'dm' (domain),
'sp' (species), 'px' (domain). Additional node types may
be added in the future.
This module also provides code to access SCOP over the WWW.
Functions:
search -- Access the main CGI script.
_open -- Internally used function.
"""
from __future__ import print_function
import os
import re
from . import Des
from . import Cla
from . import Hie
from . import Residues
from Bio import SeqIO
from Bio.Seq import Seq
__docformat__ = "restructuredtext en"
nodeCodeDict = {'cl': 'class', 'cf': 'fold', 'sf': 'superfamily',
'fa': 'family', 'dm': 'protein', 'sp': 'species', 'px': 'domain'}
_nodetype_to_code = {'class': 'cl', 'fold': 'cf', 'superfamily': 'sf',
'family': 'fa', 'protein': 'dm', 'species': 'sp', 'domain': 'px'}
nodeCodeOrder = ['ro', 'cl', 'cf', 'sf', 'fa', 'dm', 'sp', 'px']
astralBibIds = [10, 20, 25, 30, 35, 40, 50, 70, 90, 95, 100]
astralEvs = [10, 5, 1, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 1e-4, 1e-5, 1e-10, 1e-15,
1e-20, 1e-25, 1e-50]
astralEv_to_file = {10: 'e+1', 5: 'e+0,7', 1: 'e+0', 0.5: 'e-0,3', 0.1: 'e-1',
0.05: 'e-1,3', 0.01: 'e-2', 0.005: 'e-2,3', 0.001: 'e-3',
1e-4: 'e-4', 1e-5: 'e-5', 1e-10: 'e-10', 1e-15: 'e-15',
1e-20: 'e-20', 1e-25: 'e-25', 1e-50: 'e-50'}
astralEv_to_sql = {10: 'e1', 5: 'e0_7', 1: 'e0', 0.5: 'e_0_3', 0.1: 'e_1',
0.05: 'e_1_3', 0.01: 'e_2', 0.005: 'e_2_3', 0.001: 'e_3',
1e-4: 'e_4', 1e-5: 'e_5', 1e-10: 'e_10', 1e-15: 'e_15',
1e-20: 'e_20', 1e-25: 'e_25', 1e-50: 'e_50'}
try:
# See if the cmp function exists (will on Python 2)
_cmp = cmp
except NameError:
def _cmp(a, b):
"""Implementation of cmp(x,y) for Python 3 (PRIVATE).
Based on Python 3 docs which say if you really need the cmp()
functionality, you could use the expression (a > b) - (a < b)
as the equivalent for cmp(a, b)
"""
return (a > b) - (a < b)
def cmp_sccs(sccs1, sccs2):
"""Order SCOP concise classification strings (sccs).
a.4.5.1 < a.4.5.11 < b.1.1.1
A sccs (e.g. a.4.5.11) compactly represents a domain's classification.
The letter represents the class, and the numbers are the fold,
superfamily, and family, respectively.
"""
s1 = sccs1.split(".")
s2 = sccs2.split(".")
if s1[0] != s2[0]:
return _cmp(s1[0], s2[0])
s1 = [int(x) for x in s1[1:]]
s2 = [int(x) for x in s2[1:]]
return _cmp(s1, s2)
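# Illustrative sketch (not part of the original module): ordering sccs strings
# with cmp_sccs via functools.cmp_to_key, using the values from the docstring.
def _example_sort_sccs():
    import functools
    sccs = ['b.1.1.1', 'a.4.5.11', 'a.4.5.1']
    return sorted(sccs, key=functools.cmp_to_key(cmp_sccs))
    # expected: ['a.4.5.1', 'a.4.5.11', 'b.1.1.1']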
_domain_re = re.compile(r">?([\w_\.]*)\s+([\w\.]*)\s+\(([^)]*)\) (.*)")
def parse_domain(str):
"""Convert an ASTRAL header string into a Scop domain.
An ASTRAL (http://astral.stanford.edu/) header contains a concise
description of a SCOP domain. A very similar format is used when a
Domain object is converted into a string. The Domain returned by this
method contains most of the SCOP information, but it will not be located
within the SCOP hierarchy (i.e. The parent node will be None). The
description is composed of the SCOP protein and species descriptions.
A typical ASTRAL header looks like --
>d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}
"""
m = _domain_re.match(str)
if (not m):
raise ValueError("Domain: " + str)
dom = Domain()
dom.sid = m.group(1)
dom.sccs = m.group(2)
dom.residues = Residues.Residues(m.group(3))
if not dom.residues.pdbid:
dom.residues.pdbid = dom.sid[1:5]
dom.description = m.group(4).strip()
return dom
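# Illustrative sketch (not part of the original module): parsing the ASTRAL
# header quoted in the docstring above.
def _example_parse_domain():
    dom = parse_domain(">d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase "
                       "{Escherichia coli}")
    return dom.sid, dom.sccs, dom.description
    # ('d1tpt_1', 'a.46.2.1', 'Thymidine phosphorylase {Escherichia coli}')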
def _open_scop_file(scop_dir_path, version, filetype):
filename = "dir.%s.scop.txt_%s" % (filetype, version)
handle = open(os.path.join(scop_dir_path, filename))
return handle
class Scop(object):
"""The entire SCOP hierarchy.
root -- The root node of the hierarchy
"""
def __init__(self, cla_handle=None, des_handle=None, hie_handle=None,
dir_path=None, db_handle=None, version=None):
"""Build the SCOP hierarchy from the SCOP parsable files, or a sql backend.
If no file handles are given, then a Scop object with a single
empty root node is returned.
If a directory and version are given (with dir_path=.., version=...) or
file handles for each file, the whole scop tree will be built in memory.
If a MySQLdb database handle is given, the tree will be built as needed,
        minimising construction times. To build the SQL database, use the
        write_xxx_sql methods to create the tables.
"""
self._sidDict = {}
self._sunidDict = {}
if all(h is None for h in [cla_handle, des_handle, hie_handle, dir_path, db_handle]):
return
if dir_path is None and db_handle is None:
if cla_handle is None or des_handle is None or hie_handle is None:
raise RuntimeError("Need CLA, DES and HIE files to build SCOP")
sunidDict = {}
self.db_handle = db_handle
try:
if db_handle:
# do nothing if we have a db handle, we'll do it all on the fly
pass
else:
# open SCOP parseable files
if dir_path:
if not version:
raise RuntimeError("Need SCOP version to find parsable files in directory")
if cla_handle or des_handle or hie_handle:
raise RuntimeError("Cannot specify SCOP directory and specific files")
cla_handle = _open_scop_file(dir_path, version, 'cla')
des_handle = _open_scop_file(dir_path, version, 'des')
hie_handle = _open_scop_file(dir_path, version, 'hie')
root = Node()
domains = []
root.sunid = 0
root.type = 'ro'
sunidDict[root.sunid] = root
self.root = root
root.description = 'SCOP Root'
# Build the rest of the nodes using the DES file
records = Des.parse(des_handle)
for record in records:
if record.nodetype == 'px':
n = Domain()
n.sid = record.name
domains.append(n)
else:
n = Node()
n.sunid = record.sunid
n.type = record.nodetype
n.sccs = record.sccs
n.description = record.description
sunidDict[n.sunid] = n
# Glue all of the Nodes together using the HIE file
records = Hie.parse(hie_handle)
for record in records:
if record.sunid not in sunidDict:
print(record.sunid)
n = sunidDict[record.sunid]
if record.parent != '': # Not root node
if record.parent not in sunidDict:
raise ValueError("Incomplete data?")
n.parent = sunidDict[record.parent]
for c in record.children:
if c not in sunidDict:
raise ValueError("Incomplete data?")
n.children.append(sunidDict[c])
# Fill in the gaps with information from the CLA file
sidDict = {}
records = Cla.parse(cla_handle)
for record in records:
n = sunidDict[record.sunid]
assert n.sccs == record.sccs
assert n.sid == record.sid
n.residues = record.residues
sidDict[n.sid] = n
# Clean up
self._sunidDict = sunidDict
self._sidDict = sidDict
self._domains = tuple(domains)
finally:
if dir_path:
# If we opened the files, we close the files
if cla_handle:
cla_handle.close()
if des_handle:
des_handle.close()
if hie_handle:
hie_handle.close()
def getRoot(self):
return self.getNodeBySunid(0)
def getDomainBySid(self, sid):
"""Return a domain from its sid"""
if sid in self._sidDict:
return self._sidDict[sid]
if self.db_handle:
self.getDomainFromSQL(sid=sid)
if sid in self._sidDict:
return self._sidDict[sid]
else:
return None
def getNodeBySunid(self, sunid):
"""Return a node from its sunid"""
if sunid in self._sunidDict:
return self._sunidDict[sunid]
if self.db_handle:
self.getDomainFromSQL(sunid=sunid)
if sunid in self._sunidDict:
return self._sunidDict[sunid]
else:
return None
def getDomains(self):
"""Returns an ordered tuple of all SCOP Domains"""
if self.db_handle:
return self.getRoot().getDescendents('px')
else:
return self._domains
def write_hie(self, handle):
"""Build an HIE SCOP parsable file from this object"""
# We order nodes to ease comparison with original file
for n in sorted(self._sunidDict.values(), key=lambda n: n.sunid):
handle.write(str(n.toHieRecord()))
def write_des(self, handle):
"""Build a DES SCOP parsable file from this object"""
        # Original SCOP file is not ordered?
for n in sorted(self._sunidDict.values(), key=lambda n: n.sunid):
if n != self.root:
handle.write(str(n.toDesRecord()))
def write_cla(self, handle):
"""Build a CLA SCOP parsable file from this object"""
# We order nodes to ease comparison with original file
for n in sorted(self._sidDict.values(), key=lambda n: n.sunid):
handle.write(str(n.toClaRecord()))
def getDomainFromSQL(self, sunid=None, sid=None):
"""Load a node from the SQL backend using sunid or sid"""
if sunid is None and sid is None:
return None
cur = self.db_handle.cursor()
if sid:
cur.execute("SELECT sunid FROM cla WHERE sid=%s", sid)
res = cur.fetchone()
if res is None:
return None
sunid = res[0]
cur.execute("SELECT * FROM des WHERE sunid=%s", sunid)
data = cur.fetchone()
if data is not None:
n = None
# determine if Node or Domain
if data[1] != "px":
n = Node(scop=self)
cur.execute("SELECT child FROM hie WHERE parent=%s", sunid)
children = []
for c in cur.fetchall():
children.append(c[0])
n.children = children
else:
n = Domain(scop=self)
cur.execute("select sid, residues, pdbid from cla where sunid=%s",
sunid)
[n.sid, n.residues, pdbid] = cur.fetchone()
n.residues = Residues.Residues(n.residues)
n.residues.pdbid = pdbid
self._sidDict[n.sid] = n
[n.sunid, n.type, n.sccs, n.description] = data
if data[1] != 'ro':
cur.execute("SELECT parent FROM hie WHERE child=%s", sunid)
n.parent = cur.fetchone()[0]
n.sunid = int(n.sunid)
self._sunidDict[n.sunid] = n
def getAscendentFromSQL(self, node, type):
"""Get ascendents using SQL backend"""
if nodeCodeOrder.index(type) >= nodeCodeOrder.index(node.type):
return None
cur = self.db_handle.cursor()
cur.execute("SELECT " + type + " from cla WHERE " + node.type + "=%s", (node.sunid))
result = cur.fetchone()
if result is not None:
return self.getNodeBySunid(result[0])
else:
return None
def getDescendentsFromSQL(self, node, type):
"""Get descendents of a node using the database backend. This avoids
repeated iteration of SQL calls and is therefore much quicker than
repeatedly calling node.getChildren().
"""
if nodeCodeOrder.index(type) <= nodeCodeOrder.index(node.type):
return []
des_list = []
# SQL cla table knows nothing about 'ro'
if node.type == 'ro':
for c in node.getChildren():
for d in self.getDescendentsFromSQL(c, type):
des_list.append(d)
return des_list
cur = self.db_handle.cursor()
if type != 'px':
cur.execute("SELECT DISTINCT des.sunid,des.type,des.sccs,description FROM \
cla,des WHERE cla." + node.type + "=%s AND cla." + type + "=des.sunid", (node.sunid))
data = cur.fetchall()
for d in data:
if int(d[0]) not in self._sunidDict:
n = Node(scop=self)
[n.sunid, n.type, n.sccs, n.description] = d
n.sunid = int(n.sunid)
self._sunidDict[n.sunid] = n
cur.execute("SELECT parent FROM hie WHERE child=%s", n.sunid)
n.parent = cur.fetchone()[0]
cur.execute("SELECT child FROM hie WHERE parent=%s", n.sunid)
children = []
for c in cur.fetchall():
children.append(c[0])
n.children = children
des_list.append(self._sunidDict[int(d[0])])
else:
cur.execute("SELECT cla.sunid,sid,pdbid,residues,cla.sccs,type,description,sp\
FROM cla,des where cla.sunid=des.sunid and cla." + node.type + "=%s",
node.sunid)
data = cur.fetchall()
for d in data:
if int(d[0]) not in self._sunidDict:
n = Domain(scop=self)
# [n.sunid, n.sid, n.pdbid, n.residues, n.sccs, n.type,
# n.description,n.parent] = data
[n.sunid, n.sid, pdbid, n.residues, n.sccs, n.type, n.description,
n.parent] = d[0:8]
n.residues = Residues.Residues(n.residues)
n.residues.pdbid = pdbid
n.sunid = int(n.sunid)
self._sunidDict[n.sunid] = n
self._sidDict[n.sid] = n
des_list.append(self._sunidDict[int(d[0])])
return des_list
def write_hie_sql(self, handle):
"""Write HIE data to SQL database"""
cur = handle.cursor()
cur.execute("DROP TABLE IF EXISTS hie")
cur.execute("CREATE TABLE hie (parent INT, child INT, PRIMARY KEY (child),\
INDEX (parent) )")
for p in self._sunidDict.values():
for c in p.children:
cur.execute("INSERT INTO hie VALUES (%s,%s)" % (p.sunid, c.sunid))
def write_cla_sql(self, handle):
"""Write CLA data to SQL database"""
cur = handle.cursor()
cur.execute("DROP TABLE IF EXISTS cla")
cur.execute("CREATE TABLE cla (sunid INT, sid CHAR(8), pdbid CHAR(4),\
residues VARCHAR(50), sccs CHAR(10), cl INT, cf INT, sf INT, fa INT,\
dm INT, sp INT, px INT, PRIMARY KEY (sunid), INDEX (SID) )")
for n in self._sidDict.values():
c = n.toClaRecord()
cur.execute("INSERT INTO cla VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
(n.sunid, n.sid, c.residues.pdbid, c.residues, n.sccs,
n.getAscendent('cl').sunid, n.getAscendent('cf').sunid,
n.getAscendent('sf').sunid, n.getAscendent('fa').sunid,
n.getAscendent('dm').sunid, n.getAscendent('sp').sunid,
n.sunid))
def write_des_sql(self, handle):
"""Write DES data to SQL database"""
cur = handle.cursor()
cur.execute("DROP TABLE IF EXISTS des")
cur.execute("CREATE TABLE des (sunid INT, type CHAR(2), sccs CHAR(10),\
description VARCHAR(255),\
PRIMARY KEY (sunid) )")
for n in self._sunidDict.values():
cur.execute("INSERT INTO des VALUES (%s,%s,%s,%s)",
(n.sunid, n.type, n.sccs, n.description))
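# Usage sketch (not part of the original module): building the hierarchy from
# the three parsable files and walking it. The directory path and version
# below are hypothetical.
def _example_scop_usage(dir_path="/data/scop", version="1.75"):
    scop = Scop(dir_path=dir_path, version=version)
    dom = scop.getDomainBySid("d1tpt_1")          # a single Domain, or None
    if dom is None:
        return None
    superfam = dom.getAscendent("superfamily")    # walk up the hierarchy
    families = superfam.getDescendents("family")  # and back down
    return superfam.description, len(families)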
class Node(object):
""" A node in the Scop hierarchy
sunid -- SCOP unique identifiers. e.g. '14986'
parent -- The parent node
children -- A list of child nodes
sccs -- SCOP concise classification string. e.g. 'a.1.1.2'
type -- A 2 letter node type code. e.g. 'px' for domains
description --
"""
def __init__(self, scop=None):
"""Create a Node in the scop hierarchy. If a Scop instance is provided to the
constructor, this will be used to lookup related references using the SQL
methods. If no instance is provided, it is assumed the whole tree exists
and is connected."""
self.sunid = ''
self.parent = None
self.children = []
self.sccs = ''
self.type = ''
self.description = ''
self.scop = scop
def __str__(self):
s = []
s.append(str(self.sunid))
s.append(self.sccs)
s.append(self.type)
s.append(self.description)
return " ".join(s)
def toHieRecord(self):
"""Return an Hie.Record"""
rec = Hie.Record()
rec.sunid = str(self.sunid)
if self.getParent(): # Not root node
rec.parent = str(self.getParent().sunid)
else:
rec.parent = '-'
for c in self.getChildren():
rec.children.append(str(c.sunid))
return rec
def toDesRecord(self):
"""Return a Des.Record"""
rec = Des.Record()
rec.sunid = str(self.sunid)
rec.nodetype = self.type
rec.sccs = self.sccs
rec.description = self.description
return rec
def getChildren(self):
"""Return a list of children of this Node"""
if self.scop is None:
return self.children
else:
return [self.scop.getNodeBySunid(x) for x in self.children]
def getParent(self):
"""Return the parent of this Node"""
if self.scop is None:
return self.parent
else:
return self.scop.getNodeBySunid(self.parent)
def getDescendents(self, node_type):
""" Return a list of all decendent nodes of the given type. Node type can a
two letter code or longer description. e.g. 'fa' or 'family'
"""
if node_type in _nodetype_to_code:
node_type = _nodetype_to_code[node_type]
nodes = [self]
if self.scop:
return self.scop.getDescendentsFromSQL(self, node_type)
while nodes[0].type != node_type:
if nodes[0].type == 'px':
                return [] # Fell off the bottom of the hierarchy
child_list = []
for n in nodes:
for child in n.getChildren():
child_list.append(child)
nodes = child_list
return nodes
def getAscendent(self, node_type):
""" Return the ancenstor node of the given type, or None.Node type can a
two letter code or longer description. e.g. 'fa' or 'family'"""
if node_type in _nodetype_to_code:
node_type = _nodetype_to_code[node_type]
if self.scop:
return self.scop.getAscendentFromSQL(self, node_type)
else:
n = self
if n.type == node_type:
return None
while n.type != node_type:
if n.type == 'ro':
                    return None # Fell off the top of the hierarchy
n = n.getParent()
return n
class Domain(Node):
""" A SCOP domain. A leaf node in the Scop hierarchy.
- sid -- The SCOP domain identifier. e.g. ``"d5hbib_"``
- residues -- A Residue object. It defines the collection
of PDB atoms that make up this domain.
"""
def __init__(self, scop=None):
Node.__init__(self, scop=scop)
self.sid = ''
self.residues = None
def __str__(self):
s = []
s.append(self.sid)
s.append(self.sccs)
s.append("(" + str(self.residues) + ")")
if not self.getParent():
s.append(self.description)
else:
sp = self.getParent()
dm = sp.getParent()
s.append(dm.description)
s.append("{" + sp.description + "}")
return " ".join(s)
def toDesRecord(self):
"""Return a Des.Record"""
rec = Node.toDesRecord(self)
rec.name = self.sid
return rec
def toClaRecord(self):
"""Return a Cla.Record"""
rec = Cla.Record()
rec.sid = self.sid
rec.residues = self.residues
rec.sccs = self.sccs
rec.sunid = self.sunid
n = self
while n.sunid != 0: # Not root node
rec.hierarchy[n.type] = str(n.sunid)
n = n.getParent()
# Order does not matter in the hierarchy field. For more info, see
# http://scop.mrc-lmb.cam.ac.uk/scop/release-notes.html
# rec.hierarchy.reverse()
return rec
class Astral(object):
"""Abstraction of the ASTRAL database, which has sequences for all the SCOP domains,
as well as clusterings by percent id or evalue.
"""
def __init__(self, dir_path=None, version=None, scop=None,
astral_file=None, db_handle=None):
"""
Initialise the astral database.
You must provide either a directory of SCOP files:
- dir_path - string, the path to location of the scopseq-x.xx directory
(not the directory itself), and
                - version - a version number.
or, a FASTA file:
- astral_file - string, a path to a fasta file (which will be loaded in memory)
or, a MYSQL database:
- db_handle - a database handle for a MYSQL database containing a table
'astral' with the astral data in it. This can be created
using writeToSQL.
"""
if astral_file is None and dir_path is None and db_handle is None:
raise RuntimeError("Need either file handle, or (dir_path + "
+ "version) or database handle to construct Astral")
if not scop:
raise RuntimeError("Must provide a Scop instance to construct")
self.scop = scop
self.db_handle = db_handle
if not astral_file and not db_handle:
if dir_path is None or version is None:
raise RuntimeError("must provide dir_path and version")
self.version = version
self.path = os.path.join(dir_path, "scopseq-%s" % version)
astral_file = "astral-scopdom-seqres-all-%s.fa" % self.version
astral_file = os.path.join(self.path, astral_file)
if astral_file:
# Build a dictionary of SeqRecord objects in the FASTA file, IN MEMORY
self.fasta_dict = SeqIO.to_dict(SeqIO.parse(astral_file, "fasta"))
self.astral_file = astral_file
self.EvDatasets = {}
self.EvDatahash = {}
self.IdDatasets = {}
self.IdDatahash = {}
def domainsClusteredByEv(self, id):
"""get domains clustered by evalue"""
if id not in self.EvDatasets:
if self.db_handle:
self.EvDatasets[id] = self.getAstralDomainsFromSQL(astralEv_to_sql[id])
else:
if not self.path:
raise RuntimeError("No scopseq directory specified")
file_prefix = "astral-scopdom-seqres-sel-gs"
filename = "%s-e100m-%s-%s.id" % (file_prefix, astralEv_to_file[id],
self.version)
filename = os.path.join(self.path, filename)
self.EvDatasets[id] = self.getAstralDomainsFromFile(filename)
return self.EvDatasets[id]
def domainsClusteredById(self, id):
"""get domains clustered by percent id"""
if id not in self.IdDatasets:
if self.db_handle:
self.IdDatasets[id] = self.getAstralDomainsFromSQL("id" + str(id))
else:
if not self.path:
raise RuntimeError("No scopseq directory specified")
file_prefix = "astral-scopdom-seqres-sel-gs"
filename = "%s-bib-%s-%s.id" % (file_prefix, id, self.version)
filename = os.path.join(self.path, filename)
self.IdDatasets[id] = self.getAstralDomainsFromFile(filename)
return self.IdDatasets[id]
def getAstralDomainsFromFile(self, filename=None, file_handle=None):
"""Get the scop domains from a file containing a list of sids"""
if file_handle is None and filename is None:
raise RuntimeError("You must provide a filename or handle")
if not file_handle:
file_handle = open(filename)
doms = []
while True:
line = file_handle.readline()
if not line:
break
line = line.rstrip()
doms.append(line)
if filename:
file_handle.close()
doms = [a for a in doms if a[0] == 'd']
doms = [self.scop.getDomainBySid(x) for x in doms]
return doms
def getAstralDomainsFromSQL(self, column):
"""Load a set of astral domains from a column in the astral table of a MYSQL
        database (which can be created with writeToSQL(...))"""
cur = self.db_handle.cursor()
cur.execute("SELECT sid FROM astral WHERE " + column + "=1")
data = cur.fetchall()
data = [self.scop.getDomainBySid(x[0]) for x in data]
return data
def getSeqBySid(self, domain):
"""get the seq record of a given domain from its sid"""
if self.db_handle is None:
return self.fasta_dict[domain].seq
else:
cur = self.db_handle.cursor()
cur.execute("SELECT seq FROM astral WHERE sid=%s", domain)
return Seq(cur.fetchone()[0])
def getSeq(self, domain):
"""Return seq associated with domain"""
return self.getSeqBySid(domain.sid)
def hashedDomainsById(self, id):
"""Get domains clustered by sequence identity in a dict"""
if id not in self.IdDatahash:
self.IdDatahash[id] = {}
for d in self.domainsClusteredById(id):
self.IdDatahash[id][d] = 1
return self.IdDatahash[id]
def hashedDomainsByEv(self, id):
"""Get domains clustered by evalue in a dict"""
if id not in self.EvDatahash:
self.EvDatahash[id] = {}
for d in self.domainsClusteredByEv(id):
self.EvDatahash[id][d] = 1
return self.EvDatahash[id]
def isDomainInId(self, dom, id):
"""Returns true if the domain is in the astral clusters for percent ID"""
return dom in self.hashedDomainsById(id)
def isDomainInEv(self, dom, id):
"""Returns true if the domain is in the ASTRAL clusters for evalues"""
return dom in self.hashedDomainsByEv(id)
def writeToSQL(self, db_handle):
"""Write the ASTRAL database to a MYSQL database"""
cur = db_handle.cursor()
cur.execute("DROP TABLE IF EXISTS astral")
cur.execute("CREATE TABLE astral (sid CHAR(8), seq TEXT, PRIMARY KEY (sid))")
for dom in self.fasta_dict:
cur.execute("INSERT INTO astral (sid,seq) values (%s,%s)",
(dom, self.fasta_dict[dom].seq.data))
for i in astralBibIds:
cur.execute("ALTER TABLE astral ADD (id" + str(i) + " TINYINT)")
for d in self.domainsClusteredById(i):
cur.execute("UPDATE astral SET id" + str(i) + "=1 WHERE sid=%s",
d.sid)
for ev in astralEvs:
cur.execute("ALTER TABLE astral ADD (" + astralEv_to_sql[ev] + " TINYINT)")
for d in self.domainsClusteredByEv(ev):
cur.execute("UPDATE astral SET " + astralEv_to_sql[ev] + "=1 WHERE sid=%s",
d.sid)
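# Usage sketch (not part of the original module): loading ASTRAL next to an
# existing Scop instance and pulling one representative sequence from the 95%
# identity subset. The directory path and version are hypothetical.
def _example_astral_usage(scop, dir_path="/data/scop", version="1.75"):
    astral = Astral(dir_path=dir_path, version=version, scop=scop)
    reps = astral.domainsClusteredById(95)            # list of Domain objects
    seq = astral.getSeq(reps[0]) if reps else None    # Bio.Seq sequence
    return len(reps), seq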
def search(pdb=None, key=None, sid=None, disp=None, dir=None, loc=None,
cgi='http://scop.mrc-lmb.cam.ac.uk/scop/search.cgi', **keywds):
"""search(pdb=None, key=None, sid=None, disp=None, dir=None, loc=None,
cgi='http://scop.mrc-lmb.cam.ac.uk/scop/search.cgi', **keywds)
Access search.cgi and return a handle to the results. See the
online help file for an explanation of the parameters:
http://scop.mrc-lmb.cam.ac.uk/scop/help.html
Raises an IOError if there's a network error.
"""
params = {'pdb': pdb, 'key': key, 'sid': sid, 'disp': disp,
'dir': dir, 'loc': loc}
variables = {}
for k, v in params.items():
if v is not None:
variables[k] = v
variables.update(keywds)
return _open(cgi, variables)
def _open(cgi, params={}, get=1):
"""_open(cgi, params={}, get=1) -> UndoHandle
Open a handle to SCOP. cgi is the URL for the cgi script to access.
params is a dictionary with the options to pass to it. get is a boolean
that describes whether a GET should be used. Does some
simple error checking, and will raise an IOError if it encounters one.
"""
from Bio._py3k import urlopen, urlencode
# Open a handle to SCOP.
options = urlencode(params)
if get: # do a GET
if options:
cgi += "?" + options
handle = urlopen(cgi)
else: # do a POST
handle = urlopen(cgi, data=options)
return handle
|
apache-2.0
| 2,075,764,463,224,599,300
| 34.662192
| 99
| 0.552977
| false
| 3.671772
| false
| false
| false
|
mcs07/ChemDataExtractor
|
chemdataextractor/cli/pos.py
|
1
|
11619
|
# -*- coding: utf-8 -*-
"""
chemdataextractor.cli.pos
~~~~~~~~~~~~~~~~~~~~~~~~~
Part of speech tagging commands.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import click
from ..doc import Document, Text
from ..nlp.corpus import genia_training, wsj_training, wsj_evaluation, genia_evaluation
from ..nlp.pos import TAGS, ChemApPosTagger, ChemCrfPosTagger
log = logging.getLogger(__name__)
@click.group(name='pos')
@click.pass_context
def pos_cli(ctx):
"""POS tagger commands."""
pass
@pos_cli.command()
@click.option('--output', '-o', help='Output model file.', required=True)
@click.pass_context
def train_all(ctx, output):
"""Train POS tagger on WSJ, GENIA, and both. With and without cluster features."""
click.echo('chemdataextractor.pos.train_all')
click.echo('Output: %s' % output)
ctx.invoke(train, output='%s_wsj_nocluster.pickle' % output, corpus='wsj', clusters=False)
ctx.invoke(train, output='%s_wsj.pickle' % output, corpus='wsj', clusters=True)
ctx.invoke(train, output='%s_genia_nocluster.pickle' % output, corpus='genia', clusters=False)
ctx.invoke(train, output='%s_genia.pickle' % output, corpus='genia', clusters=True)
ctx.invoke(train, output='%s_wsj_genia_nocluster.pickle' % output, corpus='wsj+genia', clusters=False)
ctx.invoke(train, output='%s_wsj_genia.pickle' % output, corpus='wsj+genia', clusters=True)
@pos_cli.command()
@click.argument('model', required=True)
@click.pass_context
def evaluate_all(ctx, model):
"""Evaluate POS taggers on WSJ and GENIA."""
click.echo('chemdataextractor.pos.evaluate_all')
click.echo('Model: %s' % model)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='genia', clusters=True)
@pos_cli.command()
@click.option('--output', '-o', help='Output model file.', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia', 'wsj+genia']), help='Training corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_context
def train(ctx, output, corpus, clusters):
"""Train POS Tagger."""
click.echo('chemdataextractor.pos.train')
click.echo('Output: %s' % output)
click.echo('Corpus: %s' % corpus)
click.echo('Clusters: %s' % clusters)
wsj_sents = []
genia_sents = []
if corpus == 'wsj' or corpus == 'wsj+genia':
wsj_sents = list(wsj_training.tagged_sents())
# For WSJ, remove all tokens with -NONE- tag
for i, wsj_sent in enumerate(wsj_sents):
wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
if corpus == 'genia' or corpus == 'wsj+genia':
genia_sents = list(genia_training.tagged_sents())
# Translate GENIA
for i, genia_sent in enumerate(genia_sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
                    genia_sents[i][j] = (token, '-LRB-')  # ( to -LRB- (also do for evaluation)
elif tag == ')':
genia_sents[i][j] = (token, '-RRB-') # ) to -RRB- (also do for evaluation)
elif tag == 'CT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == 'XT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == '-':
genia_sents[i][j] = (token, ':') # Single hyphen character for dash
elif tag == 'N':
genia_sents[i][j] = (token, 'NN') # Typo?
elif tag == 'PP':
genia_sents[i][j] = (token, 'PRP') # Typo?
elif tag == '' and token == ')':
genia_sents[i][j] = (token, '-RRB-') # Typo?
elif tag == '' and token == 'IFN-gamma':
genia_sents[i][j] = (token, 'NN') # Typo?
elif '|' in tag:
genia_sents[i][j] = (token, tag.split('|')[0]) # If contains |, choose first part
# Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]
if corpus == 'wsj':
training_corpus = wsj_sents
elif corpus == 'genia':
training_corpus = genia_sents
elif corpus == 'wsj+genia':
training_corpus = wsj_sents + genia_sents
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(clusters=clusters)
tagger.train(training_corpus, output)
@pos_cli.command()
@click.argument('model', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia']), help='Evaluation corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_context
def evaluate(ctx, model, corpus, clusters):
"""Evaluate performance of POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
sents[i][j] = (token, '-LRB-')
elif tag == ')':
sents[i][j] = (token, '-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(model=model, clusters=clusters)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy))
@pos_cli.command()
@click.option('--output', '-o', type=click.File('wb'), help='Output model file.', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia', 'wsj+genia']), help='Training corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_obj
def train_perceptron(ctx, output, corpus, clusters):
"""Train Averaged Perceptron POS Tagger."""
click.echo('chemdataextractor.pos.train')
click.echo('Output: %s' % output)
click.echo('Corpus: %s' % corpus)
click.echo('Clusters: %s' % clusters)
wsj_sents = []
genia_sents = []
if corpus == 'wsj' or corpus == 'wsj+genia':
wsj_sents = list(wsj_training.tagged_sents())
# For WSJ, remove all tokens with -NONE- tag
for i, wsj_sent in enumerate(wsj_sents):
wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
if corpus == 'genia' or corpus == 'wsj+genia':
genia_sents = list(genia_training.tagged_sents())
# Translate GENIA
for i, genia_sent in enumerate(genia_sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
                    genia_sents[i][j] = (token, '-LRB-')  # ( to -LRB- (also do for evaluation)
elif tag == ')':
genia_sents[i][j] = (token, '-RRB-') # ) to -RRB- (also do for evaluation)
elif tag == 'CT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == 'XT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == '-':
genia_sents[i][j] = (token, ':') # Single hyphen character for dash
elif tag == 'N':
genia_sents[i][j] = (token, 'NN') # Typo?
elif tag == 'PP':
genia_sents[i][j] = (token, 'PRP') # Typo?
elif tag == '' and token == ')':
genia_sents[i][j] = (token, '-RRB-') # Typo?
elif tag == '' and token == 'IFN-gamma':
genia_sents[i][j] = (token, 'NN') # Typo?
elif '|' in tag:
genia_sents[i][j] = (token, tag.split('|')[0]) # If contains |, choose first part
# Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]
if corpus == 'wsj':
training_corpus = wsj_sents
elif corpus == 'genia':
training_corpus = genia_sents
elif corpus == 'wsj+genia':
training_corpus = wsj_sents + genia_sents
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(clusters=clusters)
tagger.train(training_corpus)
tagger.save(output)
@pos_cli.command()
@click.argument('model', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia']), help='Evaluation corpus')
@click.pass_obj
def evaluate_perceptron(ctx, model, corpus):
"""Evaluate performance of Averaged Perceptron POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == u'-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == u'(':
sents[i][j] = (token, u'-LRB-')
elif tag == u')':
sents[i][j] = (token, u'-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(model=model)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy))
@pos_cli.command()
@click.option('--output', '-o', type=click.File('w', encoding='utf8'), help='Output file.', default=click.get_text_stream('stdout'))
@click.argument('input', type=click.File('rb'), default=click.get_binary_stream('stdin'))
@click.pass_obj
def tag(ctx, input, output):
"""Output POS-tagged tokens."""
log.info('chemdataextractor.pos.tag')
log.info('Reading %s' % input.name)
doc = Document.from_file(input)
for element in doc.elements:
if isinstance(element, Text):
for sentence in element.sentences:
output.write(u' '.join(u'/'.join([token, tag]) for token, tag in sentence.pos_tagged_tokens))
output.write(u'\n')
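# Usage sketch (not part of the original module). These Click commands are
# normally reached through ChemDataExtractor's command line interface; the
# exact entry-point name may vary with the installation, but invocations look
# roughly like:
#
#   cde pos train -o my_tagger --corpus wsj+genia --clusters
#   cde pos evaluate my_tagger.pickle --corpus genia
#   cde pos tag paper.html -o tagged.txt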
|
mit
| -1,216,581,177,639,905,800
| 42.845283
| 132
| 0.58697
| false
| 3.275726
| false
| false
| false
|
CollinRooney12/htsprophet
|
htsprophet/fitForecast.py
|
1
|
13224
|
# -*- coding: utf-8 -*-
"""
Name: fitForecast.py
Author: Collin Rooney
Last Updated: 7/18/2017
This script will contain functions for all types of hierarchical modeling approaches.
It will use the prophet package as a forecasting tool.
The general idea of it is very similar to the hts package in R, but it is a little
more specific with how the dataframe is put together.
Credit to Rob J. Hyndman and research partners as much of the code was developed with the help of their work
https://www.otexts.org/fpp
https://robjhyndman.com/publications/
Credit to Facebook and their fbprophet package
https://facebookincubator.github.io/prophet/
It was my intention to make some of the code look similar to certain sections in the Prophet and (Hyndman's) hts packages
"""
import pandas as pd
import numpy as np
from fbprophet import Prophet
import contextlib, os
from scipy.special import inv_boxcox
#%%
def fitForecast(y, h, sumMat, nodes, method, freq, include_history, cap, capF, changepoints, n_changepoints, \
yearly_seasonality, weekly_seasonality, daily_seasonality, holidays, seasonality_prior_scale, \
holidays_prior_scale, changepoint_prior_scale, mcmc_samples, interval_width, uncertainty_samples, \
boxcoxT, skipFitting):
forecastsDict = {}
mse = {}
resids = {}
nForecasts = sumMat.shape[0]
##
    # If you have a dictionary of Prophet Dataframes already, skip the prophet part, and put all the values into a dictionary
##
if skipFitting == True:
for key in range(len(y.columns.tolist())-1):
forecastsDict[key] = pd.DataFrame(y.iloc[:,key+1])
forecastsDict[key] = forecastsDict[key].rename(columns = {forecastsDict[key].columns[0] : 'yhat'})
if skipFitting == False:
if method == 'FP':
nForecasts = sum(list(map(sum, nodes)))+1
for node in range(nForecasts):
nodeToForecast = pd.concat([y.iloc[:, [0]], y.iloc[:, node+1]], axis = 1)
if isinstance(cap, pd.DataFrame):
cap1 = cap.iloc[:, node]
else:
cap1 = cap
if isinstance(capF, pd.DataFrame):
cap2 = capF.iloc[:, node]
else:
cap2 = capF
if isinstance(changepoints, pd.DataFrame):
changepoints1 = changepoints[:, node]
else:
changepoints1 = changepoints
if isinstance(n_changepoints, list):
n_changepoints1 = n_changepoints[node]
else:
n_changepoints1 = n_changepoints
##
# Put the forecasts into a dictionary of dataframes
##
with contextlib.redirect_stdout(open(os.devnull, "w")):
# Prophet related stuff
nodeToForecast = nodeToForecast.rename(columns = {nodeToForecast.columns[0] : 'ds'})
nodeToForecast = nodeToForecast.rename(columns = {nodeToForecast.columns[1] : 'y'})
if capF is None:
growth = 'linear'
m = Prophet(growth=growth,
changepoints=changepoints1,
n_changepoints=n_changepoints1,
yearly_seasonality=yearly_seasonality,
weekly_seasonality=weekly_seasonality,
daily_seasonality=daily_seasonality,
holidays=holidays,
seasonality_prior_scale=seasonality_prior_scale,
holidays_prior_scale=holidays_prior_scale,
changepoint_prior_scale=changepoint_prior_scale,
mcmc_samples=mcmc_samples,
interval_width=interval_width,
uncertainty_samples=uncertainty_samples)
else:
growth = 'logistic'
m = Prophet(growth=growth,
                                changepoints=changepoints1,
                                n_changepoints=n_changepoints1,
yearly_seasonality=yearly_seasonality,
weekly_seasonality=weekly_seasonality,
daily_seasonality=daily_seasonality,
holidays=holidays,
seasonality_prior_scale=seasonality_prior_scale,
holidays_prior_scale=holidays_prior_scale,
changepoint_prior_scale=changepoint_prior_scale,
mcmc_samples=mcmc_samples,
interval_width=interval_width,
uncertainty_samples=uncertainty_samples)
nodeToForecast['cap'] = cap1
m.fit(nodeToForecast)
future = m.make_future_dataframe(periods = h, freq = freq, include_history = include_history)
if capF is not None:
future['cap'] = cap2
##
# Base Forecasts, Residuals, and MSE
##
forecastsDict[node] = m.predict(future)
resids[node] = y.iloc[:, node+1] - forecastsDict[node].yhat[:-h].values
mse[node] = np.mean(np.array(resids[node])**2)
##
# If logistic use exponential function, so that values can be added correctly
##
if capF is not None:
forecastsDict[node].yhat = np.exp(forecastsDict[node].yhat)
if boxcoxT is not None:
forecastsDict[node].yhat = inv_boxcox(forecastsDict[node].yhat, boxcoxT[node])
forecastsDict[node].trend = inv_boxcox(forecastsDict[node].trend, boxcoxT[node])
if "seasonal" in forecastsDict[node].columns.tolist():
forecastsDict[node].seasonal = inv_boxcox(forecastsDict[node].seasonal, boxcoxT[node])
if "daily" in forecastsDict[node].columns.tolist():
forecastsDict[node].daily = inv_boxcox(forecastsDict[node].daily, boxcoxT[node])
if "weekly" in forecastsDict[node].columns.tolist():
forecastsDict[node].weekly = inv_boxcox(forecastsDict[node].weekly, boxcoxT[node])
if "yearly" in forecastsDict[node].columns.tolist():
forecastsDict[node].yearly = inv_boxcox(forecastsDict[node].yearly, boxcoxT[node])
if "holidays" in forecastsDict[node].columns.tolist():
forecastsDict[node].yearly = inv_boxcox(forecastsDict[node].yearly, boxcoxT[node])
##
# Now, Revise them
##
if method == 'BU' or method == 'AHP' or method == 'PHA':
y1 = y.copy()
nCols = len(list(forecastsDict.keys()))+1
if method == 'BU':
'''
Pros:
No information lost due to aggregation
Cons:
Bottom level data can be noisy and more challenging to model and forecast
'''
hatMat = np.zeros([len(forecastsDict[0].yhat),1])
for key in range(nCols-sumMat.shape[1]-1, nCols-1):
f1 = np.array(forecastsDict[key].yhat)
f2 = f1[:, np.newaxis]
if np.all(hatMat == 0):
hatMat = f2
else:
hatMat = np.concatenate((hatMat, f2), axis = 1)
if method == 'AHP':
'''
Pros:
Creates reliable aggregate forecasts, and good for low count data
Cons:
Unable to capture individual series dynamics
'''
if boxcoxT is not None:
for column in range(len(y.columns.tolist())-1):
y1.iloc[:,column+1] = inv_boxcox(y1.iloc[:, column+1], boxcoxT[column])
##
# Find Proportions
##
fcst = forecastsDict[0].yhat
fcst = fcst[:, np.newaxis]
numBTS = sumMat.shape[1]
btsDat = pd.DataFrame(y1.iloc[:,nCols-numBTS:nCols])
divs = np.divide(np.transpose(np.array(btsDat)),np.array(y1.iloc[:,1]))
props = divs.mean(1)
props = props[:, np.newaxis]
hatMat = np.dot(np.array(fcst),np.transpose(props))
if method == 'PHA':
'''
Pros:
Creates reliable aggregate forecasts, and good for low count data
Cons:
Unable to capture individual series dynamics
'''
if boxcoxT is not None:
for column in range(len(y.columns.tolist())-1):
y1.iloc[:,column+1] = inv_boxcox(y1.iloc[:, column+1], boxcoxT[column])
##
# Find Proportions
##
fcst = forecastsDict[0].yhat
fcst = fcst[:, np.newaxis]
numBTS = sumMat.shape[1]
btsDat = pd.DataFrame(y1.iloc[:,nCols-numBTS:nCols])
btsSum = btsDat.sum(0)
topSum = sum(y1.iloc[:,1])
props = btsSum/topSum
props = props[:, np.newaxis]
hatMat = np.dot(np.array(fcst),np.transpose(props))
newMat = np.empty([hatMat.shape[0],sumMat.shape[0]])
for i in range(hatMat.shape[0]):
newMat[i,:] = np.dot(sumMat, np.transpose(hatMat[i,:]))
if method == 'FP':
newMat = forecastProp(forecastsDict, nodes)
if method == 'OLS' or method == 'WLSS' or method == 'WLSV':
if capF is not None:
print("An error might occur because of how these methods are defined (They can produce negative values). If it does, then please use another method")
newMat = optimalComb(forecastsDict, sumMat, method, mse)
for key in forecastsDict.keys():
        # write the revised forecasts back into the per-node dataframes
        forecastsDict[key].yhat = newMat[:, key]
##
# If Logistic fit values with natural log function to revert back to format of input
##
if capF is not None:
forecastsDict[key].yhat = np.log(forecastsDict[key].yhat)
return forecastsDict
#%%
def forecastProp(forecastsDict, nodes):
'''
Cons:
Produces biased revised forecasts even if base forecasts are unbiased
'''
nCols = len(list(forecastsDict.keys()))+1
##
# Find proportions of forecast at each step ahead, and then alter forecasts
##
levels = len(nodes)
column = 0
firstNode = 1
newMat = np.empty([len(forecastsDict[0].yhat),nCols - 1])
newMat[:,0] = forecastsDict[0].yhat
lst = [x for x in range(nCols-1)]
for level in range(levels):
nodesInLevel = len(nodes[level])
foreSum = 0
for node in range(nodesInLevel):
numChild = nodes[level][node]
lastNode = firstNode + numChild
lst = [x for x in range(firstNode, lastNode)]
baseFcst = np.array([forecastsDict[k].yhat[:] for k in lst])
foreSum = np.sum(baseFcst, axis = 0)
foreSum = foreSum[:, np.newaxis]
if column == 0:
revTop = np.array(forecastsDict[column].yhat)
revTop = revTop[:, np.newaxis]
else:
revTop = np.array(newMat[:,column])
revTop = revTop[:, np.newaxis]
newMat[:,firstNode:lastNode] = np.divide(np.multiply(np.transpose(baseFcst), revTop), foreSum)
column += 1
firstNode += numChild
return newMat
#%%
def optimalComb(forecastsDict, sumMat, method, mse):
hatMat = np.zeros([len(forecastsDict[0].yhat),1])
for key in forecastsDict.keys():
f1 = np.array(forecastsDict[key].yhat)
f2 = f1[:, np.newaxis]
if np.all(hatMat == 0):
hatMat = f2
else:
hatMat = np.concatenate((hatMat, f2), axis = 1)
##
# Multiply the Summing Matrix Together S*inv(S'S)*S'
##
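    # The branches below build the reconciliation matrix P: for OLS,
    # P = S (S'S)^-1 S'; the weighted variants insert a diagonal weight matrix W,
    # P = S (S' W^-1 S)^-1 S' W^-1, with W = diag(row sums of S) for WLSS and
    # W = diag(per-series MSE + eps) for WLSV.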
if method == "OLS":
optiMat = np.dot(np.dot(sumMat, np.linalg.inv(np.dot(np.transpose(sumMat), sumMat))),np.transpose(sumMat))
if method == "WLSS":
diagMat = np.diag(np.transpose(np.sum(sumMat, axis = 1)))
optiMat = np.dot(np.dot(np.dot(sumMat, np.linalg.inv(np.dot(np.dot(np.transpose(sumMat), np.linalg.inv(diagMat)), sumMat))), np.transpose(sumMat)), np.linalg.inv(diagMat))
if method == "WLSV":
diagMat = [mse[key] for key in mse.keys()]
diagMat = np.diag(np.flip(np.hstack(diagMat)+0.0000001, 0))
optiMat = np.dot(np.dot(np.dot(sumMat, np.linalg.inv(np.dot(np.dot(np.transpose(sumMat), np.linalg.inv(diagMat)), sumMat))), np.transpose(sumMat)), np.linalg.inv(diagMat))
newMat = np.empty([hatMat.shape[0],sumMat.shape[0]])
for i in range(hatMat.shape[0]):
newMat[i,:] = np.dot(optiMat, np.transpose(hatMat[i,:]))
return newMat
|
mit
| 7,184,139,017,834,954,000
| 45.080139
| 179
| 0.548851
| false
| 3.780446
| false
| false
| false
|
tommasoberlose/p2p_bittorrent
|
SocketFunc.py
|
1
|
1263
|
import socket
####### SOCKET
def create_socket_server(myHost, port):
s = None
for res in socket.getaddrinfo(None, int(port), socket.AF_UNSPEC,socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.bind(sa)
s.listen(1000)
except socket.error as msg:
s.close()
s = None
continue
break
return s
def create_socket_client(myHost, port):
s = None
for res in socket.getaddrinfo(myHost, int(port), socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.connect(sa)
except socket.error as msg:
s.close()
s = None
continue
break
return s
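# Illustrative usage sketch (host/port values are assumptions, not from this project):
#   server_sock = create_socket_server(None, 8000)        # listening socket on port 8000
#   client_sock = create_socket_client('localhost', 8000)
# Note: forward() below expects 'const' (providing ERROR_PKT) and 'func' (providing
# create_socket_client and roll_the_dice) to be available as project modules; they
# are not imported in this file.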
def forward(pk, addr, l):
if pk != bytes(const.ERROR_PKT, "ascii"):
for x in l:
if addr != x[0]:
s = func.create_socket_client(func.roll_the_dice(x[0]), x[1])
if not(s is None):
s.sendall(pk)
#write_daemon_success("Daemon", "-", "Forward da " + addr + " a " + x[0])
s.close()
|
mit
| -3,098,031,255,870,166,000
| 24.26
| 107
| 0.585115
| false
| 2.944056
| false
| false
| false
|
nlgndnmz/ctpsingle
|
BeyerHedetmieni.py
|
1
|
3481
|
import sys
# G is the gamma matrix
# par is the parent array
# n is the number of nodes
def writeGammaMatrix(gammaFile, G, par, n):
for i in range(n):
for j in range(n):
G[i][j] = 0
for i in range(n):
G[i][i] = 1
j = par[i]-1
while j > -1:
G[j][i] = 1
j = par[j]-1
for i in range(n):
for j in range(n):
gammaFile.write(str(G[i][j]) + ' ')
gammaFile.write('\n')
gammaFile.write('\n')
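# Note: the gamma matrix written above is the ancestor matrix of the rooted tree:
# G[j][i] == 1 exactly when node j lies on the root-to-i path (including i itself).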
# par is the parent array
# n is the number of nodes
def writeAdjMatrix(adjFile, par, n):
adjFile.write(str(n-1)) # number of edges
for i in range(1, n):
adjFile.write(' ' + str(par[i]-1) + ' ' + str(i) + ' ')
adjFile.write('\n');
# writes a dot file to be processed with Graphviz
def writeDotFile(par, n, num):
dotFile = open('./GammaAdjMatrices/dotFile' + str(n) + '_' + str(num-1) + '.dot', 'w')
dotFile.write('digraph G { \n')
for i in range(1, n):
dotFile.write(str(par[i]-1) + ' -> ' + str(i) + ';\n')
dotFile.write('}\n')
dotFile.close()
# n is the number of nodes
# k is the max number of children allowed per node
def getRootedTrees(adjFile, gammaFile, n, k, writeDots):
num = 0
L = []
par = []
levels = []
children = []
G = []
p = n-1
q = 0
for i in range(n):
L.append(i)
par.append(i)
levels.append(i+1)
children.append(0)
G.append([])
for j in range(n):
G[i].append(0)
while (p > 0):
for i in range(n):
children[i] = 0
for i in range(n):
children[par[i]] += 1
if max(children) <= k:
num += 1
writeAdjMatrix(adjFile, par, n)
writeGammaMatrix(gammaFile, G, par, n)
if writeDots:
writeDotFile(par, n, num)
p = 0
for i in range(n-1, -1, -1):
if L[i] > 1:
p = i
break
if p == 0:
break
for i in range(p-1, -1, -1):
if L[i] == L[p] - 1:
q = i
break
for i in range(p, n):
L[i] = L[i-p+q]
for i in range(1, n):
x = L[i]
par[i] = levels[x-1]
levels[x] = i+1
# n is the number of nodes
# k is the max number of children allowed per node
def getNumTrees(n, k):
num = 0
L = []
par = []
levels = []
children = []
p = n-1
q = 0
for i in range(n):
L.append(i)
par.append(i)
levels.append(i+1)
children.append(0)
while (p > 0):
for i in range(n):
children[i] = 0
for i in range(n):
children[par[i]] += 1
if max(children) <= k:
num += 1
p = 0
for i in range(n-1, -1, -1):
if L[i] > 1:
p = i
break
if p == 0:
break
for i in range(p-1, -1, -1):
if L[i] == L[p] - 1:
q = i
break
for i in range(p, n):
L[i] = L[i-p+q]
for i in range(1, n):
x = L[i]
par[i] = levels[x-1]
levels[x] = i+1
return num
if __name__ == "__main__":
if len(sys.argv) < 2:
print 'Usage: python', sys.argv[0], '<max_num_nodes> [<max_branching_factor>]'
sys.exit(0)
maxNumNodes = 1 + int(sys.argv[1])
k = maxNumNodes
if len(sys.argv) == 3:
k = int(sys.argv[2])
for i in range(2, maxNumNodes):
x = getNumTrees(i, k)
print 'Number of trees with ', i, ' nodes: ', x
if x > 100000:
print 'Sorry, that is too many trees to write on file. Aborting now, already written files are ok to use.'
sys.exit(3)
adjFile = open('./GammaAdjMatrices/AdjacencyMatrix' + str(i) + '.txt', 'w')
gammaFile = open('./GammaAdjMatrices/GammaMatrix' + str(i) + '.txt', 'w')
adjFile.write(str(i) + ' ' + str(x) + '\n\n')
gammaFile.write(str(i) + ' ' + str(x) + '\n')
getRootedTrees(adjFile, gammaFile, i, k, False)
adjFile.close()
gammaFile.close()
|
gpl-3.0
| 5,346,986,811,560,547,000
| 17.918478
| 109
| 0.561046
| false
| 2.334675
| false
| false
| false
|
fzimmermann89/pyload
|
module/plugins/crypter/DevhostStFolder.py
|
1
|
2306
|
# -*- coding: utf-8 -*-
#
# Test links:
# http://d-h.st/users/shine/?fld_id=37263#files
import re
import urlparse
from module.plugins.internal.SimpleCrypter import SimpleCrypter, create_getInfo
class DevhostStFolder(SimpleCrypter):
__name__ = "DevhostStFolder"
__type__ = "crypter"
__version__ = "0.08"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?d-h\.st/users/(?P<USER>\w+)(/\?fld_id=(?P<ID>\d+))?'
__config__ = [("activated" , "bool", "Activated" , True),
("use_premium" , "bool", "Use premium account if available" , True),
("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
__description__ = """D-h.st folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("zapp-brannigan", "fuerst.reinje@web.de"),
("Walter Purcaro", "vuolter@gmail.com")]
LINK_PATTERN = r'(?:/> |;">)<a href="(.+?)"(?!>Back to \w+<)'
OFFLINE_PATTERN = r'"/cHP">test\.png<'
def check_name_size(self, getinfo=True):
if not self.info or getinfo:
self.log_debug("File info (BEFORE): %s" % self.info)
self.info.update(self.get_info(self.pyfile.url, self.html))
self.log_debug("File info (AFTER): %s" % self.info)
try:
if self.info['pattern']['ID'] == "0":
raise
p = r'href="(.+?)">Back to \w+<'
m = re.search(p, self.html)
html = self.load(urlparse.urljoin("http://d-h.st/", m.group(1)),
cookies=False)
p = '\?fld_id=%s.*?">(.+?)<' % self.info['pattern']['ID']
m = re.search(p, html)
self.pyfile.name = m.group(1)
except Exception, e:
self.log_debug(e, trace=True)
self.pyfile.name = self.info['pattern']['USER']
try:
folder = self.info['folder'] = self.pyfile.name
except Exception:
pass
self.log_debug("File name: %s" % self.pyfile.name,
"File folder: %s" % self.pyfile.name)
getInfo = create_getInfo(DevhostStFolder)
|
gpl-3.0
| 6,376,989,188,903,538,000
| 33.41791
| 95
| 0.506071
| false
| 3.280228
| false
| false
| false
|
dbousque/lymp
|
srcs/lymp.py
|
1
|
6462
|
from time import time
from struct import pack, unpack
import bson, sys, os, codecs
from random import randint
from traceback import print_exc
def int_to_int64_bytes(i):
return pack('>q', i)
def py_to_bson(val):
if type(val) is int:
return bson.int64.Int64(val)
if sys.version_info.major == 2 and type(val) is str:
return bson.binary.Binary(val)
return val
def exit_lymp():
# closing 'python_log'
sys.stdout.close()
exit(0)
# A communication class, could be implemented using other ipc methods,
# it only needs the methods 'send_bytes' and 'get_bytes'
class PipeReaderWriter:
def __init__(self, read_pipe_name, write_pipe_name):
self.get_pipes(read_pipe_name, write_pipe_name)
def get_pipes(self, read_pipe_name, write_pipe_name):
# Order of open matters, since it is blocking, should match OCaml order
# 0 to be unbuffered, so we don't have to flush (better performance ?)
self.write_pipe = open(write_pipe_name, 'wb', 0)
self.read_pipe = open(read_pipe_name, 'rb', 0)
def send_bytes(self, byts):
# '>q' to force signed 8 bytes integer
self.write_pipe.write(pack('>q', len(byts)))
#self.write_pipe.flush()
self.write_pipe.write(byts)
#self.write_pipe.flush()
def get_bytes(self):
# '>q' to force signed 8 bytes integer
try:
nb_bytes = unpack('>q', self.read_pipe.read(8))[0]
except:
# ocaml process has been terminated
exit_lymp()
byts = b'' if sys.version_info.major == 3 else ""
while len(byts) < nb_bytes:
			byts += self.read_pipe.read(nb_bytes - len(byts))
return byts
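# Wire format used on both pipes: an 8-byte big-endian signed length header
# followed by that many payload bytes, e.g. pack('>q', 5) + b'hello' for a
# five-byte message.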
class ExecutionHandler:
to_ret_types = {
int: "i",
tuple: "t",
list: "l",
str: "s",
float: "f",
type(None): "n",
bool: "b",
bytes: "B"
}
# for python 2, unicode is str and str is bytes
if sys.version_info.major == 2:
to_ret_types[unicode] = "s"
to_ret_types[str] = "B"
def __init__(self, reader_writer):
self.reader_writer = reader_writer
self.modules = {}
self.objs = {}
self.ref_nb = 0
def loop(self):
# don't recursively call .loop, to avoid stack overflow
while True:
command_bytes = self.reader_writer.get_bytes()
if command_bytes == b'done':
exit_lymp()
instruction = bson.BSON.decode(bson.BSON(command_bytes))
try:
ret = self.execute_instruction(instruction)
# data may still be in the buffer
sys.stdout.flush()
self.send_ret(ret, ret_ref=("R" in instruction))
except BaseException as e:
# exception whilst executing, inform ocaml side
print_exc()
# data may still be in the buffer
sys.stdout.flush()
self.send_ret("", exception=True)
def ret_to_msg(self, ret, ret_ref):
msg = {}
		# reference (type not supported or explicitly asked to)
if ret_ref or (type(ret) not in self.to_ret_types):
self.ref_nb += 1
self.objs[self.ref_nb] = ret
msg["t"] = "r"
msg["v"] = bson.code.Code(str(self.ref_nb))
else:
msg["t"] = self.to_ret_types[type(ret)]
# tuples are just like lists, but their type "t" is "t" instead of "l"
if type(ret) is tuple:
ret = list(ret)
# if type is list, further resolve
if type(ret) is list:
msg["v"] = []
for elt in ret:
# ret_ref is false here (would not be in the else otherwise)
msg["v"].append(self.ret_to_msg(elt, False))
else:
msg["v"] = py_to_bson(ret)
return msg
def send_ret(self, ret, exception=False, ret_ref=False):
if exception:
msg = {}
msg["t"] = "e"
msg["v"] = ""
else:
msg = self.ret_to_msg(ret, ret_ref)
msg = bytes(bson.BSON.encode(msg))
self.reader_writer.send_bytes(msg)
def resolve_args(self, args):
named = {}
i = 0
for arg in args:
# resolve named args (list of size 2, first one being a bson.code.Code starting with "!")
if type(arg) is list and len(arg) == 2 and type(arg[0]) is bson.code.Code and str(arg[0])[0] == "!":
named[str(arg[0])[1:]] = self.resolve_args([arg[1]])[0][0]
del args[i]
continue
# if bytes
if type(arg) is bson.binary.Binary:
args[i] = bytes(arg)
# resolve reference args (using bson jscode)
if type(arg) is bson.code.Code:
args[i] = self.objs[int(arg)]
if type(arg) is bson.int64.Int64:
args[i] = int(arg)
# for python 2, if arg is str, convert to unicode
if sys.version_info.major == 2 and type(arg) is str:
args[i] = args[i].decode('utf-8')
# for python 2, if arg is bytes, convert to str
if sys.version_info.major == 2 and type(arg) is bson.binary.Binary:
args[i] = str(arg)
# if we have a list, we must recursively resolve
if type(arg) is list:
args[i] = self.resolve_args(arg)[0]
# if we have a dict, it is a tuple inside "v"
if type(arg) is dict:
args[i] = tuple(self.resolve_args(arg["v"])[0])
i += 1
return args, named
def execute_instruction(self, instruction):
if "r" in instruction:
			# if we are asked to release a non-existent or already released reference
if "d" in instruction and instruction["r"] not in self.objs:
return None
# module is the object referenced, later we call getattr to get the method called
module = self.objs[instruction["r"]]
# if we were asked to 'detach' (release) the reference
if "d" in instruction:
del self.objs[instruction["r"]]
return None
# if we were asked to return the reference
if "g" in instruction:
return module
else:
# python 2 builtin module has a different name
if sys.version_info.major == 2 and instruction["m"] == "builtins":
instruction["m"] = "__builtin__"
if instruction["m"] not in self.modules:
__import__(instruction["m"])
self.modules[instruction["m"]] = sys.modules[instruction["m"]]
module = self.modules[instruction["m"]]
# set attribute
if "s" in instruction:
args, named = self.resolve_args(instruction["a"])
arg = args[0]
setattr(module, instruction["f"], arg)
return None
func_or_attr = getattr(module, instruction["f"])
# get attribute
if "t" in instruction:
return func_or_attr
args = instruction["a"]
args, named = self.resolve_args(args)
ret = func_or_attr(*args, **named)
return ret
working_directory = sys.argv[1]
write_pipe_path = sys.argv[2]
read_pipe_path = sys.argv[3]
# changing dir
os.chdir(working_directory)
sys.path.insert(0, working_directory)
# redirect stdout to 'python_log'
sys.stdout = codecs.open('python_log', 'w', encoding='utf-8')
sys.stderr = sys.stdout
communication = PipeReaderWriter(read_pipe_path, write_pipe_path)
handler = ExecutionHandler(communication)
handler.loop()
|
mit
| -693,447,540,580,453,000
| 29.625592
| 103
| 0.656608
| false
| 2.864362
| false
| false
| false
|
johnjohnlin/nicotb
|
sim/standalone/test_semaphore.py
|
1
|
1602
|
#!/usr/bin/env python
# Copyright (C) 2017,2019, Yu Sheng Lin, johnjohnlys@media.ee.ntu.edu.tw
# This file is part of Nicotb.
# Nicotb is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Nicotb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Nicotb. If not, see <http://www.gnu.org/licenses/>.
from nicotb import *
from nicotb.utils import Scoreboard
from nicotb.primitives import Semaphore
from nicotb.event import waiting_coro
import numpy as np
ACQ = [1,2,3,4]
SUM = sum(ACQ)
def producer():
global resource
for i in range(SUM):
for j in range(1+np.random.randint(20)):
yield ck_ev
resource += 1
yield from sem.Release()
def consumer():
global resource
scb = Scoreboard("Semaphore")
tst = scb.GetTest("test")
tst.Expect([])
for i in ACQ:
for j in range(1+np.random.randint(10)):
yield ck_ev
yield from sem.Acquire(i)
resource -= i
assert resource >= 0
tst.Get([])
scb.ReportAll()
ck_ev = CreateEvent()
resource = 0
sem = Semaphore(-1)
RegisterCoroutines([
producer(),
consumer(),
])
for i in range(1000):
SignalEvent(ck_ev)
MainLoop()
if not waiting_coro[ck_ev]:
break
print("Simulation stop at {}".format(i))
|
gpl-3.0
| 9,214,477,608,298,877,000
| 24.83871
| 72
| 0.721598
| false
| 3.098646
| false
| false
| false
|
mkauppila/rachel
|
bot/parse.py
|
1
|
1792
|
import irc
def parse_nick(nick):
""" Separates nick from the mode characters.
Examples:
    parse_nick('@_markus') => ('_markus', 'o')
parse_nick('+_markus') => ('_markus', 'v')
"""
converter = {'@' : 'o', '+' : 'v'}
modes = converter.keys()
first_character = nick[0]
if first_character in modes:
return (nick[1:], converter[first_character])
else:
return (nick, None)
def parse_nick_from_prefix(prefix):
""" Parse nick from the beginning of message prefix
Used by JOIN and PART message handlers.
"""
end_index = prefix.find('!')
return prefix[0:end_index]
def parse_messages_from(data):
""" Separate server messages
"""
return data.split('\r\n')
def parse_message(message):
""" Parse messages from IRC server.
Message format is:
[:prefix] command [[param1] param2] [:trailing]
Only command is mandatory, other parts are optional.
Args:
Message: Server message that'll be parsed
Returns:
Message object containing the parsed information.
"""
if not message or message == '':
return None
prefix, command, params, trailing = None, None, None, None
# parse prefix
if message[0] == ':':
end_index = message.find(' ')
prefix = message[1:end_index]
# remove the parsed section of the message and the whitespace
message = message[end_index + 1:]
# parse trailing
start_index_of_trailing = message.find(':')
if start_index_of_trailing != -1: # has trailing
trailing = message[start_index_of_trailing + 1:]
# update the message, only command and params left
message = message[0:start_index_of_trailing]
# remove redundant white space
message = message.strip(' ')
command_and_params = message.split(' ')
command = command_and_params[0]
params = command_and_params[1:]
return irc.Message(prefix, command, params, trailing)
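# Illustrative example (the message text is an assumption, not project data):
#   parse_message(':nick!user@host PRIVMSG #chan :hello there')
# yields prefix='nick!user@host', command='PRIVMSG', params=['#chan'],
# trailing='hello there'.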
|
mit
| -63,711,807,403,173,780
| 23.547945
| 63
| 0.682478
| false
| 3.246377
| false
| false
| false
|
ImmaculateObsession/nest
|
pebbles/forms.py
|
1
|
3224
|
from django import forms
from django.utils.text import slugify
from suit_redactor.widgets import RedactorWidget
from pebbles.models import (
Pebble,
PebblePage,
)
class PebblePageForm(forms.Form):
title = forms.CharField(
max_length=140,
required=True,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
slug = forms.SlugField(
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
is_live = forms.BooleanField(initial=False, required=False)
standalone = forms.BooleanField(initial=False, required=False)
content = forms.CharField(
required=False,
widget=RedactorWidget,
)
def __init__(self, *args, **kwargs):
selected_pebble = None
if kwargs.get('selected_pebble'):
selected_pebble = kwargs.pop('selected_pebble')
self.pebbles = kwargs.pop('pebbles')
super(PebblePageForm, self).__init__(*args, **kwargs)
choices = [(pebble.id, pebble.title) for pebble in self.pebbles]
if choices and not selected_pebble:
selected_pebble = choices[0][0]
self.fields['pebble'] = forms.ChoiceField(
choices=choices,
initial=selected_pebble,
widget=forms.Select(attrs={'class':'form-control',}),
)
def clean_slug(self):
slug = self.cleaned_data.get('slug')
if not slug or slug == '':
slug = slugify(self.cleaned_data['title'])
return slug
def clean(self):
cleaned_data = self.cleaned_data
slug = cleaned_data.get('slug')
pebble = Pebble.objects.get(id=cleaned_data.get('pebble'))
if slug != self.initial.get('slug') and PebblePage.objects.filter(pebble=pebble, slug=slug).exists():
raise forms.ValidationError("Slug matches an existing page")
return cleaned_data
class PebbleSettingsForm(forms.Form):
site_title = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
facebook_page = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
twitter_page = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
youtube_channel = forms.CharField(
max_length=100,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
tagline = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
show_rss = forms.BooleanField()
copyright = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
feed_description = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
feed_title = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
|
mit
| 3,485,214,062,149,560,000
| 29.130841
| 109
| 0.611042
| false
| 3.856459
| false
| false
| false
|
philroche/Django-tinymce-filebrowser
|
mce_filebrowser/models.py
|
1
|
1228
|
import datetime
from django.db import models
from django.utils.translation import ugettext as _
from mce_filebrowser.conf import LOCAL_MCE_FILEBROWSER_UPLOADDIR,LOCAL_MCE_FILEBROWSER_PERUSER
def content_file_name(instance, filename):
if LOCAL_MCE_FILEBROWSER_PERUSER == True:
return "%s/%s/%s/%s" %(LOCAL_MCE_FILEBROWSER_UPLOADDIR,'user-%s' % str(instance.user_id), datetime.datetime.now().strftime("%Y/%m/%d"), filename)
else:
return "%s/%s/%s" %(LOCAL_MCE_FILEBROWSER_UPLOADDIR, datetime.datetime.now().strftime("%Y/%m/%d"), filename)
class FileBrowserFile(models.Model):
""" Uploaded file model """
FILE_TYPES = (
('img', _('Image')),
('doc', _('Document')),
)
file_type = models.CharField(max_length=3, choices=FILE_TYPES)
uploaded_file = models.FileField(
upload_to=content_file_name,
verbose_name = _('File / Image'),
max_length=300,
)
create_date = models.DateTimeField(
auto_now_add=True, verbose_name=_('Create date')
)
user_id = models.IntegerField(null=True, blank=True, verbose_name=_('Who does this file belong to?'))
def __unicode__(self):
return u'%s' % self.uploaded_file.name
|
mit
| -6,621,422,880,594,318,000
| 33.111111
| 153
| 0.645765
| false
| 3.430168
| false
| false
| false
|
wrightjb/bolt-planar
|
setup.py
|
1
|
2408
|
# setup.py for planar
#
# $Id$
import os
import sys
import shutil
from distutils.core import setup, Extension
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
if sys.version_info >= (3, 0):
raise ImportError("build_py_2to3 not found in distutils - it is required for Python 3.x")
from distutils.command.build_py import build_py
suffix = ""
else:
suffix = "-py3k"
srcdir = os.path.dirname(__file__)
def read(fname):
return open(os.path.join(srcdir, fname)).read()
include_dirs = ['include']
extra_compile_args = []
if 'SETUP_PY_CFLAGS' in os.environ:
# SETUP_PY_CFLAGS allows you to pass in CFLAGS
# in a disutils-friendly way. Using CFLAGS directly
# causes linking to fail for some python versions
extra_compile_args.append(os.environ['SETUP_PY_CFLAGS'])
setup(
name='planar',
version='0.4', # *** REMEMBER TO UPDATE __init__.py ***
description='2D planar geometry library for Python.',
long_description=read('README.txt'),
provides=['planar'],
author='Casey Duncan',
author_email='casey.duncan@gmail.com',
url='http://bitbucket.org/caseman/planar/',
license='BSD',
classifiers = [
'Development Status :: 3 - Alpha',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
],
platforms = 'any',
package_dir={'planar': 'lib/planar',
'planar.test': 'test'},
packages=['planar', 'planar.test'],
ext_modules=[
Extension('planar.c',
['lib/planar/cmodule.c',
'lib/planar/cvector.c',
'lib/planar/ctransform.c',
'lib/planar/cline.c',
'lib/planar/cbox.c',
'lib/planar/cpolygon.c',
],
include_dirs=include_dirs,
#library_dirs=library_dirs,
#libraries=libraries,
#extra_link_args=extra_link_args,
extra_compile_args=extra_compile_args,
#define_macros=macros,
),
],
cmdclass = {'build_py': build_py},
)
|
bsd-3-clause
| -2,776,766,464,232,461,000
| 28.365854
| 97
| 0.634551
| false
| 3.415603
| false
| false
| false
|
teddy-michel/Mimir
|
games/forms.py
|
1
|
2992
|
from django.forms import ModelForm, Textarea, HiddenInput, IntegerField, CharField, Select
from django.utils.translation import ugettext as _
from base.models import Tag
from .models import Game, Saga, SagaGame, GameAttribute, GameLink, GameTag, GameUser
class GameForm(ModelForm):
class Meta:
model = Game
fields = ["title", "title_vo", "year", "infos", "image"]
widgets = {
"infos": Textarea(attrs={"rows": 4}),
}
class GameAttributeForm(ModelForm):
class Meta:
model = GameAttribute
fields = ["name", "value"]
class GameLinkForm(ModelForm):
class Meta:
model = GameLink
fields = ["name", "uri", "lang"]
class GameTagForm(ModelForm):
tag_name = CharField(label=_("Tag name"), max_length=100)
def __init__(self, *args, **kwargs):
super(ModelForm, self).__init__(*args, **kwargs)
self.fields["tag"].required = False
if hasattr(self.instance, "tag"):
self.initial["tag_name"] = self.instance.tag.name
else:
self.initial["tag_name"] = ""
def save(self, commit=True):
name = self.cleaned_data.get("tag_name").strip()
if self.initial["tag_name"] != name:
tag = Tag.objects.get_or_create(name=name)[0]
self.instance.tag = tag
return super(ModelForm, self).save(commit=commit)
class Meta:
model = GameTag
fields = ["tag", "tag_name", "infos"]
widgets = {
"tag": HiddenInput(),
}
class SagaForm(ModelForm):
class Meta:
model = Saga
fields = ["title", "title_vo", "infos"]
widgets = {
"infos": Textarea(attrs={"rows": 4}),
}
class SagaGameForm(ModelForm):
game_id = IntegerField(label=_("Title"), widget=Select(attrs={"class": "select_game"}), required=False)
game_title = CharField(widget=HiddenInput(), max_length=150, required=False)
def __init__(self, *args, **kwargs):
super(ModelForm, self).__init__(*args, **kwargs)
if hasattr(self.instance, "game"):
self.initial["game_id"] = self.instance.game.id
self.initial["game_title"] = self.instance.game.title
if self.instance.game.year:
self.initial["game_title"] += " (%s)" % self.instance.game.year
else:
self.initial["game_id"] = None
self.initial["game_title"] = ""
def save(self, commit=True):
if self.instance.game is None and self.cleaned_data.get("game_id"):
self.instance.game = Game.objects.get(id=int(self.cleaned_data.get("game_id")))
return super(ModelForm, self).save(commit=commit)
class Meta:
model = SagaGame
fields = ["game_id", "game_title", "game"]
widgets = {
"game": HiddenInput(),
}
class GameUserForm(ModelForm):
class Meta:
model = GameUser
fields = ["bought", "played", "finished"]
|
gpl-3.0
| -415,004,628,209,892,350
| 28.048544
| 107
| 0.580548
| false
| 3.792142
| false
| false
| false
|
acutesoftware/AIKIF
|
aikif/lib/cls_goal_time.py
|
1
|
1843
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# cls_goal_time.py
from aikif.lib.cls_goal import Goal
class GoalTime(Goal):
"""
goals around time - eg maximise use of object / minimize time of task
"""
def __init__(self, maximise=True, current_val=0, target_val=0):
"""
set maximise = True for class to find maximum time (usage) or
set to False to minimise the amount of time (eg reduce task time)
"""
self.current_val = current_val
self.target_val = target_val
self.maximise = maximise
self.strategy = [
{'name':'Travel_walk', 'speed':1, 'max_km_day':30, 'dest_flexibility':100, 'money_cost':0, 'environ_cost':0},
{'name':'Travel_bike', 'speed':5, 'max_km_day':200, 'dest_flexibility':50, 'money_cost':0, 'environ_cost':0},
{'name':'Travel_car', 'speed':60, 'max_km_day':1500, 'dest_flexibility':30, 'money_cost':50, 'environ_cost':50},
{'name':'Travel_bus', 'speed':60, 'max_km_day':1500, 'dest_flexibility':20, 'money_cost':10, 'environ_cost':15}
]
def check_for_success(self):
if self.maximise:
if self.current_val > self.target_val:
return False
else:
return True
else:
if self.current_val <= self.target_val:
return False
else:
return True
def run_plan(self, strategy):
"""
executes a plan by running the passed strategy
and then updates the local results
"""
print ("TODO running strategy : " + strategy['name'] )
def find_best_plan(self):
"""
try each strategy with different amounts
"""
for strat in self.strategy:
self.run_plan(strat)
|
gpl-3.0
| -5,118,131,896,329,419,000
| 33.773585
| 124
| 0.546934
| false
| 3.627953
| false
| false
| false
|
domob1812/huntercore
|
test/functional/auxpow_mining.py
|
1
|
6838
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 Daniel Kraft
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test the merge-mining RPC interface:
# getauxblock, createauxblock, submitauxblock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework import auxpow
class AuxpowMiningTest (BitcoinTestFramework):
def set_test_params (self):
self.num_nodes = 2
def add_options (self, parser):
parser.add_option ("--segwit", dest="segwit", default=False,
action="store_true",
help="Test behaviour with SegWit active")
def run_test (self):
# Enable mock time to be out of IBD.
self.enable_mocktime ()
# Activate segwit if requested.
if self.options.segwit:
self.nodes[0].generate (500)
self.sync_all ()
# Test with getauxblock and createauxblock/submitauxblock.
self.test_getauxblock ()
self.test_create_submit_auxblock ()
def test_common (self, create, submit):
"""
Common test code that is shared between the tests for getauxblock and the
createauxblock / submitauxblock method pair.
"""
# Verify data that can be found in another way.
auxblock = create ()
assert_equal (auxblock['chainid'], 6)
assert_equal (auxblock['algo'], 0)
assert_equal (auxblock['height'], self.nodes[0].getblockcount () + 1)
assert_equal (auxblock['previousblockhash'],
self.nodes[0].getblockhash (auxblock['height'] - 1))
# Calling again should give the same block.
# Try using the (default) algo parameter.
auxblock2 = create (0)
assert_equal (auxblock2, auxblock)
# If we receive a new block, the old hash will be replaced.
self.sync_all ()
self.nodes[1].generate (1)
self.sync_all ()
auxblock2 = create ()
assert auxblock['hash'] != auxblock2['hash']
assert_raises_rpc_error (-8, 'block hash unknown', submit,
auxblock['hash'], "x")
# Invalid format for auxpow.
assert_raises_rpc_error (-1, None, submit,
auxblock2['hash'], "x")
# Invalidate the block again, send a transaction and query for the
# auxblock to solve that contains the transaction.
self.nodes[0].generate (1)
addr = self.nodes[1].getnewaddress ()
txid = self.nodes[0].sendtoaddress (addr, 1)
self.sync_all ()
assert_equal (self.nodes[1].getrawmempool (), [txid])
auxblock = create ()
target = auxpow.reverseHex (auxblock['_target'])
# Compute invalid auxpow.
apow = auxpow.computeAuxpow (auxblock['hash'], target, False)
res = submit (auxblock['hash'], apow)
assert not res
# Compute and submit valid auxpow.
apow = auxpow.computeAuxpow (auxblock['hash'], target, True)
res = submit (auxblock['hash'], apow)
assert res
# Make sure that the block is indeed accepted.
self.sync_all ()
assert_equal (self.nodes[1].getrawmempool (), [])
height = self.nodes[1].getblockcount ()
assert_equal (height, auxblock['height'])
assert_equal (self.nodes[1].getblockhash (height), auxblock['hash'])
# Call getblock and verify the auxpow field.
data = self.nodes[1].getblock (auxblock['hash'])
assert 'auxpow' in data
auxJson = data['auxpow']
assert_equal (auxJson['index'], 0)
assert_equal (auxJson['chainindex'], 0)
assert_equal (auxJson['merklebranch'], [])
assert_equal (auxJson['chainmerklebranch'], [])
assert_equal (auxJson['parentblock'], apow[-160:])
# Also previous blocks should have 'auxpow', since all blocks (also
# those generated by "generate") are merge-mined.
oldHash = self.nodes[1].getblockhash (100)
data = self.nodes[1].getblock (oldHash)
assert 'auxpow' in data
# Check that it paid correctly to the first node.
t = self.nodes[0].listtransactions ("*", 1)
assert_equal (len (t), 1)
t = t[0]
assert_equal (t['category'], "immature")
assert_equal (t['blockhash'], auxblock['hash'])
assert t['generated']
assert_greater_than_or_equal (t['amount'], Decimal ("0.05"))
assert_equal (t['confirmations'], 1)
# Verify the coinbase script. Ensure that it includes the block height
# to make the coinbase tx unique. The expected block height is around
# 200, so that the serialisation of the CScriptNum ends in an extra 00.
# The vector has length 2, which makes up for 02XX00 as the serialised
# height. Check this. (With segwit, the height is different, so we skip
# this for simplicity.)
if not self.options.segwit:
blk = self.nodes[1].getblock (auxblock['hash'])
tx = self.nodes[1].getrawtransaction (blk['tx'][0], 1)
coinbase = tx['vin'][0]['coinbase']
assert_equal ("02%02x00" % auxblock['height'], coinbase[0 : 6])
def test_getauxblock (self):
"""
Test the getauxblock method.
"""
create = self.nodes[0].getauxblock
submit = self.nodes[0].getauxblock
self.test_common (create, submit)
# Ensure that the payout address is changed from one block to the next.
hash1 = auxpow.mineAuxpowBlockWithMethods (create, submit)
hash2 = auxpow.mineAuxpowBlockWithMethods (create, submit)
self.sync_all ()
addr1 = auxpow.getCoinbaseAddr (self.nodes[1], hash1)
addr2 = auxpow.getCoinbaseAddr (self.nodes[1], hash2)
assert addr1 != addr2
info = self.nodes[0].getaddressinfo (addr1)
assert info['ismine']
info = self.nodes[0].getaddressinfo (addr2)
assert info['ismine']
def test_create_submit_auxblock (self):
"""
Test the createauxblock / submitauxblock method pair.
"""
# Check for errors with wrong parameters.
assert_raises_rpc_error (-1, None, self.nodes[0].createauxblock)
assert_raises_rpc_error (-5, "Invalid coinbase payout address",
self.nodes[0].createauxblock,
"this_an_invalid_address")
# Fix a coinbase address and construct methods for it.
coinbaseAddr = self.nodes[0].getnewaddress ()
def create (*algo):
return self.nodes[0].createauxblock (coinbaseAddr, *algo)
submit = self.nodes[0].submitauxblock
# Run common tests.
self.test_common (create, submit)
# Ensure that the payout address is the one which we specify
hash1 = auxpow.mineAuxpowBlockWithMethods (create, submit)
hash2 = auxpow.mineAuxpowBlockWithMethods (create, submit)
self.sync_all ()
addr1 = auxpow.getCoinbaseAddr (self.nodes[1], hash1)
addr2 = auxpow.getCoinbaseAddr (self.nodes[1], hash2)
assert_equal (addr1, coinbaseAddr)
assert_equal (addr2, coinbaseAddr)
if __name__ == '__main__':
AuxpowMiningTest ().main ()
|
mit
| 2,870,474,276,787,400,000
| 35.962162
| 77
| 0.659842
| false
| 3.629512
| true
| false
| false
|
algorhythms/LeetCode
|
276 Paint Fence.py
|
1
|
3181
|
"""
Premium Question
"""
__author__ = 'Daniel'
class Solution(object):
def numWays_oneliner(self, n, k):
return 0 if n < 1 else sum(reduce(lambda F, i: [(k-1)*(F[0]+F[1]), F[0]], xrange(1, n), [k, 0]))
def numWays(self, n, k):
"""
        Abstract the concrete colors into a binary state: whether the last two posts have the same color or different colors
Let F1[i] be the number of ways for A[:i] with last two with different colors
F2[i] be the number of ways for A[:i] with last two with same color
F1[i] = (k-1)*(F1[i-1]+F2[i-1])
F2[i] = F1[i-1]
Optimize the space since only depends on i and i-1
:type n: int
:type k: int
:rtype: int
"""
if n < 1:
return 0
num_diff = k
num_same = 0
for _ in xrange(1, n):
num_diff, num_same = (k-1)*(num_diff+num_same), num_diff
return num_diff+num_same
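    # Sanity check: for n=3, k=2 the only invalid colorings are the two with all
    # three posts the same color, so numWays(3, 2) == 2**3 - 2 == 6.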
def numWays_MLE2(self, n, k):
"""
DP
Let F[i][j][l] be the number of ways of painting for A[:i] with A[i-1] as color j and A[i-2] as color l
:type n: int
:type k: int
:rtype: int
"""
if n < 1:
return 0
F = [[[0 for _ in xrange(k)] for _ in xrange(k)] for _ in xrange(2)]
EMPTY = 0
for j0 in xrange(k):
F[1][j0][EMPTY] = 1
for i in xrange(2, n+1):
for j0 in xrange(k):
for j1 in xrange(k):
F[i%2][j0][j1] = 0
for j0 in xrange(k):
for j1 in xrange(k):
for j2 in xrange(k):
if i == 2:
F[i%2][j0][j1] = F[(i-1)%2][j1][EMPTY]
elif j1 == j2 and j0 != j1:
F[i%2][j0][j1] += F[(i-1)%2][j1][j2]
elif j1 != j2:
F[i%2][j0][j1] += F[(i-1)%2][j1][j2]
ret = 0
for j0 in xrange(k):
for j1 in xrange(k):
ret += F[n%2][j0][j1]
return ret
def numWays_MLE(self, n, k):
"""
DP
let F[i][j][l] be the number of ways of painting for A[:i] with A[i-1] as color j and A[i-2] as color l
:type n: int
:type k: int
:rtype: int
"""
if n < 1:
return 0
F = [[[0 for _ in xrange(k)] for _ in xrange(k)] for _ in xrange(n+1)]
EMPTY = 0
for j0 in xrange(k):
F[1][j0][EMPTY] = 1
for i in xrange(2, n+1):
for j0 in xrange(k):
for j1 in xrange(k):
for j2 in xrange(k):
if i == 2:
F[i][j0][j1] = F[i-1][j1][EMPTY]
elif j1 == j2 and j0 != j1:
F[i][j0][j1] += F[i-1][j1][j2]
elif j1 != j2:
F[i][j0][j1] += F[i-1][j1][j2]
ret = 0
for j0 in xrange(k):
for j1 in xrange(k):
ret += F[n][j0][j1]
return ret
if __name__ == "__main__":
assert Solution().numWays(3, 2) == 6
|
mit
| 631,004,810,354,814,800
| 26.188034
| 111
| 0.408362
| false
| 3.162028
| false
| false
| false
|
Jc2k/libcloudcore
|
libcloudcore/driver.py
|
1
|
2268
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .error_parser import ErrorParser
from .validation import Validation
logger = logging.getLogger(__name__)
class Driver(ErrorParser, Validation):
def _get_params_for(self, target, shape, params):
result = {}
for member in shape.iter_members():
if member.target == target and member.name in params:
result[member.name] = params[member.name]
return result
def before_call(self, request, operation, **params):
request.scheme = operation.http['scheme']
request.host = operation.http['host']
request.port = operation.http['port']
request.uri = operation.http['uri'].lstrip("/").format(**params)
request.method = operation.http['method']
request.query = self._get_params_for(
"query",
operation.input_shape,
params,
)
super(Driver, self).before_call(request, operation, **params)
logger.debug("{}: {}".format(request.method, request.uri))
logger.debug(request.body)
logger.debug(request.headers)
def after_call(self, operation, request, response):
logger.debug(response.status_code)
logger.debug(response.body)
result = {
'Metadata': {
'StatusCode': response.status_code
}
}
result.update(
super(Driver, self).after_call(operation, request, response)
)
return result
|
apache-2.0
| 6,896,000,010,012,746,000
| 34.4375
| 74
| 0.659171
| false
| 4.369942
| false
| false
| false
|
lailongwei/llbc
|
wrap/pyllbc/script/comm/Timer.py
|
1
|
3690
|
# -*- coding: utf-8 -*-
import time as _time
from datetime import datetime as _dt
from datetime import tzinfo as _tzinfo
from datetime import timedelta as _timedelta
import llbc
class _pyllbcGMT(_tzinfo):
"""llbc library GMT tzinfo class encapsulation"""
_delta = _timedelta(0)
def utcoffset(self, dt):
return self._delta
    def tzname(self, dt):
return "GMT+0"
def dst(self, dt):
return None
class _pyllbcLOCAL(_tzinfo):
"""llbc library LOCAL tzinfo class encapsulation"""
_delta = _timedelta(seconds=-_time.timezone)
_tzname = 'GMT +{}'.format(-_time.timezone / 3600) if _time.timezone < 0 else \
('GMT -{}'.format(_time.timezone / 3600) if _time.timezone > 0 else 'GMT +0')
def utcoffset(self, dt):
return self._delta
    def tzname(self, dt):
return self._tzname
def dst(self, dt):
return None
class pyllbcTimer(object):
"""llbc library timer class encapsulation"""
INVALID_TIMER_ID = 0
"""llbc library timer class encapsulation"""
def __init__(self, ontimeout, oncancel=None):
if not callable(ontimeout):
raise TypeError("'ontimeout' obj must callable")
if not callable(oncancel):
self.__c_obj = llbc.inl.NewPyTimer(self, ontimeout)
else:
self.__c_obj = llbc.inl.NewPyTimer(self, ontimeout, oncancel)
def __del__(self):
llbc.inl.DelPyTimer(self.__c_obj)
@property
def timerid(self):
return llbc.inl.PyTimerGetTimerId(self.__c_obj)
@property
def duetime(self):
return llbc.inl.PyTimerGetDueTime(self.__c_obj)
@property
def period(self):
return llbc.inl.PyTimerGetPeriod(self.__c_obj)
@property
def isscheduling(self):
return llbc.inl.PyTimerIsScheduling(self.__c_obj)
@property
def istimeouting(self):
return llbc.inl.PyTimerIsTimeouting(self.__c_obj)
@property
def iscancelling(self):
return llbc.inl.PyTimerIsCancelling(self.__c_obj)
@property
def ignored_dead_ref(self):
return llbc.inl.PyTimerIsIgnoredDeadRef(self.__c_obj)
@ignored_dead_ref.setter
def ignored_dead_ref(self, flag):
llbc.inl.PyTimerSetIgnoredDeadRef(self.__c_obj, flag)
def schedule(self, duetime, period=None):
"""Schedule timer"""
if period is None:
period = duetime
llbc.inl.PyTimerSchedule(self.__c_obj, duetime, period)
def schedule2(self, duetime, period, fmtstr='%Y-%m-%d %H:%M:%S'):
"""
        Schedule the timer. 'duetime' may be a datetime object, a str, or a number:
        if duetime is a datetime object, it is used as the expire time directly,
        if duetime is a str, it is parsed into a datetime using 'fmtstr',
        if duetime is numeric, it is treated as a timestamp in seconds.
        If the tzinfo is not specified, llbc automatically uses the local tzinfo.
"""
if isinstance(duetime, unicode):
duetime = duetime.decode('utf-8')
if isinstance(duetime, str):
duetime = _dt.strptime(duetime, fmtstr)
if isinstance(duetime, _dt):
ts = _time.mktime(duetime.timetuple()) + duetime.microsecond / 1000000.0
else:
ts = duetime
now = _time.time()
if ts < now:
raise llbc.error('duetime[{}] < nowtime[{}], schedule timer failed'.format(duetime, _dt.fromtimestamp(now)))
self.schedule(int((ts - now) * 1000), int(period * 1000))
def cancel(self):
"""Cancel timer"""
llbc.inl.PyTimerCancel(self.__c_obj)
llbc.Timer = pyllbcTimer
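# Illustrative usage sketch (callback name and signature are assumptions):
#   def on_timeout(*args): pass
#   t = llbc.Timer(on_timeout)
#   t.schedule(1000)                         # first fire after 1000 ms, then every 1000 ms
#   t.schedule2('2030-01-01 00:00:00', 60)   # fire at the given local time, then every 60 s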
|
mit
| -2,180,250,508,961,024,300
| 29.213115
| 120
| 0.620867
| false
| 3.614104
| false
| false
| false
|
juanchopanza/NeuroM
|
neurom/morphmath.py
|
1
|
12380
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Mathematical and geometrical functions used to compute morphometrics'''
import math
from itertools import combinations
import numpy as np
from neurom.core.dataformat import COLS
def vector(p1, p2):
'''compute vector between two 3D points
Args:
p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
3-vector from p1 - p2
'''
return np.subtract(p1[COLS.XYZ], p2[COLS.XYZ])
def linear_interpolate(p1, p2, fraction):
'''Returns the point p satisfying: p1 + fraction * (p2 - p1)'''
return np.array((p1[0] + fraction * (p2[0] - p1[0]),
p1[1] + fraction * (p2[1] - p1[1]),
p1[2] + fraction * (p2[2] - p1[2])))
def interpolate_radius(r1, r2, fraction):
'''Calculate the radius that corresponds to a point P that lies at a fraction of the length
of a cut cone P1P2 where P1, P2 are the centers of the circles that bound the shape with radii
r1 and r2 respectively.
Args:
r1: float
Radius of the first node of the segment.
r2: float
Radius of the second node of the segment
fraction: float
The fraction at which the interpolated radius is calculated.
Returns: float
The interpolated radius.
Note: The fraction is assumed from point P1, not from point P2.
'''
def f(a, b, c):
''' Returns the length of the interpolated radius calculated
using similar triangles.
'''
return a + c * (b - a)
return f(r2, r1, 1. - fraction) if r1 > r2 else f(r1, r2, fraction)
def path_fraction_id_offset(points, fraction, relative_offset=False):
'''Find the segment which corresponds to the fraction
of the path length along the piecewise linear curve which
is constructed from the set of points.
Args:
points: an iterable of indexable objects with indices
            0, 1, 2 corresponding to 3D cartesian coordinates
fraction: path length fraction (0.0 <= fraction <= 1.0)
relative_offset: return absolute or relative segment distance
Returns:
(segment ID, segment offset) pair.
'''
if not (0. <= fraction <= 1.0):
raise ValueError("Invalid fraction: %.3f" % fraction)
pts = np.array(points)[:, COLS.XYZ]
lengths = np.linalg.norm(np.diff(pts, axis=0), axis=1)
cum_lengths = np.cumsum(lengths)
offset = cum_lengths[-1] * fraction
seg_id = np.argmin(cum_lengths < offset)
if seg_id > 0:
offset -= cum_lengths[seg_id - 1]
if relative_offset:
offset /= lengths[seg_id]
return seg_id, offset
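# Example: for points [(0, 0, 0), (1, 0, 0), (3, 0, 0)] and fraction=0.5 the total
# path length is 3, so the target offset 1.5 falls in segment 1 at absolute
# offset 0.5 (or 0.25 of that segment's length with relative_offset=True).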
def path_fraction_point(points, fraction):
'''Computes the point which corresponds to the fraction
of the path length along the piecewise linear curve which
is constructed from the set of points.
Args:
points: an iterable of indexable objects with indices
            0, 1, 2 corresponding to 3D cartesian coordinates
fraction: path length fraction (0 <= fraction <= 1)
Returns:
The 3D coordinates of the aforementioned point
'''
seg_id, offset = path_fraction_id_offset(points, fraction, relative_offset=True)
return linear_interpolate(points[seg_id], points[seg_id + 1], offset)
def scalar_projection(v1, v2):
'''compute the scalar projection of v1 upon v2
Args:
v1, v2: iterable
indices 0, 1, 2 corresponding to cartesian coordinates
Returns:
3-vector of the projection of point p onto the direction of v
'''
return np.dot(v1, v2) / np.linalg.norm(v2)
def vector_projection(v1, v2):
'''compute the vector projection of v1 upon v2
Args:
v1, v2: iterable
indices 0, 1, 2 corresponding to cartesian coordinates
Returns:
3-vector of the projection of point p onto the direction of v
'''
return scalar_projection(v1, v2) * v2 / np.linalg.norm(v2)
def dist_point_line(p, l1, l2):
'''compute the orthogonal distance between from the line that goes through
the points l1, l2 and the point p
Args:
p, l1, l2 : iterable
point
indices 0, 1, 2 corresponding to cartesian coordinates
'''
cross_prod = np.cross(l2 - l1, p - l1)
return np.linalg.norm(cross_prod) / np.linalg.norm(l2 - l1)
def point_dist2(p1, p2):
'''compute the square of the euclidian distance between two 3D points
Args:
p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
The square of the euclidian distance between the points.
'''
v = vector(p1, p2)
return np.dot(v, v)
def point_dist(p1, p2):
'''compute the euclidian distance between two 3D points
Args:
p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
The euclidian distance between the points.
'''
return np.sqrt(point_dist2(p1, p2))
def angle_3points(p0, p1, p2):
''' compute the angle in radians between three 3D points
Calculated as the angle between p1-p0 and p2-p0.
Args:
p0, p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
Angle in radians between (p1-p0) and (p2-p0).
0.0 if p0==p1 or p0==p2.
'''
vec1 = vector(p1, p0)
vec2 = vector(p2, p0)
return math.atan2(np.linalg.norm(np.cross(vec1, vec2)),
np.dot(vec1, vec2))
def polygon_diameter(points):
    ''' Compute the maximum euclidian distance between any two points
in a list of points
'''
return max(point_dist(p0, p1) for (p0, p1) in combinations(points, 2))
def average_points_dist(p0, p_list):
"""
Computes the average distance between a list of points
and a given point p0.
"""
return np.mean(list(point_dist(p0, p1) for p1 in p_list))
def path_distance(points):
"""
Compute the path distance from given set of points
"""
vecs = np.diff(points, axis=0)[:, :3]
d2 = [np.dot(p, p) for p in vecs]
return np.sum(np.sqrt(d2))
def segment_length(seg):
'''Return the length of a segment.
Returns: Euclidian distance between centres of points in seg
'''
return point_dist(seg[0], seg[1])
def segment_length2(seg):
'''Return the square of the length of a segment.
Returns: Square of Euclidian distance between centres of points in seg
'''
return point_dist2(seg[0], seg[1])
def segment_radius(seg):
'''Return the mean radius of a segment
Returns: arithmetic mean of the radii of the points in seg
'''
return (seg[0][COLS.R] + seg[1][COLS.R]) / 2.
def segment_x_coordinate(seg):
'''Return the mean x coordinate of a segment
Returns: arithmetic mean of the x coordinates of the points in seg
'''
return (seg[0][COLS.X] + seg[1][COLS.X]) / 2.
def segment_y_coordinate(seg):
'''Return the mean y coordinate of a segment
Returns: arithmetic mean of the y coordinates of the points in seg
'''
return (seg[0][COLS.Y] + seg[1][COLS.Y]) / 2.
def segment_z_coordinate(seg):
'''Return the mean z coordinate of a segment
Returns: arithmetic mean of the z coordinates of the points in seg
'''
return (seg[0][COLS.Z] + seg[1][COLS.Z]) / 2.
def segment_radial_dist(seg, pos):
'''Return the radial distance of a tree segment to a given point
The radial distance is the euclidian distance between the mid-point of
the segment and the point in question.
Parameters:
seg: tree segment
        pos: origin to which distances are measured. It must have at least 3
components. The first 3 components are (x, y, z).
'''
return point_dist(pos, np.divide(np.add(seg[0], seg[1]), 2.0))
def segment_area(seg):
'''Compute the surface area of a segment.
Approximated as a conical frustum. Does not include the surface area
of the bounding circles.
'''
r0 = seg[0][COLS.R]
r1 = seg[1][COLS.R]
h2 = point_dist2(seg[0], seg[1])
return math.pi * (r0 + r1) * math.sqrt((r0 - r1) ** 2 + h2)
def segment_volume(seg):
'''Compute the volume of a segment.
Approximated as a conical frustum.
'''
r0 = seg[0][COLS.R]
r1 = seg[1][COLS.R]
h = point_dist(seg[0], seg[1])
return math.pi * h * ((r0 * r0) + (r0 * r1) + (r1 * r1)) / 3.0
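# Illustrative sketch, not part of the original module; it assumes the radius
# sits in the fourth column (COLS.R == 3), as the accessors above imply. For a
# cylinder-like segment of radius 1 and height 1 the frustum formulas reduce
# to a lateral area of 2 * pi and a volume of pi. The helper name is
# hypothetical.
def _frustum_example():
    seg = (np.array([0., 0., 0., 1.]), np.array([0., 0., 1., 1.]))
    return segment_area(seg), segment_volume(seg)  # ~ (6.283, 3.142)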
def taper_rate(p0, p1):
'''Compute the taper rate between points p0 and p1
Args:
p0, p1: iterables with first 4 components containing (x, y, z, r)
Returns:
The taper rate, defined as the absolute value of the difference in
the diameters of p0 and p1 divided by the euclidian distance
between them.
'''
return 2 * abs(p0[COLS.R] - p1[COLS.R]) / point_dist(p0, p1)
def segment_taper_rate(seg):
'''Compute the taper rate of a segment
Returns:
The taper rate, defined as the absolute value of the difference in
the diameters of the segment's two points divided by the euclidian
distance between them.
'''
return taper_rate(seg[0], seg[1])
def pca(points):
'''
Estimate the principal components of the covariance on the given point cloud
    Input:
        A numpy array of points of the form ((x1, y1, z1), (x2, y2, z2) ...)
    Output:
        Eigenvalues and respective eigenvectors of the covariance matrix
'''
return np.linalg.eig(np.cov(points.transpose()))
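# Illustrative sketch, not part of the original module: for a point cloud
# stretched along the x axis, pca() should report its largest eigenvalue for
# an eigenvector close to (1, 0, 0). The helper name is hypothetical.
def _pca_example():
    pts = np.array([[0., 0., 0.],
                    [2., 0.1, 0.],
                    [4., -0.1, 0.],
                    [6., 0., 0.]])
    eigs, eigv = pca(pts)
    return eigs, eigv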
def sphere_area(r):
''' Compute the area of a sphere with radius r
'''
return 4. * math.pi * r ** 2
# Useful alias for path_distance
section_length = path_distance
def principal_direction_extent(points):
'''Calculate the extent of a set of 3D points.
The extent is defined as the maximum distance between
the projections on the principal directions of the covariance matrix
of the points.
Parameter:
points : a 2D numpy array of points
Returns:
extents : the extents for each of the eigenvectors of the cov matrix
eigs : eigenvalues of the covariance matrix
eigv : respective eigenvectors of the covariance matrix
'''
# center the points around 0.0
points = np.copy(points)
points -= np.mean(points, axis=0)
# principal components
_, eigv = pca(points)
extent = np.zeros(3)
    for i in range(eigv.shape[1]):
        # scalar projections of every point onto the i-th principal direction
        scalar_projs = np.sort(np.array([np.dot(p, eigv[:, i]) for p in points]))
        # the extent along this direction is the spread between the largest
        # and smallest projection
        extent[i] = scalar_projs[-1]
        if scalar_projs[0] < 0.:
            extent[i] -= scalar_projs[0]
return extent
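# Illustrative sketch, not part of the original module: for the corners of a
# 4 x 2 rectangle in the x-y plane the extents along the principal directions
# are roughly (4, 2, 0), up to the ordering of the eigenvectors. The helper
# name is hypothetical.
def _extent_example():
    pts = np.array([[0., 0., 0.],
                    [4., 0., 0.],
                    [0., 2., 0.],
                    [4., 2., 0.]])
    return principal_direction_extent(pts)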
|
bsd-3-clause
| -7,683,719,952,775,599,000
| 29.79602
| 98
| 0.656704
| false
| 3.622001
| false
| false
| false
|
karolyi/forum-django
|
backend/migrate_to_django/markdownparser.py
|
1
|
1130
|
import html
from bs4.element import Tag
from forum.base.models import Comment
from html2text import html2text
def markdown_smilies(img_tag: Tag):
    # Replace forum smiley <img> tags with their textual markdown placeholders
img_src = img_tag.get('src', '')
if img_src.startswith('/static/images/smiliereplace/'):
img_alt = img_tag.get('alt', '')
img_tag.replace_with(img_alt)
return
if img_src.startswith('/static/images/smilies/'):
img_tag.replace_with('[SMIL:%s]' % img_src[22:])
return
def replace_images(content: Tag):
for img_tag in content.select('img'):
markdown_smilies(img_tag)
def parse_to_markdown(content: Tag, comment_item: Comment, md_property: str):
    # Convert a parsed HTML comment body to markdown and store the result on
    # the attribute of comment_item named by md_property
replace_images(content)
for embed_item in content.select('div.embedded-player'):
embed_item.replace_with(embed_item.md_url)
content_md_html = content.body.decode_contents()\
.replace('></source>', '/>')\
.replace('\r\n', '\n')
md_content = html2text(content_md_html, bodywidth=0)
# Convert 2 BRs to Ps
md_content = html.unescape(md_content).replace(' \n \n', '\n\n')
setattr(comment_item, md_property, md_content)
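# Illustrative usage sketch, not part of the original module. It assumes the
# caller already has a Comment instance and its HTML body; the attribute names
# 'content_html' and 'content_md' are hypothetical stand-ins for whatever
# fields the real model uses, and a parser that wraps fragments in
# <html><body> (e.g. 'lxml') is assumed, since parse_to_markdown reads
# content.body.
#
#     from bs4 import BeautifulSoup
#     soup = BeautifulSoup(comment_item.content_html, 'lxml')
#     parse_to_markdown(soup, comment_item, 'content_md')
#     comment_item.save()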
|
mit
| -5,486,757,108,375,557,000
| 28.736842
| 77
| 0.643363
| false
| 3.247126
| false
| false
| false
|
roac-monitoring/roac-agent
|
roac/logs.py
|
1
|
1388
|
# vim: set fileencoding=utf-8 :
from __future__ import absolute_import
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
def log_to_stderr(logger=None, level=logging.DEBUG):
    """Configure the python log system to log to stderr.

    logger: Name of the logger to configure. Pass None to use the root logger.
    level: Level set on the stderr handler.

    Makes the given logger (the root logger by default) log to stderr and sets
    up a formatter that prints the date, loglevel and logger name.
    """
if logger is None:
logger = logging.getLogger()
else:
logger = logging.getLogger(logger)
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(
'%(asctime)s [%(levelname)s|%(name)s] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
def setup_logging(app):
"""Setup the python logging system according to whether the given app
is in debug mode
"""
if app.debug:
# Configure the root logger to output on stderr
log_to_stderr()
else:
# Configure the package logger to use NullHandler and avoid errors.
# The application should set up a proper handler if it wants logging
# in production.
pkg_logger = logging.getLogger(__package__)
handler = NullHandler()
pkg_logger.addHandler(handler)
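# Illustrative usage sketch, not part of the original module. setup_logging()
# expects a Flask-style app object exposing a ``debug`` attribute, as its
# docstring suggests; the module path roac.logs follows from this file's
# location, and the logger name used below is hypothetical.
#
#     from roac.logs import log_to_stderr, setup_logging
#     setup_logging(app)                               # stderr when app.debug
#     log_to_stderr('roac.agent', level=logging.INFO)  # ad-hoc, single logger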
|
bsd-3-clause
| -5,082,369,550,257,231,000
| 27.916667
| 76
| 0.669308
| false
| 4.3375
| false
| false
| false
|