| repo_name (string, 5–92 chars) | path (string, 4–221 chars) | copies (string, 19 classes) | size (string, 4–6 chars) | content (string, 766–896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
FedoraScientific/salome-kernel
|
bin/appliskel/salome_tester/salome_instance.py
|
1
|
3184
|
# Copyright (C) 2015-2016 CEA/DEN, EDF R&D, OPEN CASCADE
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
import sys
import os
# Example of args:
# args=["--gui", "--show-desktop=1", "--splash=0"]
# args=["--terminal","--modules=MED,PARAVIS,GUI"]
class SalomeInstance(object):
def __init__(self):
self.port = None
#
def get_port(self):
return self.port
#
@staticmethod
def start(shutdown_servers=False, with_gui=False, args=[]):
import tempfile
log = tempfile.NamedTemporaryFile(suffix='_nsport.log', delete=False)
log.close()
instance_args = [
"--ns-port-log=%s"%log.name,
"--shutdown-servers=%d"%shutdown_servers
] + args
salome_instance = SalomeInstance()
salome_instance.__run(args=instance_args, with_gui=with_gui)
with open(log.name) as f:
salome_instance.port = int(f.readline())
os.remove(log.name)
return salome_instance
#
def __run(self, args=None, with_gui=False):
if args is None:
args = []
sys.argv = ['runSalome'] + args
if with_gui:
# :WARNING: NOT TESTED YET
sys.argv += ["--gui"]
sys.argv += ["--show-desktop=1"]
sys.argv += ["--splash=0"]
#sys.argv += ["--standalone=study"]
#sys.argv += ["--embedded=SalomeAppEngine,cppContainer,registry,moduleCatalog"]
else:
sys.argv += ["--terminal"]
#sys.argv += ["--shutdown-servers=1"]
#sys.argv += ["--modules=MED,PARAVIS,GUI"]
pass
import setenv
setenv.main(True)
import runSalome
runSalome.runSalome()
if not with_gui:
import salome
salome.salome_init()
session_server = salome.naming_service.Resolve('/Kernel/Session')
if session_server:
session_server.emitMessage("connect_to_study")
session_server.emitMessage("activate_viewer/ParaView")
pass
#
def stop(self):
from multiprocessing import Process
from killSalomeWithPort import killMyPort
import tempfile
with tempfile.NamedTemporaryFile():
p = Process(target = killMyPort, args=(self.port,))
p.start()
p.join()
pass
#
#
if __name__ == "__main__":
print "##### Start instance..."
salome_instance = SalomeInstance.start()
port = salome_instance.get_port()
print "##### ...instance started on port %s"%port
print "##### Terminate instance running on port %s"%port
salome_instance.stop()
#
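# Illustrative usage sketch (not part of the original module): the "Example of
# args" comment above can be combined with start(); the module names here are
# placeholders, and with_gui is documented above as not tested yet.
#
# instance = SalomeInstance.start(args=["--modules=MED,GUI"])
# print("Instance listening on port %s" % instance.get_port())
# instance.stop()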
|
lgpl-2.1
| -4,801,765,214,694,821,000
| 27.428571
| 85
| 0.652952
| false
| 3.593679
| false
| false
| false
|
dtroyer/python-openstacksdk
|
openstack/network/v2/router.py
|
1
|
5820
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network import network_service
from openstack.network.v2 import tag
from openstack import resource
from openstack import utils
class Router(resource.Resource, tag.TagMixin):
resource_key = 'router'
resources_key = 'routers'
base_path = '/routers'
service = network_service.NetworkService()
# capabilities
allow_create = True
allow_get = True
allow_update = True
allow_delete = True
allow_list = True
# NOTE: We don't support query on datetime, list or dict fields
_query_mapping = resource.QueryParameters(
'description', 'flavor_id', 'name', 'status',
is_admin_state_up='admin_state_up',
is_distributed='distributed',
is_ha='ha',
project_id='tenant_id',
**tag.TagMixin._tag_query_parameters
)
# Properties
#: Availability zone hints to use when scheduling the router.
#: *Type: list of availability zone names*
availability_zone_hints = resource.Body('availability_zone_hints',
type=list)
#: Availability zones for the router.
#: *Type: list of availability zone names*
availability_zones = resource.Body('availability_zones', type=list)
#: Timestamp when the router was created.
created_at = resource.Body('created_at')
#: The router description.
description = resource.Body('description')
#: The ``network_id``, for the external gateway. *Type: dict*
external_gateway_info = resource.Body('external_gateway_info', type=dict)
#: The ID of the flavor.
flavor_id = resource.Body('flavor_id')
#: The administrative state of the router, which is up ``True``
#: or down ``False``. *Type: bool*
is_admin_state_up = resource.Body('admin_state_up', type=bool)
#: The distributed state of the router, which is distributed ``True``
#: or not ``False``. *Type: bool* *Default: False*
is_distributed = resource.Body('distributed', type=bool, default=False)
#: The highly-available state of the router, which is highly available
#: ``True`` or not ``False``. *Type: bool* *Default: False*
is_ha = resource.Body('ha', type=bool, default=False)
#: The router name.
name = resource.Body('name')
#: The ID of the project this router is associated with.
project_id = resource.Body('tenant_id')
#: Revision number of the router. *Type: int*
revision_number = resource.Body('revision', type=int)
#: The extra routes configuration for the router.
routes = resource.Body('routes', type=list)
#: The router status.
status = resource.Body('status')
#: Timestamp when the router was last updated.
updated_at = resource.Body('updated_at')
def add_interface(self, session, **body):
"""Add an internal interface to a logical router.
:param session: The session to communicate through.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param dict body: The body requested to be updated on the router
:returns: The body of the response as a dictionary.
"""
url = utils.urljoin(self.base_path, self.id, 'add_router_interface')
resp = session.put(url, json=body)
return resp.json()
def remove_interface(self, session, **body):
"""Remove an internal interface from a logical router.
:param session: The session to communicate through.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param dict body: The body requested to be updated on the router
:returns: The body of the response as a dictionary.
"""
url = utils.urljoin(self.base_path, self.id, 'remove_router_interface')
resp = session.put(url, json=body)
return resp.json()
def add_gateway(self, session, **body):
"""Add an external gateway to a logical router.
:param session: The session to communicate through.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param dict body: The body requested to be updated on the router
:returns: The body of the response as a dictionary.
"""
url = utils.urljoin(self.base_path, self.id,
'add_gateway_router')
resp = session.put(url, json=body)
return resp.json()
def remove_gateway(self, session, **body):
"""Remove an external gateway from a logical router.
:param session: The session to communicate through.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param dict body: The body requested to be updated on the router
:returns: The body of the response as a dictionary.
"""
url = utils.urljoin(self.base_path, self.id,
'remove_gateway_router')
resp = session.put(url, json=body)
return resp.json()
class L3AgentRouter(Router):
resource_key = 'router'
resources_key = 'routers'
base_path = '/agents/%(agent_id)s/l3-routers'
resource_name = 'l3-router'
service = network_service.NetworkService()
# capabilities
allow_create = False
allow_retrieve = True
allow_update = False
allow_delete = False
allow_list = True
# NOTE: No query parameter is supported
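# Illustrative sketch (not part of the upstream module): attaching and detaching
# a subnet via the add_interface/remove_interface calls defined above. The
# ``session`` is assumed to be an authenticated keystoneauth1 Adapter, and the
# ROUTER_ID/SUBNET_ID values are placeholders.
#
# router = Router(id='ROUTER_ID')
# result = router.add_interface(session, subnet_id='SUBNET_ID')
# router.remove_interface(session, subnet_id='SUBNET_ID')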
|
apache-2.0
| -3,754,145,010,826,601,500
| 38.060403
| 79
| 0.656873
| false
| 4.124734
| false
| false
| false
|
dparks1134/GenomeTreeTk
|
scripts/checkm_compare.py
|
1
|
4871
|
#!/usr/bin/env python
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__prog_name__ = 'checkm_compare.py'
__prog_desc__ = 'compare CheckM estimates'
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2018'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '0.0.1'
__maintainer__ = 'Donovan Parks'
__email__ = 'donovan.parks@gmail.com'
__status__ = 'Development'
import os
import sys
import argparse
import tempfile
import ntpath
import shutil
class Compare(object):
"""Compare CheckM estimates."""
def __init__(self):
"""Initialization."""
pass
def run(self, qc_failed_file, checkm_qa_files, output_file):
"""compare CheckM estimates."""
orig_estimates = {}
with open(qc_failed_file) as f:
header = f.readline().strip().split('\t')
acc_index = header.index('Accession')
comp_index = header.index('Completeness (%)')
cont_index = header.index('Contamination (%)')
for line in f:
line_split = line.strip().split('\t')
gid = line_split[acc_index]
comp = float(line_split[comp_index])
cont = float(line_split[cont_index])
orig_estimates[gid] = (comp, cont)
new_estimates = {}
with open(checkm_qa_files) as f:
header = f.readline().strip().split('\t')
comp_index = header.index('Completeness')
cont_index = header.index('Contamination')
for line in f:
line_split = line.strip().split('\t')
gid = line_split[0].replace('_ncbi_proteins', '')
comp = float(line_split[comp_index])
cont = float(line_split[cont_index])
new_estimates[gid] = (comp, cont)
fout = open(output_file, 'w')
fout.write('Accession\tOriginal completeness\tNew completeness\tOriginal contamination\tNew contamination\n')
for gid in new_estimates:
orig_comp, orig_cont = orig_estimates[gid]
new_comp, new_cont = new_estimates[gid]
orig_quality = orig_comp - 5*orig_cont
if orig_quality >= 50:
continue
new_quality = new_comp - 5*new_cont
if new_quality < 50:
continue
if (new_comp - orig_comp > 5
or new_cont - orig_cont < -1):
print(gid, orig_comp, new_comp, orig_cont, new_cont)
fout.write('%s\t%.2f\t%.2f\t%.2f\t%.2f\n' % (gid, orig_comp, new_comp, orig_cont, new_cont))
fout.close()
if __name__ == '__main__':
print(__prog_name__ + ' v' + __version__ + ': ' + __prog_desc__)
print(' by ' + __author__ + ' (' + __email__ + ')' + '\n')
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('qc_failed_file', help='file indicating genomes that failed QC')
parser.add_argument('checkm_qa_files', help='file with alternative CheckM estimates')
parser.add_argument('output_file', help='output file')
args = parser.parse_args()
try:
p = Compare()
p.run(args.qc_failed_file, args.checkm_qa_files, args.output_file)
except SystemExit:
print("\nControlled exit resulting from an unrecoverable error or warning.")
except:
print("\nUnexpected error:", sys.exc_info()[0])
raise
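# Worked example of the quality filter in run() (illustrative numbers only):
# a genome with 70% completeness and 5% contamination has quality
# 70 - 5*5 = 45, so it originally failed QC (< 50); if the alternative CheckM
# run reports 92% / 4%, the new quality is 92 - 5*4 = 72 (>= 50) and the
# genome is written to the output file.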
|
gpl-3.0
| -6,658,379,957,902,644,000
| 39.256198
| 117
| 0.49497
| false
| 4.235652
| false
| false
| false
|
pablocscode/TFG-CAEBAT
|
simulacion_basica_v1.py
|
1
|
2696
|
# -*- coding: utf-8 -*-
'''
Created by Pablo Castro
28/03/17
Objective:
Automate the whole simulation process from the Linux terminal.
Works for CAEBAT 1.0
How to use it:
1- Place the script in the 'examples' folder.
2- Create a folder called 'Mis simulaciones' on the desktop.
3- Run it normally from the terminal: 'python simulacion_basica.py'
4- The program asks you for the exact name of the folder to simulate.
Actions:
-Run the simulation of a case of our choosing.
-Copy the folder with the simulation results into another folder
located on the desktop, named after the simulated case and the simulation date.
-After doing this, delete the folders generated by the simulation
in the example folder.
'''
import os
import shutil
from datetime import datetime
def copiar_simulacion(Nombre_simulacion):
#Compute the date on which the folder was created
fecha = os.stat(Nombre_simulacion).st_mtime
#Convert it to a readable format and name the new folder
nombre_carpeta_copia = Nombre_simulacion + ' ' + str(datetime.fromtimestamp(fecha))
shutil.copytree(Nombre_simulacion,nombre_carpeta_copia)
shutil.move(nombre_carpeta_copia,'/home/batsim/Desktop/Mis simulaciones/')
def eliminar_carpetas(Nombre_simulacion):
shutil.rmtree('/home/batsim/caebat/vibe/examples/'+Nombre_simulacion+'/simulation_log')
shutil.rmtree('/home/batsim/caebat/vibe/examples/'+Nombre_simulacion+'/simulation_results')
shutil.rmtree('/home/batsim/caebat/vibe/examples/'+Nombre_simulacion+'/simulation_setup')
shutil.rmtree('/home/batsim/caebat/vibe/examples/'+Nombre_simulacion+'/work')
#Select our simulation folder from the terminal
print('Enter the name of the folder you want to simulate:')
nombre = raw_input()
#Select the .conf file that we are going to simulate
if nombre == 'case2':
modelo = 'thermal_electrical_chartran_cell_twoway.conf'
elif nombre == 'case3':
modelo = 'thermal_electrical_chartran_battery_twoway.conf'
elif nombre == 'case6':
modelo = 'thermal_electrical_chartran_farasis.conf'
elif nombre == 'case7':
modelo = 'thermal_electrical_chartran_module_4P.conf'
else:
print('Error entering the folder name')
quit()
#Change the path to the selected folder
os.chdir('/home/batsim/caebat/vibe/examples/'+nombre)
#Run the simulation
os.system('/home/batsim/caebat/oas/install/bin/ips.py --simulation='+modelo+' --log=temp.log --platform=../config/batsim.conf -a')
os.chdir('/home/batsim/caebat/vibe/examples')
copiar_simulacion(nombre)
eliminar_carpetas(nombre)
print('End of the simulation')
|
gpl-3.0
| -8,891,207,074,465,465,000
| 37.057971
| 130
| 0.751763
| false
| 2.629268
| false
| false
| false
|
indashnet/InDashNet.Open.UN2000
|
android/external/llvm/utils/lit/lit/ShUtil.py
|
1
|
12179
|
from __future__ import absolute_import
import itertools
import lit.Util
from lit.ShCommands import Command, Pipeline, Seq
class ShLexer:
def __init__(self, data, win32Escapes = False):
self.data = data
self.pos = 0
self.end = len(data)
self.win32Escapes = win32Escapes
def eat(self):
c = self.data[self.pos]
self.pos += 1
return c
def look(self):
return self.data[self.pos]
def maybe_eat(self, c):
"""
maybe_eat(c) - Consume the character c if it is the next character,
returning True if a character was consumed. """
if self.data[self.pos] == c:
self.pos += 1
return True
return False
def lex_arg_fast(self, c):
# Get the leading whitespace free section.
chunk = self.data[self.pos - 1:].split(None, 1)[0]
# If it has special characters, the fast path failed.
if ('|' in chunk or '&' in chunk or
'<' in chunk or '>' in chunk or
"'" in chunk or '"' in chunk or
';' in chunk or '\\' in chunk):
return None
self.pos = self.pos - 1 + len(chunk)
return chunk
def lex_arg_slow(self, c):
if c in "'\"":
str = self.lex_arg_quoted(c)
else:
str = c
while self.pos != self.end:
c = self.look()
if c.isspace() or c in "|&;":
break
elif c in '><':
# This is an annoying case; we treat '2>' as a single token so
# we don't have to track whitespace tokens.
# If the parse string isn't an integer, do the usual thing.
if not str.isdigit():
break
# Otherwise, lex the operator and convert to a redirection
# token.
num = int(str)
tok = self.lex_one_token()
assert isinstance(tok, tuple) and len(tok) == 1
return (tok[0], num)
elif c == '"':
self.eat()
str += self.lex_arg_quoted('"')
elif c == "'":
self.eat()
str += self.lex_arg_quoted("'")
elif not self.win32Escapes and c == '\\':
# Outside of a string, '\\' escapes everything.
self.eat()
if self.pos == self.end:
lit.Util.warning(
"escape at end of quoted argument in: %r" % self.data)
return str
str += self.eat()
else:
str += self.eat()
return str
def lex_arg_quoted(self, delim):
str = ''
while self.pos != self.end:
c = self.eat()
if c == delim:
return str
elif c == '\\' and delim == '"':
# Inside a '"' quoted string, '\\' only escapes the quote
# character and backslash, otherwise it is preserved.
if self.pos == self.end:
lit.Util.warning(
"escape at end of quoted argument in: %r" % self.data)
return str
c = self.eat()
if c == '"': #
str += '"'
elif c == '\\':
str += '\\'
else:
str += '\\' + c
else:
str += c
lit.Util.warning("missing quote character in %r" % self.data)
return str
def lex_arg_checked(self, c):
pos = self.pos
res = self.lex_arg_fast(c)
end = self.pos
self.pos = pos
reference = self.lex_arg_slow(c)
if res is not None:
if res != reference:
raise ValueError("Fast path failure: %r != %r" % (
res, reference))
if self.pos != end:
raise ValueError("Fast path failure: %r != %r" % (
self.pos, end))
return reference
def lex_arg(self, c):
return self.lex_arg_fast(c) or self.lex_arg_slow(c)
def lex_one_token(self):
"""
lex_one_token - Lex a single 'sh' token. """
c = self.eat()
if c == ';':
return (c,)
if c == '|':
if self.maybe_eat('|'):
return ('||',)
return (c,)
if c == '&':
if self.maybe_eat('&'):
return ('&&',)
if self.maybe_eat('>'):
return ('&>',)
return (c,)
if c == '>':
if self.maybe_eat('&'):
return ('>&',)
if self.maybe_eat('>'):
return ('>>',)
return (c,)
if c == '<':
if self.maybe_eat('&'):
return ('<&',)
if self.maybe_eat('>'):
return ('<<',)
return (c,)
return self.lex_arg(c)
def lex(self):
while self.pos != self.end:
if self.look().isspace():
self.eat()
else:
yield self.lex_one_token()
###
class ShParser:
def __init__(self, data, win32Escapes = False, pipefail = False):
self.data = data
self.pipefail = pipefail
self.tokens = ShLexer(data, win32Escapes = win32Escapes).lex()
def lex(self):
for item in self.tokens:
return item
return None
def look(self):
token = self.lex()
if token is not None:
self.tokens = itertools.chain([token], self.tokens)
return token
def parse_command(self):
tok = self.lex()
if not tok:
raise ValueError("empty command!")
if isinstance(tok, tuple):
raise ValueError("syntax error near unexpected token %r" % tok[0])
args = [tok]
redirects = []
while 1:
tok = self.look()
# EOF?
if tok is None:
break
# If this is an argument, just add it to the current command.
if isinstance(tok, str):
args.append(self.lex())
continue
# Otherwise see if it is a terminator.
assert isinstance(tok, tuple)
if tok[0] in ('|',';','&','||','&&'):
break
# Otherwise it must be a redirection.
op = self.lex()
arg = self.lex()
if not arg:
raise ValueError("syntax error near token %r" % op[0])
redirects.append((op, arg))
return Command(args, redirects)
def parse_pipeline(self):
negate = False
commands = [self.parse_command()]
while self.look() == ('|',):
self.lex()
commands.append(self.parse_command())
return Pipeline(commands, negate, self.pipefail)
def parse(self):
lhs = self.parse_pipeline()
while self.look():
operator = self.lex()
assert isinstance(operator, tuple) and len(operator) == 1
if not self.look():
raise ValueError(
"missing argument to operator %r" % operator[0])
# FIXME: Operator precedence!!
lhs = Seq(lhs, operator[0], self.parse_pipeline())
return lhs
###
import unittest
class TestShLexer(unittest.TestCase):
def lex(self, str, *args, **kwargs):
return list(ShLexer(str, *args, **kwargs).lex())
def test_basic(self):
self.assertEqual(self.lex('a|b>c&d<e;f'),
['a', ('|',), 'b', ('>',), 'c', ('&',), 'd',
('<',), 'e', (';',), 'f'])
def test_redirection_tokens(self):
self.assertEqual(self.lex('a2>c'),
['a2', ('>',), 'c'])
self.assertEqual(self.lex('a 2>c'),
['a', ('>',2), 'c'])
def test_quoting(self):
self.assertEqual(self.lex(""" 'a' """),
['a'])
self.assertEqual(self.lex(""" "hello\\"world" """),
['hello"world'])
self.assertEqual(self.lex(""" "hello\\'world" """),
["hello\\'world"])
self.assertEqual(self.lex(""" "hello\\\\world" """),
["hello\\world"])
self.assertEqual(self.lex(""" he"llo wo"rld """),
["hello world"])
self.assertEqual(self.lex(""" a\\ b a\\\\b """),
["a b", "a\\b"])
self.assertEqual(self.lex(""" "" "" """),
["", ""])
self.assertEqual(self.lex(""" a\\ b """, win32Escapes = True),
['a\\', 'b'])
class TestShParse(unittest.TestCase):
def parse(self, str):
return ShParser(str).parse()
def test_basic(self):
self.assertEqual(self.parse('echo hello'),
Pipeline([Command(['echo', 'hello'], [])], False))
self.assertEqual(self.parse('echo ""'),
Pipeline([Command(['echo', ''], [])], False))
self.assertEqual(self.parse("""echo -DFOO='a'"""),
Pipeline([Command(['echo', '-DFOO=a'], [])], False))
self.assertEqual(self.parse('echo -DFOO="a"'),
Pipeline([Command(['echo', '-DFOO=a'], [])], False))
def test_redirection(self):
self.assertEqual(self.parse('echo hello > c'),
Pipeline([Command(['echo', 'hello'],
[((('>'),), 'c')])], False))
self.assertEqual(self.parse('echo hello > c >> d'),
Pipeline([Command(['echo', 'hello'], [(('>',), 'c'),
(('>>',), 'd')])], False))
self.assertEqual(self.parse('a 2>&1'),
Pipeline([Command(['a'], [(('>&',2), '1')])], False))
def test_pipeline(self):
self.assertEqual(self.parse('a | b'),
Pipeline([Command(['a'], []),
Command(['b'], [])],
False))
self.assertEqual(self.parse('a | b | c'),
Pipeline([Command(['a'], []),
Command(['b'], []),
Command(['c'], [])],
False))
def test_list(self):
self.assertEqual(self.parse('a ; b'),
Seq(Pipeline([Command(['a'], [])], False),
';',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a & b'),
Seq(Pipeline([Command(['a'], [])], False),
'&',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a && b'),
Seq(Pipeline([Command(['a'], [])], False),
'&&',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a || b'),
Seq(Pipeline([Command(['a'], [])], False),
'||',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a && b || c'),
Seq(Seq(Pipeline([Command(['a'], [])], False),
'&&',
Pipeline([Command(['b'], [])], False)),
'||',
Pipeline([Command(['c'], [])], False)))
self.assertEqual(self.parse('a; b'),
Seq(Pipeline([Command(['a'], [])], False),
';',
Pipeline([Command(['b'], [])], False)))
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -4,915,834,333,598,974,000
| 33.307042
| 79
| 0.416126
| false
| 4.46608
| true
| false
| false
|
cloudera/hue
|
desktop/libs/librdbms/java/query.py
|
2
|
1577
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import range
from py4j.java_gateway import JavaGateway
gateway = JavaGateway()
jdbc_driver = 'com.mysql.jdbc.Driver'
db_url = 'jdbc:mysql://localhost/hue'
username = 'root'
password = 'root'
conn = gateway.jvm.java.sql.DriverManager.getConnection(db_url, username, password)
try:
stmt = conn.createStatement()
try:
rs = stmt.executeQuery('select username,email from auth_user')
try:
md = rs.getMetaData()
for i in range(md.getColumnCount()):
print(md.getColumnTypeName(i + 1))
while rs.next():
username = rs.getString("username")
email = rs.getString("email")
print(username, email)
finally:
rs.close()
finally:
stmt.close()
finally:
conn.close()
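# Note (not part of the original script): JavaGateway() only connects to a py4j
# GatewayServer that is already running on the JVM side, so a Java process with
# py4j and the MySQL JDBC driver on its classpath must be started first; the
# db_url, username and password above are example values.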
|
apache-2.0
| -8,428,450,868,111,065,000
| 29.326923
| 83
| 0.717185
| false
| 3.790865
| false
| false
| false
|
adongy/spreads
|
spreads/util.py
|
1
|
14770
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Johannes Baiter <johannes.baiter@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Various utility functions and classes.
"""
from __future__ import division, unicode_literals, print_function
import abc
import glob
import json
import logging
import os
import pkg_resources
import platform
import re
import subprocess
from unicodedata import normalize
import blinker
import colorama
import psutil
import roman
from colorama import Fore, Back, Style
from spreads.vendor.pathlib import Path
class SpreadsException(Exception):
""" General exception """
pass
class DeviceException(SpreadsException):
""" Raised when a device-related error occured. """
pass
class MissingDependencyException(SpreadsException):
""" Raised when a dependency for a plugin is missing. """
pass
def get_version():
""" Get installed version via pkg_resources. """
return pkg_resources.require('spreads')[0].version
def find_in_path(name):
""" Find executable in $PATH.
:param name: name of the executable
:type name: unicode
:returns: Path to executable or None if not found
:rtype: unicode or None
"""
candidates = None
if is_os('windows'):
import _winreg
if name.startswith('scantailor'):
try:
cmd = _winreg.QueryValue(
_winreg.HKEY_CLASSES_ROOT,
'Scan Tailor Project\\shell\\open\\command')
bin_path = cmd.split('" "')[0][1:]
if name.endswith('-cli'):
bin_path = bin_path[:-4] + "-cli.exe"
return bin_path if os.path.exists(bin_path) else None
except OSError:
return None
else:
path_dirs = os.environ.get('PATH').split(';')
path_dirs.append(os.getcwd())
path_exts = os.environ.get('PATHEXT').split(';')
candidates = (os.path.join(p, name + e)
for p in path_dirs
for e in path_exts)
else:
candidates = (os.path.join(p, name)
for p in os.environ.get('PATH').split(':'))
return next((c for c in candidates if os.path.exists(c)), None)
def is_os(osname):
""" Check if the current operating system matches the expected.
:param osname: Operating system name as returned by
:py:func:`platform.system`
:returns: Whether the OS matches or not
:rtype: bool
"""
return platform.system().lower() == osname
def check_futures_exceptions(futures):
""" Go through passed :py:class:`concurrent.futures._base.Future` objects
and re-raise the first Exception raised by any one of them.
:param futures: Iterable that contains the futures to be checked
:type futures: iterable with :py:class:`concurrent.futures._base.Future`
instances
"""
if any(x.exception() for x in futures):
raise next(x for x in futures if x.exception()).exception()
def get_free_space(path):
""" Return free space on file-system underlying the passed path.
:param path: Path on file-system the free space of which is desired.
:type path: unicode
:return: Free space in bytes.
:rtype: int
"""
return psutil.disk_usage(unicode(path)).free
def get_subprocess(cmdline, **kwargs):
""" Get a :py:class:`subprocess.Popen` instance.
On Windows systems, the process will be ran in the background and won't
open a cmd-window or appear in the taskbar.
The function signature matches that of the :py:class:`subprocess.Popen`
initialization method.
"""
if subprocess.mswindows and 'startupinfo' not in kwargs:
su = subprocess.STARTUPINFO()
su.dwFlags |= subprocess.STARTF_USESHOWWINDOW
su.wShowWindow = subprocess.SW_HIDE
kwargs['startupinfo'] = su
return subprocess.Popen(cmdline, **kwargs)
def wildcardify(pathnames):
""" Try to generate a single path with wildcards that matches all
`pathnames`.
:param pathnames: List of pathnames to find a wildcard string for
:type pathnames: List of str/unicode
:return: The wildcard string or None if none was found
:rtype: unicode or None
"""
wildcard_str = ""
for idx, char in enumerate(pathnames[0]):
if all(p[idx] == char for p in pathnames[1:]):
wildcard_str += char
elif not wildcard_str or wildcard_str[-1] != "*":
wildcard_str += "*"
matched_paths = glob.glob(wildcard_str)
if not sorted(pathnames) == sorted(matched_paths):
return None
return wildcard_str
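# Illustrative behaviour (assuming exactly these two files exist on disk, since
# the candidate pattern is verified with glob.glob()):
# wildcardify(['/tmp/scan_001.jpg', '/tmp/scan_002.jpg']) -> '/tmp/scan_00*.jpg'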
def diff_dicts(old, new):
""" Get the difference between two dictionaries.
:param old: Dictionary to base comparison on
:type old: dict
:param new: Dictionary to compare with
:type new: dict
:return: A (possibly nested) dictionary containing all items from `new`
that differ from the ones in `old`
:rtype: dict
"""
out = {}
for key, value in old.iteritems():
if new[key] != value:
out[key] = new[key]
elif isinstance(value, dict):
diff = diff_dicts(value, new[key])
if diff:
out[key] = diff
return out
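# Illustrative behaviour with flat dictionaries (hypothetical values):
# diff_dicts({'dpi': 300, 'flip': True}, {'dpi': 600, 'flip': True})
# -> {'dpi': 600}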
def slugify(text, delimiter=u'-'):
"""Generates an ASCII-only slug.
Code adapted from Flask snipped by Armin Ronacher:
http://flask.pocoo.org/snippets/5/
:param text: Text to create slug for
:type text: unicode
:param delimiter: Delimiter to use in slug
:type delimiter: unicode
:return: The generated slug
:rtype: unicode
"""
punctuation_re = r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+'
result = []
for word in re.split(punctuation_re, text.lower()):
word = normalize('NFKD', word).encode('ascii', 'ignore')
if word:
result.append(word)
return unicode(delimiter.join(result))
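# Illustrative behaviour (hypothetical input):
# slugify(u'Über Café') -> u'uber-cafe'
# slugify(u'Hello, World!', delimiter=u'_') -> u'hello_world'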
class _instancemethodwrapper(object): # noqa
def __init__(self, callable):
self.callable = callable
self.__dontcall__ = False
def __getattr__(self, key):
return getattr(self.callable, key)
def __call__(self, *args, **kwargs):
if self.__dontcall__:
raise TypeError('Attempted to call abstract method.')
return self.callable(*args, **kwargs)
class _classmethod(classmethod): # noqa
def __init__(self, func):
super(_classmethod, self).__init__(func)
isabstractmethod = getattr(func, '__isabstractmethod__', False)
if isabstractmethod:
self.__isabstractmethod__ = isabstractmethod
def __get__(self, instance, owner):
result = _instancemethodwrapper(super(_classmethod, self)
.__get__(instance, owner))
isabstractmethod = getattr(self, '__isabstractmethod__', False)
if isabstractmethod:
result.__isabstractmethod__ = isabstractmethod
abstractmethods = getattr(owner, '__abstractmethods__', None)
if abstractmethods and result.__name__ in abstractmethods:
result.__dontcall__ = True
return result
class abstractclassmethod(_classmethod): # noqa
""" New decorator class that implements the @abstractclassmethod decorator
added in Python 3.3 for Python 2.7.
Kudos to http://stackoverflow.com/a/13640018/487903
"""
def __init__(self, func):
func = abc.abstractmethod(func)
super(abstractclassmethod, self).__init__(func)
class ColourStreamHandler(logging.StreamHandler):
""" A colorized output StreamHandler
Kudos to Leigh MacDonald: http://goo.gl/Lpr6C5
"""
# Some basic colour scheme defaults
colours = {
'DEBUG': Fore.CYAN,
'INFO': Fore.GREEN,
'WARN': Fore.YELLOW,
'WARNING': Fore.YELLOW,
'ERROR': Fore.RED,
'CRIT': Back.RED + Fore.WHITE,
'CRITICAL': Back.RED + Fore.WHITE
}
@property
def is_tty(self):
""" Check if we are using a "real" TTY. If we are not using a TTY it
means that the colour output should be disabled.
:return: Using a TTY status
:rtype: bool
"""
try:
return getattr(self.stream, 'isatty', None)()
except:
return False
def emit(self, record):
try:
message = self.format(record)
if not self.is_tty:
self.stream.write(message)
else:
self.stream.write(self.colours[record.levelname] +
message + Style.RESET_ALL)
self.stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class EventHandler(logging.Handler):
""" Subclass of :py:class:`logging.Handler` that emits a
:py:class:`blinker.base.Signal` whenever a new record is emitted.
"""
signals = blinker.Namespace()
on_log_emit = signals.signal('logrecord', doc="""\
Sent when a log record was emitted.
:keyword :class:`logging.LogRecord` record: the LogRecord
""")
def emit(self, record):
self.on_log_emit.send(record=record)
def get_data_dir(create=False):
""" Return (and optionally create) the user's default data directory.
:param create: Create the data directory if it doesn't exist
:type create: bool
:return: Path to the default data directory
:rtype: unicode
"""
unix_dir_var = 'XDG_DATA_HOME'
unix_dir_fallback = '~/.config'
windows_dir_var = 'APPDATA'
windows_dir_fallback = '~\\AppData\\Roaming'
mac_dir = '~/Library/Application Support'
base_dir = None
if is_os('darwin'):
if Path(unix_dir_fallback).exists():
base_dir = unix_dir_fallback
else:
base_dir = mac_dir
elif is_os('windows'):
if windows_dir_var in os.environ:
base_dir = os.environ[windows_dir_var]
else:
base_dir = windows_dir_fallback
else:
if unix_dir_var in os.environ:
base_dir = os.environ[unix_dir_var]
else:
base_dir = unix_dir_fallback
app_path = Path(base_dir)/'spreads'
if create and not app_path.exists():
app_path.mkdir()
return unicode(app_path)
def colorize(text, color):
""" Return text with a new ANSI foreground color.
:param text: Text to be wrapped
:param color: ANSI color to wrap text in
:type color: str (from `colorama.ansi <http://git.io/9qnt0Q>`)
:return: Colorized text
"""
return color + text + colorama.Fore.RESET
class RomanNumeral(object):
""" Number type that represents integers as Roman numerals and that
can be used in all arithmetic operations applicable to integers.
"""
@staticmethod
def is_roman(value):
""" Check if `value` is a valid Roman numeral.
:param value: Value to be checked
:type value: unicode
:returns: Whether the value is valid or not
:rtype: bool
"""
return bool(roman.romanNumeralPattern.match(value))
def __init__(self, value, case='upper'):
""" Create a new instance.
:param value: Value of the instance
:type value: int, unicode containing valid Roman numeral or
:py:class:`RomanNumeral`
"""
self._val = self._to_int(value)
self._case = case
if isinstance(value, basestring) and not self.is_roman(value):
self._case = 'lower'
elif isinstance(value, RomanNumeral):
self._case = value._case
def _to_int(self, value):
if isinstance(value, int):
return value
elif isinstance(value, basestring) and self.is_roman(value.upper()):
return roman.fromRoman(value.upper())
elif isinstance(value, RomanNumeral):
return value._val
else:
raise ValueError("Value must be a valid roman numeral, a string"
" representing one or an integer: '{0}'"
.format(value))
def __cmp__(self, other):
if self._val > self._to_int(other):
return 1
elif self._val == self._to_int(other):
return 0
elif self._val < self._to_int(other):
return -1
def __add__(self, other):
return RomanNumeral(self._val + self._to_int(other), self._case)
def __sub__(self, other):
return RomanNumeral(self._val - self._to_int(other), self._case)
def __int__(self):
return self._val
def __str__(self):
strval = roman.toRoman(self._val)
if self._case == 'lower':
return strval.lower()
else:
return strval
def __unicode__(self):
return unicode(str(self))
def __repr__(self):
return str(self)
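# Illustrative behaviour (hypothetical values): the case of the input is
# remembered for rendering, and arithmetic works against plain integers.
# n = RomanNumeral('xiv')
# str(n + 2) -> 'xvi'
# int(RomanNumeral('XIV')) -> 14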
class CustomJSONEncoder(json.JSONEncoder):
""" Custom :py:class:`json.JSONEncoder`.
Uses an object's `to_dict` method if present for serialization.
Serializes :py:class:`pathlib.Path` instances to the string
representation of their relative path to a BagIt-compliant directory or
their absolute path if not applicable.
"""
def default(self, obj):
if hasattr(obj, 'to_dict'):
return obj.to_dict()
if isinstance(obj, Path):
# Serialize paths that belong to a workflow as paths relative to
# its base directory
base = next((p for p in obj.parents if (p/'bagit.txt').exists()),
None)
if base:
return unicode(obj.relative_to(base))
else:
return unicode(obj.absolute())
return json.JSONEncoder.default(self, obj)
|
agpl-3.0
| 8,165,834,610,917,695,000
| 31.178649
| 79
| 0.599323
| false
| 4.089147
| false
| false
| false
|
uw-it-cte/uw-restclients
|
restclients/views.py
|
1
|
6990
|
try:
from importlib import import_module
except:
# python 2.6
from django.utils.importlib import import_module
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_protect
from django.http import HttpResponseNotFound, HttpResponseRedirect
from django.http import HttpResponse
from django.template import loader, RequestContext, TemplateDoesNotExist
from django.shortcuts import render_to_response
from restclients.dao import SWS_DAO, PWS_DAO, GWS_DAO, NWS_DAO, Hfs_DAO,\
Book_DAO, Canvas_DAO, Uwnetid_DAO, MyLibInfo_DAO, LibCurrics_DAO,\
TrumbaCalendar_DAO, MyPlan_DAO, IASYSTEM_DAO, Grad_DAO
from restclients.mock_http import MockHTTP
from authz_group import Group
from userservice.user import UserService
from time import time
from urllib import quote, unquote, urlencode
from urlparse import urlparse, parse_qs
import simplejson as json
import re
@login_required
@csrf_protect
def proxy(request, service, url):
if not hasattr(settings, "RESTCLIENTS_ADMIN_GROUP"):
print "You must have a group defined as your admin group."
print 'Configure that using RESTCLIENTS_ADMIN_GROUP="u_foo_bar"'
raise Exception("Missing RESTCLIENTS_ADMIN_GROUP in settings")
user_service = UserService()
actual_user = user_service.get_original_user()
g = Group()
is_admin = g.is_member_of_group(actual_user,
settings.RESTCLIENTS_ADMIN_GROUP)
if not is_admin:
return HttpResponseRedirect("/")
use_pre = False
headers = {}
if service == "sws":
dao = SWS_DAO()
headers["X-UW-Act-as"] = actual_user
elif service == "pws":
dao = PWS_DAO()
elif service == "gws":
dao = GWS_DAO()
elif service == "nws":
dao = NWS_DAO()
elif service == "hfs":
dao = Hfs_DAO()
elif service == "book":
dao = Book_DAO()
elif service == "canvas":
dao = Canvas_DAO()
elif service == "grad":
dao = Grad_DAO()
elif service == "uwnetid":
dao = Uwnetid_DAO()
elif service == "libraries":
dao = MyLibInfo_DAO()
elif service == "libcurrics":
dao = LibCurrics_DAO()
elif service == "myplan":
dao = MyPlan_DAO()
elif service == "iasystem":
dao = IASYSTEM_DAO()
headers = {"Accept": "application/vnd.collection+json"}
subdomain = None
if url.endswith('/evaluation'):
if url.startswith('uwb/') or url.startswith('uwt/'):
subdomain = url[:3]
url = url[4:]
else:
subdomain = url[:2]
url = url[3:]
elif service == "calendar":
dao = TrumbaCalendar_DAO()
use_pre = True
else:
return HttpResponseNotFound("Unknown service: %s" % service)
url = "/%s" % quote(url)
if request.GET:
try:
url = "%s?%s" % (url, urlencode(request.GET))
except UnicodeEncodeError:
err = "Bad URL param given to the restclients browser"
return HttpResponse(err)
start = time()
try:
if service == "iasystem" and subdomain is not None:
response = dao.getURL(url, headers, subdomain)
else:
if service == "libcurrics":
if "?campus=" in url:
url = url.replace("?campus=", "/")
elif "course?" in url:
url_prefix = re.sub(r'\?.*$', "", url)
url = "%s/%s/%s/%s/%s/%s" % (
url_prefix,
request.GET["year"],
request.GET["quarter"],
request.GET["curriculum_abbr"].replace(" ", "%20"),
request.GET["course_number"],
request.GET["section_id"])
response = dao.getURL(url, headers)
except Exception as ex:
response = MockHTTP()
response.status = 500
response.data = str(ex)
end = time()
# Assume json, and try to format it.
try:
if not use_pre:
content = format_json(service, response.data)
json_data = response.data
else:
content = response.data
json_data = None
except Exception as e:
content = format_html(service, response.data)
json_data = None
context = {
"url": unquote(url),
"content": content,
"json_data": json_data,
"response_code": response.status,
"time_taken": "%f seconds" % (end - start),
"headers": response.headers,
"override_user": user_service.get_override_user(),
"use_pre": use_pre,
}
try:
loader.get_template("restclients/extra_info.html")
context["has_extra_template"] = True
context["extra_template"] = "restclients/extra_info.html"
except TemplateDoesNotExist:
pass
try:
loader.get_template("restclients/proxy_wrapper.html")
context["wrapper_template"] = "restclients/proxy_wrapper.html"
except TemplateDoesNotExist:
context["wrapper_template"] = "proxy_wrapper.html"
try:
search_template_path = re.sub(r"\..*$", "", url)
search_template = "proxy/%s%s.html" % (service, search_template_path)
loader.get_template(search_template)
context["search_template"] = search_template
context["search"] = format_search_params(url)
except TemplateDoesNotExist:
context["search_template"] = None
return render_to_response("proxy.html",
context,
context_instance=RequestContext(request))
def format_search_params(url):
params = {}
query_params = parse_qs(urlparse(url).query)
for param in query_params:
params[param] = ",".join(query_params[param])
return params
def format_json(service, content):
json_data = json.loads(content, use_decimal=True)
formatted = json.dumps(json_data, sort_keys=True, indent=4)
formatted = formatted.replace("&", "&amp;")
formatted = formatted.replace("<", "&lt;")
formatted = formatted.replace(">", "&gt;")
formatted = formatted.replace(" ", "&nbsp;")
formatted = formatted.replace("\n", "<br/>\n")
formatted = re.sub(r"\"/(.*?)\"",
r'"<a href="/restclients/view/%s/\1">/\1</a>"' %
service, formatted)
return formatted
def format_html(service, content):
formatted = re.sub(r"href\s*=\s*\"/(.*?)\"",
r"href='/restclients/view/%s/\1'" % service, content)
formatted = re.sub(re.compile(r"<style.*/style>", re.S), "", formatted)
formatted = clean_self_closing_divs(formatted)
return formatted
def clean_self_closing_divs(content):
cleaned = re.sub("((<div[^>]*?)/>)", "<!-- \g<1> -->\g<2>></div>", content)
return cleaned
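# Illustrative behaviour (hypothetical markup): self-closing divs are kept as a
# comment and re-emitted as properly closed elements.
# clean_self_closing_divs('<div id="x"/>')
# -> '<!-- <div id="x"/> --><div id="x"></div>'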
|
apache-2.0
| 8,306,111,691,793,203,000
| 32.605769
| 79
| 0.580973
| false
| 3.935811
| false
| false
| false
|
dersphere/plugin.programm.xbmcmail
|
addon.py
|
1
|
9677
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Tristan Fischer (sphere@dersphere.de)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from xbmcswift2 import Plugin, xbmc, xbmcgui
from resources.lib.client import (
XBMCMailClient, InvalidCredentials, InvalidHost
)
STRINGS = {
'email_mark_seen': 30000,
'email_mark_unseen': 30001,
'email_delete': 30002,
'delete': 30003,
'are_you_sure': 30004,
'select_provider': 30005,
'connection_error': 30006,
'wrong_credentials': 30007,
'want_set_now': 30008,
'wrong_host': 30009,
'page': 30010,
'refresh_inbox': 30011,
}
plugin = Plugin()
@plugin.route('/')
def show_mailboxes():
client = _login()
if not client:
return
def _format_label(mailbox):
label = mailbox['name']
if 'unseen' in mailbox and 'total' in mailbox:
label = u'%s (%d/%d)' % (
label,
int(mailbox['unseen']),
int(mailbox['total']),
)
return label
items = [{
'label': _format_label(mailbox),
'path': plugin.url_for(
endpoint='show_mailbox',
mailbox=mailbox['raw_name'],
)
} for mailbox in client.get_mailboxes()]
return plugin.finish(items)
@plugin.route('/mailbox/<mailbox>/', options={'page': '1'})
@plugin.route('/mailbox/<mailbox>/<page>/', name='show_mailbox_page')
def show_mailbox(mailbox, page):
client = _login()
if not client:
return
page = int(page)
limit = 50
offset = (page - 1) * limit
def context_menu(mailbox, email):
items = []
if email['unseen']:
items.append(
(_('email_mark_seen'),
_view(endpoint='email_mark_seen',
mailbox=mailbox,
email_id=email['id']))
)
else:
items.append(
(_('email_mark_unseen'),
_view(endpoint='email_mark_unseen',
mailbox=mailbox,
email_id=email['id']))
)
items.append(
(_('email_delete'),
_view(endpoint='email_delete',
mailbox=mailbox,
email_id=email['id']))
)
items.append(
(_('refresh_inbox'),
_view(endpoint='refresh_inbox',
mailbox=mailbox,
email_id=email['id']))
)
return items
def _format_label(email):
label = '[B]%s[/B] - %s' % (
_format_from(email['from']),
_format_subject(email['subject']),
)
if email['unseen']:
label = '[COLOR red]%s[/COLOR]' % label
return label
def _format_from(s):
if ' <' in s:
return s.split(' <')[0].strip('"')
else:
return s.split('@')[0]
def _format_subject(s):
return s.replace('\r\n', '')
emails, has_next_page = client.get_emails(mailbox, limit, offset)
has_prev_page = page > 1
items = [{
'label': _format_label(email),
'replace_context_menu': True,
'info': {'count': i + 1},
'context_menu': context_menu(mailbox, email),
'path': plugin.url_for(
endpoint='email_show',
mailbox=email['mailbox'],
email_id=email['id']
)
} for i, email in enumerate(emails)]
if has_next_page:
items.append({
'label': '>> %s %s >>' % (_('page'), (page + 1)),
'info': {'count': len(emails) + 2},
'path': plugin.url_for(
endpoint='show_mailbox_page',
mailbox=mailbox,
page=(page + 1),
is_update='true',
)
})
if has_prev_page:
items.append({
'label': '<< %s %s <<' % (_('page'), (page - 1)),
'info': {'count': 0},
'path': plugin.url_for(
endpoint='show_mailbox_page',
mailbox=mailbox,
page=(page - 1),
is_update='true',
)
})
finish_kwargs = {
'update_listing': 'is_update' in plugin.request.args,
'sort_methods': ('playlist_order', )
}
return plugin.finish(items, **finish_kwargs)
@plugin.route('/mailbox/<mailbox>/<email_id>/mark_seen')
def email_mark_seen(mailbox, email_id):
client = _login()
if not client:
return
client.email_mark_seen(email_id, mailbox)
_refresh_view()
@plugin.route('/mailbox/<mailbox>/<email_id>/mark_unseen')
def email_mark_unseen(mailbox, email_id):
client = _login()
if not client:
return
client.email_mark_unseen(email_id, mailbox)
_refresh_view()
@plugin.route('/mailbox/<mailbox>')
def refresh_inbox(mailbox):
_refresh_view()
@plugin.route('/mailbox/<mailbox>/<email_id>/delete')
def email_delete(mailbox, email_id):
client = _login()
if not client:
return
confirmed = xbmcgui.Dialog().yesno(
_('delete'),
_('are_you_sure')
)
if not confirmed:
return
client.email_delete(email_id, mailbox)
_refresh_view()
@plugin.route('/mailbox/<mailbox>/<email_id>/show')
def email_show(mailbox, email_id):
client = _login()
if not client:
return
xbmc.executebuiltin('ActivateWindow(%d)' % 10147)
window = xbmcgui.Window(10147)
email = client.get_email(email_id, mailbox)
header = '%s - %s' % (email['from'], email['subject'])
text = '\r\n'.join((
'=====================================================',
'[B]From:[/B] %s' % email['from'],
'[B]To:[/B] %s' % email['to'],
'[B]Date:[/B] %s' % email['date'],
'[B]Subject:[/B] %s' % email['subject'],
'=====================================================',
email['body_text'],
))
window.getControl(1).setLabel(header)
window.getControl(5).setText(text)
def ask_provider():
providers = [
{'name': 'Custom',
'imap_host': ''},
{'name': '1und1.de',
'imap_host': 'imap.1und1.de',
'use_ssl': 'true'},
{'name': 'Arcor.de',
'imap_host': 'imap.arcor.de',
'use_ssl': 'true'},
{'name': 'Freenet.de',
'imap_host': 'mx.freenet.de',
'use_ssl': 'false'},
{'name': 'Gmail.com',
'imap_host': 'imap.gmail.com',
'use_ssl': 'true'},
{'name': 'iCloud.com',
'imap_host': 'imap.mail.me.com',
'use_ssl': 'true'},
{'name': 'T-Online.de',
'imap_host': 'secureimap.t-online.de',
'use_ssl': 'true'},
{'name': 'Web.de',
'imap_host': 'imap.web.de',
'use_ssl': 'false'},
{'name': 'Yahoo.com',
'imap_host': 'imap.mail.yahoo.com',
'use_ssl': 'true'},
]
selected = xbmcgui.Dialog().select(
_('select_provider'), [p['name'] for p in providers]
)
if selected >= 0:
return providers[selected]
@plugin.route('/settings/set_provider')
def set_default_list():
provider = ask_provider()
if provider:
plugin.set_setting('provider', provider['name'])
for k, v in provider.iteritems():
if k == 'name':
plugin.set_setting('provider', v)
else:
plugin.set_setting(k, v)
else:
plugin.set_setting('provider', 'Custom')
def _run(*args, **kwargs):
return 'XBMC.RunPlugin(%s)' % plugin.url_for(*args, **kwargs)
def _view(*args, **kwargs):
return 'XBMC.Container.Update(%s)' % plugin.url_for(*args, **kwargs)
def _refresh_view():
xbmc.executebuiltin('Container.Refresh')
def _login():
logged_in = False
while not logged_in:
try:
client = XBMCMailClient(
username=plugin.get_setting('username', unicode),
password=plugin.get_setting('password', unicode),
host=plugin.get_setting('imap_host', unicode),
use_ssl=plugin.get_setting('use_ssl', bool),
)
except InvalidCredentials:
try_again = xbmcgui.Dialog().yesno(
_('connection_error'),
_('wrong_credentials'),
_('want_set_now')
)
if not try_again:
return
plugin.open_settings()
except InvalidHost:
try_again = xbmcgui.Dialog().yesno(
_('connection_error'),
_('wrong_host'),
_('want_set_now')
)
if not try_again:
return
plugin.open_settings()
else:
logged_in = True
return client
def _(string_id):
if string_id in STRINGS:
return plugin.get_string(STRINGS[string_id])
else:
plugin.log.debug('String is missing: %s' % string_id)
return string_id
if __name__ == '__main__':
plugin.run()
|
gpl-2.0
| -2,259,042,183,702,010,400
| 27.800595
| 73
| 0.512969
| false
| 3.777127
| false
| false
| false
|
Scille/parsec-cloud
|
tests/monitor.py
|
1
|
13104
|
# Parsec Cloud (https://parsec.cloud) Copyright (c) AGPLv3 2016-2021 Scille SAS
# Monitor POC, shamelessly taken from curio
import os
import signal
import socket
import traceback
import threading
import telnetlib
import argparse
import logging
import trio
from trio.abc import Instrument
from trio.lowlevel import current_statistics
LOGGER = logging.getLogger("trio.monitor")
MONITOR_HOST = "127.0.0.1"
MONITOR_PORT = 48802
# Telnet doesn't support unicode, so we must rely on ascii art instead :'-(
if 0:
MID_PREFIX = "├─ "
MID_CONTINUE = "│ "
END_PREFIX = "└─ "
else:
MID_PREFIX = "|- "
MID_CONTINUE = "| "
END_PREFIX = "|_ "
END_CONTINUE = " " * len(END_PREFIX)
def is_shielded_task(task):
cancel_status = task._cancel_status
while cancel_status:
if cancel_status._scope.shield:
return True
cancel_status = cancel_status._parent
return False
def _render_subtree(name, rendered_children):
lines = []
lines.append(name)
for child_lines in rendered_children:
if child_lines is rendered_children[-1]:
first_prefix = END_PREFIX
rest_prefix = END_CONTINUE
else:
first_prefix = MID_PREFIX
rest_prefix = MID_CONTINUE
lines.append(first_prefix + child_lines[0])
for child_line in child_lines[1:]:
lines.append(rest_prefix + child_line)
return lines
def _rendered_nursery_children(nursery, format_task):
return [task_tree_lines(t, format_task) for t in nursery.child_tasks]
def task_tree_lines(task, format_task):
rendered_children = []
nurseries = list(task.child_nurseries)
while nurseries:
nursery = nurseries.pop()
nursery_children = _rendered_nursery_children(nursery, format_task)
if rendered_children:
nested = _render_subtree("(nested nursery)", rendered_children)
nursery_children.append(nested)
rendered_children = nursery_children
return _render_subtree(format_task(task), rendered_children)
def render_task_tree(task, format_task):
return "\n".join(line for line in task_tree_lines(task, format_task)) + "\n"
class Monitor(Instrument):
def __init__(self, host=MONITOR_HOST, port=MONITOR_PORT):
self.address = (host, port)
self._trio_token = None
self._next_task_short_id = 0
self._tasks = {}
self._closing = None
self._ui_thread = None
def get_task_from_short_id(self, shortid):
for task in self._tasks.values():
if task._monitor_short_id == shortid:
return task
return None
def before_run(self):
LOGGER.info("Starting Trio monitor at %s:%d", *self.address)
self._trio_token = trio.lowlevel.current_trio_token()
self._ui_thread = threading.Thread(target=self.server, args=(), daemon=True)
self._closing = threading.Event()
self._ui_thread.start()
def task_spawned(self, task):
self._tasks[id(task)] = task
task._monitor_short_id = self._next_task_short_id
self._next_task_short_id += 1
task._monitor_state = "spawned"
def task_scheduled(self, task):
task._monitor_state = "scheduled"
def before_task_step(self, task):
task._monitor_state = "running"
def after_task_step(self, task):
task._monitor_state = "waiting"
def task_exited(self, task):
del self._tasks[id(task)]
# def before_io_wait(self, timeout):
# if timeout:
# print("### waiting for I/O for up to {} seconds".format(timeout))
# else:
# print("### doing a quick check for I/O")
# self._sleep_time = trio.current_time()
# def after_io_wait(self, timeout):
# duration = trio.current_time() - self._sleep_time
# print("### finished I/O check (took {} seconds)".format(duration))
def after_run(self):
LOGGER.info("Stopping Trio monitor ui thread")
self._closing.set()
self._ui_thread.join()
def server(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# set the timeout to prevent the server loop from
# blocking indefinitely on sock.accept()
sock.settimeout(0.5)
sock.bind(self.address)
sock.listen(1)
with sock:
while not self._closing.is_set():
try:
client, addr = sock.accept()
with client:
client.settimeout(0.5)
# This bit of magic is for reading lines of input while still allowing
# timeouts and the ability for the monitor to die when curio exits.
# See Issue #108.
def readlines():
buffer = bytearray()
while not self._closing.is_set():
index = buffer.find(b"\n")
if index >= 0:
line = buffer[: index + 1].decode("latin-1")
del buffer[: index + 1]
yield line
try:
chunk = client.recv(1000)
if not chunk:
break
buffer.extend(chunk)
except socket.timeout:
pass
sout = client.makefile("w", encoding="latin-1")
self.interactive_loop(sout, readlines())
except socket.timeout:
continue
def interactive_loop(self, sout, input_lines):
"""
Main interactive loop of the monitor
"""
sout.write("Trio Monitor: %d tasks running\n" % len(self._tasks))
sout.write("Type help for commands\n")
while True:
sout.write("trio > ")
sout.flush()
resp = next(input_lines, None)
if not resp:
return
try:
if resp.startswith("q"):
self.command_exit(sout)
return
elif resp.startswith("pa"):
_, taskid_s = resp.split()
self.command_parents(sout, int(taskid_s))
elif resp.startswith("s"):
self.command_stats(sout)
elif resp.startswith("p"):
self.command_ps(sout)
elif resp.startswith("t"):
self.command_task_tree(sout)
elif resp.startswith("exit"):
self.command_exit(sout)
return
elif resp.startswith("cancel"):
_, taskid_s = resp.split()
self.command_cancel(sout, int(taskid_s))
elif resp.startswith("signal"):
_, signame = resp.split()
self.command_signal(sout, signame)
elif resp.startswith("w"):
_, taskid_s = resp.split()
self.command_where(sout, int(taskid_s))
elif resp.startswith("h"):
self.command_help(sout)
else:
sout.write("Unknown command. Type help.\n")
except Exception as e:
sout.write("Bad command. %s\n" % e)
def command_help(self, sout):
sout.write(
"""Commands:
ps : Show task table
stat : Display general runtime information
tree : Display hierarchical view of tasks and nurseries
where taskid : Show stack frames for a task
cancel taskid : Cancel an indicated task
signal signame : Send a Unix signal
parents taskid : List task parents
quit : Leave the monitor
"""
)
def command_stats(self, sout):
async def get_current_statistics():
return current_statistics()
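        # trio.from_thread.run() schedules the coroutine inside the Trio run
        # loop identified by self._trio_token and blocks this (non-Trio) UI
        # thread until the result comes back.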
stats = trio.from_thread.run(get_current_statistics, trio_token=self._trio_token)
sout.write(
"""tasks_living: {s.tasks_living}
tasks_runnable: {s.tasks_runnable}
seconds_to_next_deadline: {s.seconds_to_next_deadline}
run_sync_soon_queue_size: {s.run_sync_soon_queue_size}
io_statistics:
tasks_waiting_read: {s.io_statistics.tasks_waiting_read}
tasks_waiting_write: {s.io_statistics.tasks_waiting_write}
backend: {s.io_statistics.backend}
""".format(
s=stats
)
)
def command_ps(self, sout):
headers = ("Id", "State", "Shielded", "Task")
widths = (5, 10, 10, 50)
for h, w in zip(headers, widths):
sout.write("%-*s " % (w, h))
sout.write("\n")
sout.write(" ".join(w * "-" for w in widths))
sout.write("\n")
for task in sorted(self._tasks.values(), key=lambda t: t._monitor_short_id):
sout.write(
"%-*d %-*s %-*s %-*s\n"
% (
widths[0],
task._monitor_short_id,
widths[1],
task._monitor_state,
widths[2],
"yes" if is_shielded_task(task) else "",
widths[3],
task.name,
)
)
def command_task_tree(self, sout):
root_task = next(iter(self._tasks.values()))
while root_task.parent_nursery is not None:
root_task = root_task.parent_nursery.parent_task
def _format_task(task):
return "%s (id=%s, %s%s)" % (
task.name,
task._monitor_short_id,
task._monitor_state,
", shielded" if is_shielded_task(task) else "",
)
task_tree = render_task_tree(root_task, _format_task)
sout.write(task_tree)
def command_where(self, sout, taskid):
task = self.get_task_from_short_id(taskid)
if task:
def walk_coro_stack(coro):
while coro is not None:
if hasattr(coro, "cr_frame"):
# A real coroutine
yield coro.cr_frame, coro.cr_frame.f_lineno
coro = coro.cr_await
elif hasattr(coro, "gi_frame"):
# A generator decorated with @types.coroutine
yield coro.gi_frame, coro.gi_frame.f_lineno
coro = coro.gi_yieldfrom
else:
# A coroutine wrapper (used by AsyncGenerator for
# instance), cannot go further
return
ss = traceback.StackSummary.extract(walk_coro_stack(task.coro))
tb = "".join(ss.format())
sout.write(tb + "\n")
else:
sout.write("No task %d\n" % taskid)
def command_signal(self, sout, signame):
if hasattr(signal, signame):
os.kill(os.getpid(), getattr(signal, signame))
else:
sout.write("Unknown signal %s\n" % signame)
def command_cancel(self, sout, taskid):
        # TODO: how to cancel a single task?
# Another solution could be to also display nurseries/cancel_scopes in
# the monitor and allow to cancel them. Given timeout are handled
# by cancel_scope, this could also allow us to monitor the remaining
# time (and task depending on it) in such object.
sout.write("Not supported yet...")
def command_parents(self, sout, taskid):
task = self.get_task_from_short_id(taskid)
while task:
sout.write("%-6d %12s %s\n" % (task._monitor_short_id, "running", task.name))
task = task.parent_nursery._parent_task if task.parent_nursery else None
def command_exit(self, sout):
sout.write("Leaving monitor. Hit Ctrl-C to exit\n")
sout.flush()
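# Usage sketch (an illustration, not part of the original module): a Monitor
# instance is a Trio "instrument", so the hooks above (task_spawned,
# before_task_step, after_run, ...) are invoked by the run loop once the
# instrument is registered with trio.run(). The constructor arguments are
# whatever the Monitor class defined earlier in this module expects; it is
# shown here without arguments purely as an assumption:
#
#     import trio
#
#     async def amain():
#         ...  # application code
#
#     trio.run(amain, instruments=[Monitor()])
#
# The running monitor can then be reached via main() below
# ("python -m trio.monitor") or any telnet client pointed at
# MONITOR_HOST:MONITOR_PORT.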
def monitor_client(host, port):
"""
Client to connect to the monitor via "telnet"
"""
tn = telnetlib.Telnet()
tn.open(host, port, timeout=0.5)
try:
tn.interact()
except KeyboardInterrupt:
pass
finally:
tn.close()
def main():
parser = argparse.ArgumentParser("usage: python -m trio.monitor [options]")
parser.add_argument(
"-H", "--host", dest="monitor_host", default=MONITOR_HOST, type=str, help="monitor host ip"
)
parser.add_argument(
"-p",
"--port",
dest="monitor_port",
default=MONITOR_PORT,
type=int,
help="monitor port number",
)
args = parser.parse_args()
monitor_client(args.monitor_host, args.monitor_port)
if __name__ == "__main__":
main()
|
agpl-3.0
| -6,085,939,352,050,087,000
| 32.403061
| 99
| 0.530548
| false
| 3.955891
| false
| false
| false
|
Ecotrust/hnfp
|
hnfp/migrations/0011_auto_20171025_0852.py
|
1
|
3297
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-10-25 15:52
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hnfp', '0010_auto_20171024_1648'),
]
operations = [
migrations.CreateModel(
name='Alert',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alert_date', models.CharField(blank=True, max_length=100, null=True)),
('alert_time', models.CharField(blank=True, max_length=20, null=True)),
('alert_type', models.CharField(blank=True, max_length=400, null=True)),
('alert_created', models.DateTimeField(auto_now_add=True)),
('alert_updated', models.DateTimeField(auto_now=True)),
('alert_username', models.CharField(blank=True, max_length=800, null=True)),
('alert_location', django.contrib.gis.db.models.fields.PointField(blank=True, default=None, null=True, srid=3857)),
('alert_comment', models.CharField(blank=True, default=None, max_length=20000, null=True)),
('alert_confirmed', models.BooleanField(default=False)),
],
options={
'verbose_name_plural': 'Alerts',
},
),
migrations.CreateModel(
name='LandUseProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=300, null=True)),
('category', models.CharField(blank=True, choices=[('Forest', 'forest'), ('Road', 'road'), ('Stream', 'stream')], max_length=400, null=True)),
('summary', models.CharField(blank=True, default=None, max_length=20000, null=True)),
('description', models.CharField(blank=True, default=None, max_length=20000, null=True)),
('start_date', models.CharField(blank=True, max_length=200, null=True)),
('completion_date', models.CharField(blank=True, max_length=200, null=True)),
('actions', models.CharField(blank=True, max_length=20000, null=True)),
('dollar_costs', models.CharField(blank=True, max_length=4000, null=True)),
('emdollars', models.CharField(blank=True, max_length=4000, null=True)),
('area', django.contrib.gis.db.models.fields.PolygonField(blank=True, default=None, null=True, srid=3857)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('published', models.BooleanField(default=False)),
('username', models.CharField(blank=True, max_length=400, null=True)),
],
options={
'verbose_name_plural': 'Land Use Projects',
},
),
migrations.AddField(
model_name='observation',
name='observer_username',
field=models.CharField(blank=True, max_length=800, null=True),
),
]
|
isc
| -4,770,443,177,926,591,000
| 52.177419
| 158
| 0.584471
| false
| 4.025641
| false
| false
| false
|
alisa-ipn/writing-composition-crawler
|
src/crawl.py
|
1
|
2670
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 21:49:22 2015
@author: Alisa
"""
import os
import utils, urlutils
URL1 = "http://abc-english-grammar.com/1/sochinenia_po_angliiskomu_yaziku.htm"
str_to_look = "<a href=\"http://abc-english-grammar.com/1/sochinenia_po_angliiskomu_yaziku"
URL2 = "http://en365.ru/topic.htm"
URL3 = "http://www.native-english.ru/topics"
URL4 = "http://iloveenglish.ru/topics"
URL5 = "http://www.alleng.ru/english/top_08.htm"
PREFIX = "E://Study//SpeechRecognProject//crawl//raw_data//"
def get_next_target(page):
start_link = page.find(str_to_look)
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
content = urlutils.get_page(url)
write_file(content, page, end_quote)
return url, end_quote
def write_file(content, page, end_quote):
name = get_file_name(page, end_quote)
fname0 = dir_name + "//" + name
fname = fname0
i = 0
    while os.path.isfile(fname):
        i += 1
        fname = fname0[:-5] + "-" + str(i) + '.html'
    fout = open(fname, 'w')
    print fname
    fout.write(content)
    fout.close()
def get_target_urls(content):
links = []
while True:
url, endpos = get_next_target(content)
if url:
links.append(url)
content = content[endpos:]
else:
break
return links
def get_file_name(page, end_quote):
start_name = page.find(">", end_quote)+1
end_name = page.find("<", start_name)
if page.find("-", start_name, end_name) > -1 :
end_name = page.find("-", start_name)
fname = page[start_name:end_name]
fname = fname.strip()
fname = fname.replace(" ", "_")
fname = fname.replace("/", "_")
fname = fname.replace("?", "_")
fname = fname.replace("!", "_")
fname = fname.replace("'", "_")
print fname
return fname + ".html"
def crawl_url(seed): # returns list of crawled links
crawled = []
content = urlutils.get_page(seed)
crawled = get_target_urls(content)
return crawled
# create a directory with the name of the URL if it does not exist
# if it exists, clean the files from the directory
dir_name = urlutils.get_stem_url(URL1)
dir_name = PREFIX+dir_name
if not os.path.exists(dir_name):
os.mkdir(dir_name)
utils.clean_dir(dir_name)
#crawl urls
crawled = crawl_url(URL1)
fout = open(dir_name+"//_url_list.txt",'w')
utils.print_list(crawled, fout)
fout.close()
|
mit
| 1,130,901,153,823,493,900
| 24.673077
| 91
| 0.582397
| false
| 3.090278
| false
| false
| false
|
alexschiller/osf.io
|
api/logs/serializers.py
|
1
|
7104
|
from rest_framework import serializers as ser
from api.base.serializers import (
JSONAPISerializer,
RelationshipField,
RestrictedDictSerializer,
LinksField,
is_anonymized,
DateByVersion,
)
from website.project.model import Node
from website.util import permissions as osf_permissions
from framework.auth.core import User
from website.preprints.model import PreprintService
class NodeLogIdentifiersSerializer(RestrictedDictSerializer):
doi = ser.CharField(read_only=True)
ark = ser.CharField(read_only=True)
class NodeLogInstitutionSerializer(RestrictedDictSerializer):
id = ser.CharField(read_only=True)
name = ser.CharField(read_only=True)
class NodeLogFileParamsSerializer(RestrictedDictSerializer):
materialized = ser.CharField(read_only=True)
url = ser.URLField(read_only=True)
addon = ser.CharField(read_only=True)
node_url = ser.URLField(read_only=True, source='node.url')
node_title = ser.SerializerMethodField()
def get_node_title(self, obj):
user = self.context['request'].user
node_title = obj['node']['title']
node = Node.load(obj['node']['_id'])
if node.has_permission(user, osf_permissions.READ):
return node_title
return 'Private Component'
class NodeLogParamsSerializer(RestrictedDictSerializer):
addon = ser.CharField(read_only=True)
bucket = ser.CharField(read_only=True)
citation_name = ser.CharField(read_only=True, source='citation.name')
contributors = ser.SerializerMethodField(read_only=True)
data_set = ser.CharField(read_only=True, source='dataset')
destination = NodeLogFileParamsSerializer(read_only=True)
figshare_title = ser.CharField(read_only=True, source='figshare.title')
forward_url = ser.CharField(read_only=True)
github_user = ser.CharField(read_only=True, source='github.user')
github_repo = ser.CharField(read_only=True, source='github.repo')
file = ser.DictField(read_only=True)
filename = ser.CharField(read_only=True)
kind = ser.CharField(read_only=True)
folder = ser.CharField(read_only=True)
folder_name = ser.CharField(read_only=True)
license = ser.CharField(read_only=True, source='new_license')
identifiers = NodeLogIdentifiersSerializer(read_only=True)
institution = NodeLogInstitutionSerializer(read_only=True)
old_page = ser.CharField(read_only=True)
page = ser.CharField(read_only=True)
page_id = ser.CharField(read_only=True)
params_node = ser.SerializerMethodField(read_only=True)
params_project = ser.SerializerMethodField(read_only=True)
path = ser.CharField(read_only=True)
pointer = ser.DictField(read_only=True)
preprint = ser.CharField(read_only=True)
preprint_provider = ser.SerializerMethodField(read_only=True)
previous_institution = NodeLogInstitutionSerializer(read_only=True)
source = NodeLogFileParamsSerializer(read_only=True)
study = ser.CharField(read_only=True)
tag = ser.CharField(read_only=True)
tags = ser.CharField(read_only=True)
target = NodeLogFileParamsSerializer(read_only=True)
template_node = ser.DictField(read_only=True)
title_new = ser.CharField(read_only=True)
title_original = ser.CharField(read_only=True)
updated_fields = ser.DictField(read_only=True)
urls = ser.DictField(read_only=True)
version = ser.CharField(read_only=True)
wiki = ser.DictField(read_only=True)
def get_view_url(self, obj):
urls = obj.get('urls', None)
if urls:
view = urls.get('view', None)
if view:
return view
return None
def get_params_node(self, obj):
node_id = obj.get('node', None)
if node_id:
node = Node.load(node_id)
return {'id': node_id, 'title': node.title}
return None
def get_params_project(self, obj):
project_id = obj.get('project', None)
if project_id:
node = Node.load(project_id)
return {'id': project_id, 'title': node.title}
return None
def get_contributors(self, obj):
contributor_info = []
if is_anonymized(self.context['request']):
return contributor_info
contributor_ids = obj.get('contributors', None)
params_node = obj.get('node', None)
if contributor_ids:
for contrib_id in contributor_ids:
user = User.load(contrib_id)
unregistered_name = None
if user.unclaimed_records.get(params_node):
unregistered_name = user.unclaimed_records[params_node].get('name', None)
contributor_info.append({
'id': contrib_id,
'full_name': user.fullname,
'given_name': user.given_name,
'middle_names': user.middle_names,
'family_name': user.family_name,
'unregistered_name': unregistered_name,
'active': user.is_active
})
return contributor_info
def get_preprint_provider(self, obj):
preprint_id = obj.get('preprint', None)
if preprint_id:
preprint = PreprintService.load(preprint_id)
if preprint:
provider = preprint.provider
return {'url': provider.external_url, 'name': provider.name}
return None
class NodeLogSerializer(JSONAPISerializer):
filterable_fields = frozenset(['action', 'date'])
non_anonymized_fields = [
'id',
'date',
'action',
]
id = ser.CharField(read_only=True, source='_id')
date = DateByVersion(read_only=True)
action = ser.CharField(read_only=True)
params = NodeLogParamsSerializer(read_only=True)
links = LinksField({'self': 'get_absolute_url'})
class Meta:
type_ = 'logs'
node = RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<node._id>'},
)
original_node = RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<original_node._id>'},
)
user = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<user._id>'},
)
# This would be a node_link, except that data isn't stored in the node log params
linked_node = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<params.pointer.id>'}
)
template_node = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<params.template_node.id>'}
)
def get_absolute_url(self, obj):
return obj.absolute_url
|
apache-2.0
| -8,319,683,326,383,449,000
| 36.193717
| 132
| 0.652168
| false
| 3.72327
| false
| false
| false
|
classcat/cctf
|
cctf/initializations.py
|
1
|
10251
|
from __future__ import division, print_function, absolute_import
import math
import tensorflow as tf
try:
from tensorflow.contrib.layers.python.layers.initializers import \
xavier_initializer
except Exception:
xavier_initializer = None
try:
from tensorflow.contrib.layers.python.layers.initializers import \
variance_scaling_initializer
except Exception:
variance_scaling_initializer = None
from .utils import get_from_module
def get(identifier):
if hasattr(identifier, '__call__'):
return identifier
else:
return get_from_module(identifier, globals(), 'initialization')
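# For example, get('zeros') resolves the string against this module's globals
# and returns the zeros() function defined below, while get(some_callable)
# returns the callable unchanged.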
def zeros(shape=None, dtype=tf.float32, seed=None):
""" Zeros.
Initialize a tensor with all elements set to zero.
Arguments:
shape: List of `int`. A shape to initialize a Tensor (optional).
dtype: The tensor data type.
Returns:
The Initializer, or an initialized `Tensor` if a shape is specified.
"""
if shape:
return tf.zeros(shape, dtype=dtype)
else:
return tf.constant_initializer(0.)
def uniform(shape=None, minval=0, maxval=None, dtype=tf.float32, seed=None):
""" Uniform.
Initialization with random values from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range,
while the upper bound `maxval` is excluded.
For floats, the default range is `[0, 1)`. For ints, at least `maxval`
must be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
Arguments:
shape: List of `int`. A shape to initialize a Tensor (optional).
        dtype: The tensor data type. Only floats are supported.
seed: `int`. Used to create a random seed for the distribution.
Returns:
The Initializer, or an initialized `Tensor` if shape is specified.
"""
if shape:
return tf.random_uniform(shape, minval=minval, maxval=maxval,
seed=seed, dtype=dtype)
else:
return tf.random_uniform_initializer(minval=minval, maxval=maxval,
seed=seed, dtype=dtype)
def uniform_scaling(shape=None, factor=1.0, dtype=tf.float32, seed=None):
""" Uniform Scaling.
Initialization with random values from uniform distribution without scaling
variance.
When initializing a deep network, it is in principle advantageous to keep
the scale of the input variance constant, so it does not explode or diminish
by reaching the final layer. If the input is `x` and the operation `x * W`,
and we want to initialize `W` uniformly at random, we need to pick `W` from
[-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)]
to keep the scale intact, where `dim = W.shape[0]` (the size of the input).
A similar calculation for convolutional networks gives an analogous result
with `dim` equal to the product of the first 3 dimensions. When
nonlinearities are present, we need to multiply this by a constant `factor`.
See [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558)
([pdf](http://arxiv.org/pdf/1412.6558.pdf)) for deeper motivation, experiments
and the calculation of constants. In section 2.3 there, the constants were
numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15.
Arguments:
shape: List of `int`. A shape to initialize a Tensor (optional).
factor: `float`. A multiplicative factor by which the values will be
scaled.
        dtype: The tensor data type. Only floats are supported.
seed: `int`. Used to create a random seed for the distribution.
Returns:
The Initializer, or an initialized `Tensor` if shape is specified.
"""
if shape:
input_size = 1.0
for dim in shape[:-1]:
input_size *= float(dim)
max_val = math.sqrt(3 / input_size) * factor
        return tf.random_uniform(shape, -max_val, max_val,
                                 dtype=dtype, seed=seed)
else:
return tf.uniform_unit_scaling_initializer(seed=seed, dtype=dtype)
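# Illustrative use (a sketch): for a fully connected layer with 256 inputs and
# factor=1.0, the bound works out to sqrt(3 / 256) ~= 0.108, i.e. weights are
# drawn uniformly from [-0.108, 0.108]:
#
#     W_init = uniform_scaling(shape=[256, 128], factor=1.0)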
def normal(shape=None, mean=0.0, stddev=0.02, dtype=tf.float32, seed=None):
""" Normal.
Initialization with random values from a normal distribution.
Arguments:
shape: List of `int`. A shape to initialize a Tensor (optional).
mean: Same as `dtype`. The mean of the truncated normal distribution.
stddev: Same as `dtype`. The standard deviation of the truncated
normal distribution.
dtype: The tensor data type.
seed: `int`. Used to create a random seed for the distribution.
Returns:
The Initializer, or an initialized `Tensor` if shape is specified.
"""
if shape:
return tf.random_normal(shape, mean=mean, stddev=stddev, seed=seed,
dtype=dtype)
else:
return tf.random_normal_initializer(mean=mean, stddev=stddev,
seed=seed, dtype=dtype)
def truncated_normal(shape=None, mean=0.0, stddev=0.02, dtype=tf.float32,
seed=None):
""" Truncated Normal.
Initialization with random values from a normal truncated distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Arguments:
shape: List of `int`. A shape to initialize a Tensor (optional).
mean: Same as `dtype`. The mean of the truncated normal distribution.
stddev: Same as `dtype`. The standard deviation of the truncated
normal distribution.
dtype: The tensor data type.
seed: `int`. Used to create a random seed for the distribution.
Returns:
The Initializer, or an initialized `Tensor` if shape is specified.
"""
if shape:
return tf.truncated_normal(shape=shape, mean=mean, stddev=stddev,
seed=seed, dtype=dtype)
else:
return tf.truncated_normal_initializer(mean=mean, stddev=stddev,
seed=seed, dtype=dtype)
def xavier(uniform=True, seed=None, dtype=tf.float32):
""" Xavier.
Returns an initializer performing "Xavier" initialization for weights.
This initializer is designed to keep the scale of the gradients roughly the
same in all layers. In uniform distribution this ends up being the range:
`x = sqrt(6. / (in + out)); [-x, x]` and for normal distribution a standard
deviation of `sqrt(3. / (in + out))` is used.
Arguments:
uniform: Whether to use uniform or normal distributed random
initialization.
seed: A Python integer. Used to create random seeds. See
`set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer for a weight matrix.
References:
Understanding the difficulty of training deep feedforward neural
networks. International conference on artificial intelligence and
statistics. Xavier Glorot and Yoshua Bengio (2010).
Links:
[http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf]
(http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
"""
if xavier_initializer is None:
raise NotImplementedError("'xavier_initializer' not supported, "
"please update TensorFlow.")
return xavier_initializer(uniform=uniform, seed=seed, dtype=dtype)
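# Illustrative use (a sketch): for a 784 -> 256 dense layer with the uniform
# variant, the Xavier bound is sqrt(6 / (784 + 256)) ~= 0.076:
#
#     W = tf.get_variable("W", shape=[784, 256],
#                         initializer=xavier(uniform=True))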
def variance_scaling(factor=2.0, mode='FAN_IN', uniform=False, seed=None,
dtype=tf.float32):
""" Variance Scaling.
Returns an initializer that generates tensors without scaling variance.
When initializing a deep network, it is in principle advantageous to keep
the scale of the input variance constant, so it does not explode or diminish
by reaching the final layer. This initializer use the following formula:
```
if mode='FAN_IN': # Count only number of input connections.
n = fan_in
elif mode='FAN_OUT': # Count only number of output connections.
n = fan_out
elif mode='FAN_AVG': # Average number of inputs and output connections.
n = (fan_in + fan_out)/2.0
truncated_normal(shape, 0.0, stddev=sqrt(factor / n))
```
To get http://arxiv.org/pdf/1502.01852v1.pdf use (Default):
- factor=2.0 mode='FAN_IN' uniform=False
To get http://arxiv.org/abs/1408.5093 use:
- factor=1.0 mode='FAN_IN' uniform=True
To get http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf use:
- factor=1.0 mode='FAN_AVG' uniform=True.
To get xavier_initializer use either:
- factor=1.0 mode='FAN_AVG' uniform=True.
- factor=1.0 mode='FAN_AVG' uniform=False.
Arguments:
factor: Float. A multiplicative factor.
mode: String. 'FAN_IN', 'FAN_OUT', 'FAN_AVG'.
uniform: Whether to use uniform or normal distributed random
initialization.
seed: A Python integer. Used to create random seeds. See
`set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer that generates tensors with unit variance.
Raises:
ValueError: if `dtype` is not a floating point type.
TypeError: if `mode` is not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG'].
"""
if variance_scaling_initializer is None:
raise NotImplementedError("'variance_scaling_initializer' not "
"supported, please update TensorFlow.")
return variance_scaling_initializer(factor=factor, mode=mode,
uniform=uniform, seed=seed,
dtype=dtype)
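# Illustrative use (a sketch): the defaults (factor=2.0, mode='FAN_IN',
# uniform=False) give He initialization; for fan_in=512 the resulting stddev
# is sqrt(2 / 512) = 0.0625:
#
#     W = tf.get_variable("W", shape=[512, 256],
#                         initializer=variance_scaling())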
|
agpl-3.0
| -1,623,474,419,808,075,000
| 37.537594
| 82
| 0.651546
| false
| 4.116867
| false
| false
| false
|
AlexOugh/horizon
|
openstack_dashboard/dashboards/nikolaboard/usagepanel/tabs.py
|
1
|
1155
|
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.nikolaboard.usagepanel import tables
class UsageTab(tabs.TableTab):
name = _("List")
slug = "usage_tab"
table_classes = (tables.UsageTable,)
template_name = ("horizon/common/_detail_table.html")
preload = False
def has_more_data(self, table):
return self._has_more
def get_usage_data(self):
try:
marker = self.request.GET.get(
tables.UsageTable._meta.pagination_param, None)
usages, self._has_more, has_prev_data = api.nikola.usage.list_usages(
self.request,
search_opts={'marker': marker, 'paginate': True})
return usages
except Exception:
self._has_more = False
error_message = _('Unable to get usages')
exceptions.handle(self.request, error_message)
return []
class UsagepanelTabs(tabs.TabGroup):
slug = "usagepanel_tabs"
tabs = (UsageTab,)
sticky = True
|
apache-2.0
| 1,395,974,317,262,188,300
| 28.615385
| 81
| 0.62684
| false
| 4.066901
| false
| false
| false
|
eaudeweb/lcc-toolkit
|
lcc/views/legislation.py
|
1
|
24257
|
import json
import operator
from functools import reduce
from django import views
from django.conf import settings
from django.contrib.auth import mixins
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q as DjQ, IntegerField
from django.db.models.functions import Cast
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.views.generic import (
ListView, CreateView, DetailView, UpdateView, DeleteView
)
from elasticsearch_dsl import Q
from lcc import models, constants, forms
from lcc.constants import LEGISLATION_YEAR_RANGE
from lcc.documents import LegislationDocument
from lcc.views.base import TagGroupRender, TaxonomyFormMixin
from lcc.views.country import (
CountryMetadataFiltering,
POP_RANGES,
HDI_RANGES,
GDP_RANGES,
GHG_LUCF,
GHG_NO_LUCF,
)
CONN = settings.TAXONOMY_CONNECTOR
class HighlightedLaws:
"""
This class wraps a Search instance and is compatible with Django's
pagination API.
"""
def __init__(self, search, sort=None):
self.search = search
self.sort = sort
def __getitem__(self, key):
hits = self.search[key]
if self.sort:
return hits.sort(self.sort).to_queryset()
laws = []
matched_article_tags = []
matched_article_classifications = []
for hit, law in zip(hits, hits.to_queryset()):
if hasattr(hit.meta, 'highlight'):
highlights = hit.meta.highlight.to_dict()
if 'abstract' in highlights:
law._highlighted_abstract = mark_safe(
' […] '.join(highlights['abstract'])
)
if 'pdf_text' in highlights:
law._highlighted_pdf_text = mark_safe(
' […] '.join(
highlights['pdf_text']
).replace('<pre>', '').replace('</pre>', '')
)
if 'title' in highlights:
law._highlighted_title = mark_safe(highlights['title'][0])
if 'classifications' in highlights:
law._highlighted_classifications = [
mark_safe(classification)
for classification in (
highlights['classifications'][0].split(CONN))
]
if 'article_classifications' in highlights:
matched_article_classifications += [
tag[4:-5] for tag in (
highlights['article_classifications'][0].split(CONN))
if '<em>' in tag
]
if 'tags' in highlights:
law._highlighted_tags = [
mark_safe(tag)
for tag in highlights['tags'][0].split(CONN)
]
if 'article_tags' in highlights:
matched_article_tags += [
tag[4:-5] for tag in (
highlights['article_tags'][0].split(CONN))
if '<em>' in tag
]
if hasattr(hit.meta, 'inner_hits'):
law._highlighted_articles = []
if hit.meta.inner_hits.articles:
for article in hit.meta.inner_hits.articles.hits:
article_dict = {
'pk': article.pk,
'code': article.code
}
if not hasattr(article.meta, 'highlight'):
continue
highlights = article.meta.highlight.to_dict()
matched_text = highlights.get('articles.text')
if matched_text:
article_dict['text'] = mark_safe(
' […] '.join(matched_text)
)
matched_classifications = (
highlights.get(
'articles.classifications_text')
)
if matched_classifications:
article_dict['classifications'] = [
mark_safe(classification)
for classification in (
matched_classifications[0].split(CONN))
]
matched_tags = highlights.get(
'articles.tags_text')
if matched_tags:
article_dict['tags'] = [
mark_safe(tag)
for tag in (
matched_tags[0].split(CONN))
]
law._highlighted_articles.append(article_dict)
elif matched_article_classifications or matched_article_tags:
# NOTE: This is a hack. ElasticSearch won't return
# highlighted article tags in some cases so this workaround
# is necessary. Please fix if you know how. Try searching
# for a keyword that is in the title of a law, and filtering
# by a tag that is assigned to an article of that law, but
# not the law itself. The query will work (it will only
# return the law that has such an article, and not others),
# but the inner_hits will be empty.
law._highlighted_articles = []
articles = law.articles.filter(
DjQ(tags__name__in=matched_article_tags) |
DjQ(
classifications__name__in=(
matched_article_classifications)
)
).prefetch_related('tags')
for article in articles:
article_dict = {
'pk': article.pk,
'code': article.code,
'classifications': [
mark_safe('<em>{}</em>'.format(cl.name))
if cl.name in matched_article_classifications
else cl.name
for cl in article.classifications.all()
],
'tags': [
mark_safe('<em>{}</em>'.format(tag.name))
if tag.name in matched_article_tags
else tag.name
for tag in article.tags.all()
]
}
law._highlighted_articles.append(article_dict)
laws.append(law)
return laws
def count(self):
return self.search.count()
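# Only __getitem__ and count() are needed for Django's Paginator to slice a
# HighlightedLaws instance, e.g. Paginator(HighlightedLaws(search), 10).page(1)
# -- see LegislationExplorer.get_queryset() below for the actual call.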
class LegislationExplorer(CountryMetadataFiltering, ListView):
template_name = "legislation/explorer.html"
model = models.Legislation
def get_sort(self):
promulgation_sort = self.request.GET.get("promulgation_sort")
country_sort = self.request.GET.get("country_sort")
if promulgation_sort:
if promulgation_sort == '1':
return 'year'
else:
return '-year'
if country_sort:
if country_sort == '1':
return 'country_name'
else:
return '-country_name'
def get_queryset(self):
"""
Perform filtering using ElasticSearch instead of Postgres.
        Note that this DOES NOT return a QuerySet object, it returns a Page
object instead. This is necessary because by transforming an
elasticsearch-dsl Search object into a QuerySet a lot of functionality
is lost, so we need to make things a bit more custom.
"""
law_queries = []
article_queries = []
article_highlights = {}
        # jQuery's ajax function adds `[]` to duplicated querystring parameters
        # or parameters whose values are objects, so we have to take that into
        # account when looking for our values in the querystring. More info at:
# - http://api.jquery.com/jQuery.param/
# List of strings representing TaxonomyClassification ids
classification_ids = [
int(pk) for pk in self.request.GET.getlist('classifications[]')]
if classification_ids:
classification_names = models.TaxonomyClassification.objects.filter(
pk__in=classification_ids).values_list('name', flat=True)
# Search root document for any of the classifications received
law_queries.append(
reduce(
operator.or_,
[
Q('match_phrase', classifications=name)
for name in classification_names
]
) | reduce(
operator.or_,
[
Q('match_phrase', article_classifications=name)
for name in classification_names
]
)
)
# Search inside articles for any classifications
article_queries.append(
reduce(
operator.or_,
[
Q(
'match_phrase',
articles__classifications_text=name
) for name in classification_names
]
) | reduce(
operator.or_,
[
Q(
'match_phrase',
articles__parent_classifications=name
) for name in classification_names
]
)
)
article_highlights['articles.classifications_text'] = {
'number_of_fragments': 0
}
# List of strings representing TaxonomyTag ids
tag_ids = [int(pk) for pk in self.request.GET.getlist('tags[]')]
if tag_ids:
tag_names = models.TaxonomyTag.objects.filter(
pk__in=tag_ids).values_list('name', flat=True)
# Search root document
law_queries.append(
reduce(
operator.or_,
[
Q('match_phrase', tags=name)
for name in tag_names
]
) | reduce(
operator.or_,
[
Q('match_phrase', article_tags=name)
for name in tag_names
]
)
)
# Search inside articles
article_queries.append(
reduce(
operator.or_,
[
Q('match_phrase', articles__tags_text=name)
for name in tag_names
]
) | reduce(
operator.or_,
[
Q('match_phrase', articles__parent_tags=name)
for name in tag_names
]
)
)
article_highlights['articles.tags_text'] = {
'number_of_fragments': 0
}
# String to be searched in all text fields (full-text search using
# elasticsearch's default best_fields strategy)
q = self.request.GET.get('q')
law_q_query = []
article_q_query = []
if q:
# Compose root document search
law_q_query = [
Q(
'multi_match', query=q, fields=[
'title', 'abstract', 'pdf_text', 'classifications',
'tags'
]
)
]
# Compose nested document search inside articles
article_q_query = [
Q('multi_match', query=q, fields=['articles.text']) |
Q(
'constant_score', boost=50, filter={
"match_phrase": {
"articles.text": q
}
}
)
]
article_q_highlights = {'articles.text': {}}
search = LegislationDocument.search()
sort = self.get_sort()
if not sort:
if q:
q_in_law = Q(
'bool', must=law_queries + law_q_query + ([
Q(
'nested',
score_mode='max',
# boost=10,
path='articles',
query=Q(
reduce(
operator.and_,
article_queries
)
),
inner_hits={
'highlight': {'fields': article_highlights}
}
)
] if article_queries else [])
)
q_in_article = Q(
'bool', must=law_queries + ([
Q(
'nested',
score_mode='max',
# boost=10,
path='articles',
query=Q(
reduce(
operator.and_,
article_queries + article_q_query
)
),
inner_hits={
'highlight': {
'fields': {
**article_highlights,
**article_q_highlights
}
}
}
)
] if article_queries or article_q_query else [])
)
search = search.query(q_in_law | q_in_article).highlight(
'abstract', 'pdf_text'
)
else:
root_query = [Q(
reduce(
operator.and_,
law_queries
)
)] if law_queries else []
nested_query = [Q(
'nested',
score_mode='max',
# boost=10,
path='articles',
query=Q(
reduce(
operator.and_,
article_queries
)
),
inner_hits={
'highlight': {'fields': article_highlights}
}
)] if article_queries else []
final_query = []
if root_query:
final_query += root_query
if nested_query:
# Necessary for highlights
final_query += root_query and nested_query
if final_query:
search = search.query(
'bool', should=final_query,
minimum_should_match=1
)
# String representing country iso code
countries = self.request.GET.getlist('countries[]')
selected_countries = False
if countries:
selected_countries = True
filtering_countries = self.filter_countries(self.request, selected_countries=selected_countries)
if countries or filtering_countries.count() != models.Country.objects.all().count():
countries.extend([country.iso for country in filtering_countries])
search = search.query('terms', country=countries)
# String representing law_type
law_types = self.request.GET.getlist('law_types[]')
if law_types:
search = search.query('terms', law_type=law_types)
# String representing the minimum year allowed in the results
from_year = self.request.GET.get('from_year')
# String representing the maximum year allowed in the results
to_year = self.request.GET.get('to_year')
if all([from_year, to_year]):
search = search.query(
Q('range', year={'gte': int(from_year), 'lte': int(to_year)}) |
Q('range', year_amendment={
'gte': int(from_year), 'lte': int(to_year)}) |
Q('range', year_mentions={
'gte': int(from_year), 'lte': int(to_year)})
)
search = search.highlight(
'title', 'classifications', 'article_classifications', 'tags',
'article_tags', number_of_fragments=0
)
if not any([classification_ids, tag_ids, q]):
# If there is no score to sort by, sort by id
search = search.sort('id')
# import json; print(json.dumps(search.to_dict(), indent=2))
all_laws = HighlightedLaws(search, sort)
paginator = Paginator(all_laws, settings.LAWS_PER_PAGE)
page = self.request.GET.get('page', 1)
try:
laws = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
laws = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
laws = paginator.page(paginator.num_pages)
return laws
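    # Illustrative querystring handled by get_queryset() above (the values are
    # invented; the parameter names are the ones read from self.request.GET):
    #   ?q=forest&classifications[]=3&tags[]=7&countries[]=FR
    #    &law_types[]=Law&from_year=1990&to_year=2020&page=2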
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
group_tags = models.TaxonomyTagGroup.objects.all()
top_classifications = models.TaxonomyClassification.objects.filter(
level=0).annotate(
code_as_int=Cast('code', output_field=IntegerField())
).order_by('code_as_int')
countries = models.Country.objects.all().order_by('name')
regions = models.Region.objects.all().order_by('name')
sub_regions = models.SubRegion.objects.all().order_by('name')
legal_systems = models.LegalSystem.objects.all().order_by('name')
laws = self.object_list
legislation_year = (
LEGISLATION_YEAR_RANGE[0],
LEGISLATION_YEAR_RANGE[len(LEGISLATION_YEAR_RANGE) - 1]
)
filters_dict = dict(self.request.GET)
context.update({
'laws': laws,
'group_tags': group_tags,
'top_classifications': top_classifications,
'countries': countries,
'regions': regions,
'sub_regions': sub_regions,
'legal_systems': legal_systems,
'population': POP_RANGES,
'hdi2015': HDI_RANGES,
'gdp_capita': GDP_RANGES,
'ghg_no_lucf': GHG_NO_LUCF,
'ghg_lucf': GHG_LUCF,
'legislation_type': constants.LEGISLATION_TYPE,
'legislation_year': legislation_year,
'min_year': settings.MIN_YEAR,
'max_year': settings.MAX_YEAR,
'from_year': filters_dict.pop('from_year', [settings.MIN_YEAR])[0],
'to_year': filters_dict.pop('to_year', [settings.MAX_YEAR])[0],
'filters': json.dumps(filters_dict)
})
return context
class LegislationAdd(mixins.LoginRequiredMixin, TaxonomyFormMixin,
CreateView):
template_name = "legislation/add.html"
form_class = forms.LegislationForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
countries = sorted(models.Country.objects.all(), key=lambda c: c.name)
context.update({
"countries": countries,
"legislation_type": constants.LEGISLATION_TYPE,
"tag_groups": [
TagGroupRender(tag_group)
for tag_group in models.TaxonomyTagGroup.objects.all()
],
"available_languages": constants.ALL_LANGUAGES,
"source_types": constants.SOURCE_TYPE,
"geo_coverage": constants.GEOGRAPHICAL_COVERAGE,
"adoption_years": LEGISLATION_YEAR_RANGE,
"classifications": models.TaxonomyClassification.objects.filter(
level=0).order_by('code')
})
return context
def form_valid(self, form):
legislation = form.save()
legislation.save_pdf_pages()
if "save-and-continue-btn" in self.request.POST:
return HttpResponseRedirect(
reverse('lcc:legislation:articles:add',
kwargs={'legislation_pk': legislation.pk})
)
if "save-btn" in self.request.POST:
return HttpResponseRedirect(reverse("lcc:legislation:explorer"))
class LegislationView(DetailView):
template_name = "legislation/detail.html"
pk_url_kwarg = 'legislation_pk'
model = models.Legislation
context_object_name = 'law'
class LegislationPagesView(views.View):
def get(self, request, *args, **kwargs):
law = get_object_or_404(models.Legislation,
pk=kwargs['legislation_pk'])
pages = law.pages.all()
content = {}
for page in pages:
content[page.page_number] = page.page_text
return JsonResponse(content)
class LegislationEditView(mixins.LoginRequiredMixin, TaxonomyFormMixin,
UpdateView):
template_name = "legislation/edit.html"
model = models.Legislation
form_class = forms.LegislationForm
pk_url_kwarg = 'legislation_pk'
context_object_name = 'law'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
countries = sorted(models.Country.objects.all(), key=lambda c: c.name)
context.update({
"countries": countries,
"available_languages": constants.ALL_LANGUAGES,
"legislation_type": constants.LEGISLATION_TYPE,
"tag_groups": [
TagGroupRender(tag_group)
for tag_group in models.TaxonomyTagGroup.objects.all()
],
"classifications": models.TaxonomyClassification.objects.filter(
level=0).order_by('code'),
"adoption_years": LEGISLATION_YEAR_RANGE,
"source_types": constants.SOURCE_TYPE,
"geo_coverage": constants.GEOGRAPHICAL_COVERAGE,
})
return context
def form_valid(self, form):
legislation = form.save()
if 'pdf_file' in self.request.FILES:
models.LegislationPage.objects.filter(
legislation=legislation).delete()
legislation.save_pdf_pages()
return HttpResponseRedirect(
reverse('lcc:legislation:details',
kwargs={'legislation_pk': legislation.pk})
)
class LegislationDeleteView(mixins.LoginRequiredMixin, DeleteView):
model = models.Legislation
pk_url_kwarg = 'legislation_pk'
def get_success_url(self, **kwargs):
return reverse("lcc:legislation:explorer")
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
|
gpl-3.0
| 4,738,806,645,539,076,000
| 38.3047
| 108
| 0.474207
| false
| 4.8502
| false
| false
| false
|
leakim/svtplay-dl
|
lib/svtplay_dl/service/tv4play.py
|
1
|
7249
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
import os
import xml.etree.ElementTree as ET
import json
import copy
from svtplay_dl.utils.urllib import urlparse, parse_qs, quote_plus
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.utils import is_py2_old, filenamify
from svtplay_dl.log import log
from svtplay_dl.fetcher.hls import hlsparse, HLS
from svtplay_dl.fetcher.rtmp import RTMP
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.subtitle import subtitle
from svtplay_dl.error import ServiceError
class Tv4play(Service, OpenGraphThumbMixin):
supported_domains = ['tv4play.se', 'tv4.se']
def __init__(self, url):
Service.__init__(self, url)
self.subtitle = None
self.cookies = {}
def get(self, options):
data = self.get_urldata()
vid = findvid(self.url, data)
if vid is None:
yield ServiceError("Can't find video id for %s" % self.url)
return
if options.username and options.password:
data = self.http.request("get", "https://www.tv4play.se/session/new?https=")
auth_token = re.search('name="authenticity_token" ([a-z]+="[^"]+" )?value="([^"]+)"', data.text)
if not auth_token:
yield ServiceError("Can't find authenticity_token needed for user / password")
return
url = "https://www.tv4play.se/session"
postdata = {"user_name" : options.username, "password": options.password, "authenticity_token":auth_token.group(2), "https": ""}
data = self.http.request("post", url, data=postdata, cookies=self.cookies)
self.cookies = data.cookies
fail = re.search("<p class='failed-login'>([^<]+)</p>", data.text)
if fail:
yield ServiceError(fail.group(1))
return
url = "http://premium.tv4play.se/api/web/asset/%s/play" % vid
data = self.http.request("get", url, cookies=self.cookies)
if data.status_code == 401:
xml = ET.XML(data.content)
code = xml.find("code").text
if code == "SESSION_NOT_AUTHENTICATED":
yield ServiceError("Can't access premium content")
elif code == "ASSET_PLAYBACK_INVALID_GEO_LOCATION":
                yield ServiceError("Can't download this video because it is geoblocked.")
else:
yield ServiceError("Can't find any info for that video")
return
if data.status_code == 404:
yield ServiceError("Can't find the video api")
return
xml = ET.XML(data.content)
ss = xml.find("items")
if is_py2_old:
sa = list(ss.getiterator("item"))
else:
sa = list(ss.iter("item"))
if xml.find("live").text:
if xml.find("live").text != "false":
options.live = True
if xml.find("drmProtected").text == "true":
            yield ServiceError("We can't download DRM-protected content from this site.")
return
if options.output_auto:
directory = os.path.dirname(options.output)
options.service = "tv4play"
title = "%s-%s-%s" % (options.output, vid, options.service)
title = filenamify(title)
if len(directory):
options.output = os.path.join(directory, title)
else:
options.output = title
if self.exclude(options):
yield ServiceError("Excluding video")
return
for i in sa:
if i.find("mediaFormat").text == "mp4":
base = urlparse(i.find("base").text)
parse = urlparse(i.find("url").text)
if "rtmp" in base.scheme:
swf = "http://www.tv4play.se/flash/tv4playflashlets.swf"
options.other = "-W %s -y %s" % (swf, i.find("url").text)
yield RTMP(copy.copy(options), i.find("base").text, i.find("bitrate").text)
                elif parse.path.endswith("f4m"):
streams = hdsparse(copy.copy(options), self.http.request("get", i.find("url").text, params={"hdcore": "3.7.0"}).text, i.find("url").text)
if streams:
for n in list(streams.keys()):
yield streams[n]
elif i.find("mediaFormat").text == "smi":
yield subtitle(copy.copy(options), "smi", i.find("url").text)
url = "http://premium.tv4play.se/api/web/asset/%s/play?protocol=hls" % vid
data = self.http.request("get", url, cookies=self.cookies).content
xml = ET.XML(data)
ss = xml.find("items")
if is_py2_old:
sa = list(ss.getiterator("item"))
else:
sa = list(ss.iter("item"))
for i in sa:
if i.find("mediaFormat").text == "mp4":
parse = urlparse(i.find("url").text)
if parse.path.endswith("m3u8"):
streams = hlsparse(i.find("url").text, self.http.request("get", i.find("url").text).text)
for n in list(streams.keys()):
yield HLS(copy.copy(options), streams[n], n)
def find_all_episodes(self, options):
parse = urlparse(self.url)
show = parse.path[parse.path.find("/", 1)+1:]
if not re.search("%", show):
show = quote_plus(show)
data = self.http.request("get", "http://webapi.tv4play.se/play/video_assets?type=episode&is_live=false&platform=web&node_nids=%s&per_page=99999" % show).text
jsondata = json.loads(data)
episodes = []
n = 1
for i in jsondata["results"]:
try:
days = int(i["availability"]["availability_group_free"])
except (ValueError, TypeError):
days = 999
if days > 0:
video_id = i["id"]
url = "http://www.tv4play.se/program/%s?video_id=%s" % (
show, video_id)
episodes.append(url)
if n == options.all_last:
break
n += 1
return episodes
def findvid(url, data):
parse = urlparse(url)
if "tv4play.se" in url:
try:
vid = parse_qs(parse.query)["video_id"][0]
except KeyError:
return None
else:
match = re.search(r"\"vid\":\"(\d+)\",", data)
if match:
vid = match.group(1)
else:
match = re.search(r"-(\d+)$", url)
if match:
vid = match.group(1)
else:
match = re.search(r"meta content='([^']+)' property='og:video'", data)
if match:
match = re.search(r"vid=(\d+)&", match.group(1))
if match:
vid = match.group(1)
else:
log.error("Can't find video id for %s", url)
return
else:
return None
return vid
|
mit
| 4,481,248,987,106,100,000
| 39.730337
| 165
| 0.531522
| false
| 3.769631
| false
| false
| false
|
Lorquas/subscription-manager
|
test/test_cache.py
|
1
|
45245
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
#
# Copyright (c) 2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
try:
import unittest2 as unittest
except ImportError:
import unittest
import os
import logging
import random
import shutil
import socket
import tempfile
import time
from mock import Mock, patch, mock_open
# used to get a user readable cfg class for test cases
from .stubs import StubProduct, StubProductCertificate, StubCertificateDirectory, \
StubEntitlementCertificate, StubPool, StubEntitlementDirectory
from .fixture import SubManFixture
from rhsm import ourjson as json
from subscription_manager.cache import ProfileManager, \
InstalledProductsManager, EntitlementStatusCache, \
PoolTypeCache, ReleaseStatusCache, ContentAccessCache, \
PoolStatusCache, ContentAccessModeCache, SupportedResourcesCache, \
AvailableEntitlementsCache
from rhsm.profile import Package, RPMProfile, EnabledReposProfile, ModulesProfile
from rhsm.connection import RestlibException, UnauthorizedException, \
RateLimitExceededException
from subscription_manager import injection as inj
from subscription_manager import isodate, cache
log = logging.getLogger(__name__)
class _FACT_MATCHER(object):
def __eq__(self, other):
return True
FACT_MATCHER = _FACT_MATCHER()
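# FACT_MATCHER compares equal to anything, so assertions such as
# uep.updatePackageProfile.assert_called_with(uuid, FACT_MATCHER) accept
# whatever profile payload was actually sent.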
CONTENT_REPO_FILE = """
[awesome-os-for-x86_64-upstream-rpms]
name = Awesome OS for x86_64 - Upstream (RPMs)
baseurl = https://cdn.awesome.com/content/dist/awesome/$releasever/x86_64/upstream/os
enabled = 1
gpgcheck = 1
gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-awesome-release
sslverify = 1
sslcacert = /etc/rhsm/ca/awesome-uep.pem
sslclientkey = /etc/pki/entitlement/0123456789012345678-key.pem
sslclientcert = /etc/pki/entitlement/0123456789012345678.pem
metadata_expire = 86400
ui_repoid_vars = releasever
[awesome-os-for-x86_64-debug-rpms]
name = Awesome OS for x86_64 - Debug (RPMs)
baseurl = https://cdn.awesome.com/content/dist/awesome/$releasever/x86_64/upstream/debug
enabled = 0
gpgcheck = 1
gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-awesome-release
sslverify = 1
sslcacert = /etc/rhsm/ca/awesome-uep.pem
sslclientkey = /etc/pki/entitlement/0123456789012345678-key.pem
sslclientcert = /etc/pki/entitlement/0123456789012345678.pem
metadata_expire = 86400
ui_repoid_vars = releasever
"""
ENABLED_MODULES = [
{
"name": "duck",
"stream": 0,
"version": "20180730233102",
"context": "deadbeef",
"arch": "noarch",
"profiles": ["default"],
"installed_profiles": [],
"status": "enabled"
},
{
"name": "flipper",
"stream": 0.69,
"version": "20180707144203",
"context": "c0ffee42",
"arch": "x86_64",
"profiles": ["default", "server"],
"installed_profiles": ["server"],
"status": "unknown"
}
]
class TestProfileManager(unittest.TestCase):
def setUp(self):
current_pkgs = [
Package(name="package1", version="1.0.0", release=1, arch="x86_64"),
Package(name="package2", version="2.0.0", release=2, arch="x86_64")
]
temp_repo_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_repo_dir)
repo_file_name = os.path.join(temp_repo_dir, 'awesome.repo')
with open(repo_file_name, 'w') as repo_file:
repo_file.write(CONTENT_REPO_FILE)
patcher = patch('rhsm.profile.dnf')
self.addCleanup(patcher.stop)
dnf_mock = patcher.start()
dnf_mock.dnf = Mock()
mock_db = Mock()
mock_db.conf = Mock()
mock_db.conf.substitutions = {'releasever': '1', 'basearch': 'x86_64'}
dnf_mock.dnf.Base = Mock(return_value=mock_db)
self.current_profile = self._mock_pkg_profile(current_pkgs, repo_file_name, ENABLED_MODULES)
self.profile_mgr = ProfileManager()
self.profile_mgr.current_profile = self.current_profile
@patch('subscription_manager.cache.get_supported_resources')
def test_update_check_no_change(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ['packages']
uuid = 'FAKEUUID'
uep = Mock()
uep.updatePackageProfile = Mock()
self.profile_mgr.has_changed = Mock(return_value=False)
self.profile_mgr.write_cache = Mock()
self.profile_mgr.update_check(uep, uuid)
self.assertEqual(0, uep.updatePackageProfile.call_count)
self.assertEqual(0, self.profile_mgr.write_cache.call_count)
@patch('subscription_manager.cache.get_supported_resources')
def test_update_check_has_changed(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ['packages']
uuid = 'FAKEUUID'
uep = Mock()
uep.has_capability = Mock(return_value=False)
uep.updatePackageProfile = Mock()
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
self.profile_mgr.update_check(uep, uuid, True)
uep.updatePackageProfile.assert_called_with(uuid,
FACT_MATCHER)
self.assertEqual(1, self.profile_mgr.write_cache.call_count)
@patch('subscription_manager.cache.get_supported_resources')
def test_combined_profile_update_check_has_changed(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ["packages"]
uuid = 'FAKEUUID'
uep = Mock()
uep.has_capability = Mock(return_value=True)
uep.updateCombinedProfile = Mock()
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
self.profile_mgr.update_check(uep, uuid, True)
uep.updateCombinedProfile.assert_called_with(uuid,
FACT_MATCHER)
self.assertEqual(1, self.profile_mgr.write_cache.call_count)
@patch('subscription_manager.cache.get_supported_resources')
def test_update_check_packages_not_supported(self, mock_get_supported_resources):
# support anything else but not 'packages'
mock_get_supported_resources.return_value = ['foo', 'bar']
uuid = 'FAKEUUID'
uep = Mock()
uep.updatePackageProfile = Mock()
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
self.profile_mgr.update_check(uep, uuid)
self.assertEqual(0, uep.updatePackageProfile.call_count)
mock_get_supported_resources.assert_called_once()
self.assertEqual(0, self.profile_mgr.write_cache.call_count)
@patch('subscription_manager.cache.get_supported_resources')
def test_update_check_packages_disabled(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ['packages']
uuid = 'FAKEUUID'
uep = Mock()
self.profile_mgr.report_package_profile = 0
uep.updatePackageProfile = Mock()
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
self.profile_mgr.update_check(uep, uuid)
self.assertEqual(0, uep.updatePackageProfile.call_count)
mock_get_supported_resources.assert_called_once()
self.assertEqual(0, self.profile_mgr.write_cache.call_count)
def test_report_package_profile_environment_variable(self):
with patch.dict('os.environ', {'SUBMAN_DISABLE_PROFILE_REPORTING': '1'}), \
patch.object(cache, 'conf') as conf:
            # report_package_profile in rhsm.conf is set to 1 and SUBMAN_DISABLE_PROFILE_REPORTING is set to 1, the
# package profile should not be reported.
conf.__getitem__.return_value.get_int.return_value = 1
self.assertFalse(self.profile_mgr.profile_reporting_enabled())
# report_package_profile in rhsm.conf is set to 0 and SUBMAN_DISABLE_PROFILE_REPORTING is set
# to 1, the package profile should not be reported.
conf.__getitem__.return_value.get_int.return_value = 0
self.assertFalse(self.profile_mgr.profile_reporting_enabled())
with patch.dict('os.environ', {'SUBMAN_DISABLE_PROFILE_REPORTING': '0'}), \
patch.object(cache, 'conf') as conf:
# report_package_profile in rhsm.conf is set to 1 and SUBMAN_DISABLE_PROFILE_REPORTING is set
# to 0, the package profile should be reported.
conf.__getitem__.return_value.get_int.return_value = 1
self.assertTrue(self.profile_mgr.profile_reporting_enabled())
# report_package_profile in rhsm.conf is set to 0 and SUBMAN_DISABLE_PROFILE_REPORTING is set
# to 0, the package profile should not be reported.
conf.__getitem__.return_value.get_int.return_value = 0
self.assertFalse(self.profile_mgr.profile_reporting_enabled())
with patch.dict('os.environ', {}), patch.object(cache, 'conf') as conf:
# report_package_profile in rhsm.conf is set to 1 and SUBMAN_DISABLE_PROFILE_REPORTING is not
# set, the package profile should be reported.
conf.__getitem__.return_value.get_int.return_value = 1
self.assertTrue(self.profile_mgr.profile_reporting_enabled())
# report_package_profile in rhsm.conf is set to 0 and SUBMAN_DISABLE_PROFILE_REPORTING is not
# set, the package profile should not be reported.
conf.__getitem__.return_value.get_int.return_value = 0
self.assertFalse(self.profile_mgr.profile_reporting_enabled())
@patch('subscription_manager.cache.get_supported_resources')
def test_update_check_error_uploading(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ['packages']
uuid = 'FAKEUUID'
uep = Mock()
uep.has_capability = Mock(return_value=False)
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
# Throw an exception when trying to upload:
uep.updatePackageProfile = Mock(side_effect=Exception('BOOM!'))
self.assertRaises(Exception, self.profile_mgr.update_check, uep, uuid, True)
uep.updatePackageProfile.assert_called_with(uuid,
FACT_MATCHER)
self.assertEqual(0, self.profile_mgr.write_cache.call_count)
@patch('subscription_manager.cache.get_supported_resources')
def test_combined_profile_update_check_error_uploading(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ['packages']
uuid = 'FAKEUUID'
uep = Mock()
uep.has_capability = Mock(return_value=True)
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
# Throw an exception when trying to upload:
uep.updateCombinedProfile = Mock(side_effect=Exception('BOOM!'))
self.assertRaises(Exception, self.profile_mgr.update_check, uep, uuid, True)
uep.updateCombinedProfile.assert_called_with(uuid,
FACT_MATCHER)
self.assertEqual(0, self.profile_mgr.write_cache.call_count)
def test_has_changed_no_cache(self):
self.profile_mgr._cache_exists = Mock(return_value=False)
self.assertTrue(self.profile_mgr.has_changed())
def test_has_changed_no_changes(self):
cached_pkgs = [
Package(name="package1", version="1.0.0", release=1, arch="x86_64"),
Package(name="package2", version="2.0.0", release=2, arch="x86_64")
]
temp_repo_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_repo_dir)
repo_file_name = os.path.join(temp_repo_dir, 'awesome.repo')
with open(repo_file_name, 'w') as repo_file:
repo_file.write(CONTENT_REPO_FILE)
cached_profile = self._mock_pkg_profile(cached_pkgs, repo_file_name, ENABLED_MODULES)
self.profile_mgr._cache_exists = Mock(return_value=True)
self.profile_mgr._read_cache = Mock(return_value=cached_profile)
self.assertFalse(self.profile_mgr.has_changed())
self.profile_mgr._read_cache.assert_called_with()
def test_has_changed(self):
cached_pkgs = [
Package(name="package1", version="1.0.0", release=1, arch="x86_64"),
Package(name="package3", version="3.0.0", release=3, arch="x86_64")
]
cached_profile = self._mock_pkg_profile(cached_pkgs, "/non/existing/path/to/repo/file", [])
self.profile_mgr._cache_exists = Mock(return_value=True)
self.profile_mgr._read_cache = Mock(return_value=cached_profile)
self.assertTrue(self.profile_mgr.has_changed())
self.profile_mgr._read_cache.assert_called_with()
@patch('subscription_manager.cache.get_supported_resources')
def test_update_check_consumer_uuid_none(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ['packages']
uuid = None
uep = Mock()
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
res = self.profile_mgr.update_check(uep, uuid)
self.assertEqual(0, res)
def test_package_json_handles_non_unicode(self):
package = Package(name=b'\xf6', version=b'\xf6', release=b'\xf6', arch=b'\xf6', vendor=b'\xf6')
data = package.to_dict()
json_str = json.dumps(data) # to json
data = json.loads(json_str) # and back to an object
for attr in ['name', 'version', 'release', 'arch', 'vendor']:
self.assertEqual(u'\ufffd', data[attr])
def test_package_json_as_unicode_type(self):
# note that the data type at time of writing is bytes, so this is just defensive coding
package = Package(name=u'Björk', version=u'Björk', release=u'Björk', arch=u'Björk', vendor=u'Björk')
data = package.to_dict()
json_str = json.dumps(data) # to json
data = json.loads(json_str) # and back to an object
for attr in ['name', 'version', 'release', 'arch', 'vendor']:
self.assertEqual(u'Björk', data[attr])
def test_package_json_missing_attributes(self):
package = Package(name=None, version=None, release=None, arch=None, vendor=None)
data = package.to_dict()
json_str = json.dumps(data) # to json
data = json.loads(json_str) # and back to an object
for attr in ['name', 'version', 'release', 'arch', 'vendor']:
self.assertEqual(None, data[attr])
def test_module_md_uniquify(self):
modules_input = [
{
"name": "duck",
"stream": 0,
"version": "20180730233102",
"context": "deadbeef",
"arch": "noarch",
"profiles": ["default"],
"installed_profiles": [],
"status": "enabled"
},
{
"name": "duck",
"stream": 0,
"version": "20180707144203",
"context": "c0ffee42",
"arch": "noarch",
"profiles": ["default", "server"],
"installed_profiles": ["server"],
"status": "unknown"
}
]
self.assertEqual(modules_input, ModulesProfile._uniquify(modules_input))
# now test dup modules
self.assertEqual(modules_input, ModulesProfile._uniquify(modules_input + [modules_input[0]]))
@staticmethod
def _mock_pkg_profile(packages, repo_file, enabled_modules):
"""
        Build a mock profile dict (rpm, enabled_repos, modulemd) from a list of package objects.
"""
dict_list = []
for pkg in packages:
dict_list.append(pkg.to_dict())
mock_file = Mock()
mock_file.read = Mock(return_value=json.dumps(dict_list))
mock_rpm_profile = RPMProfile(from_file=mock_file)
mock_enabled_repos_profile = EnabledReposProfile(repo_file=repo_file)
mock_module_profile = ModulesProfile()
mock_module_profile.collect = Mock(return_value=enabled_modules)
mock_profile = {
"rpm": mock_rpm_profile,
"enabled_repos": mock_enabled_repos_profile,
"modulemd": mock_module_profile
}
return mock_profile
class TestInstalledProductsCache(SubManFixture):
def setUp(self):
super(TestInstalledProductsCache, self).setUp()
self.prod_dir = StubCertificateDirectory([
StubProductCertificate(StubProduct('a-product', name="Product A", provided_tags="product,product-a")),
StubProductCertificate(StubProduct('b-product', name="Product B", provided_tags="product,product-b")),
StubProductCertificate(StubProduct('c-product', name="Product C", provided_tags="product-c")),
])
inj.provide(inj.PROD_DIR, self.prod_dir)
self.mgr = InstalledProductsManager()
def test_cert_parsing(self):
self.assertEqual(3, len(list(self.mgr.installed.keys())))
self.assertTrue('a-product' in self.mgr.installed)
self.assertTrue('b-product' in self.mgr.installed)
self.assertTrue('c-product' in self.mgr.installed)
self.assertEqual("Product A", self.mgr.installed['a-product']['productName'])
self.assertEqual(set(["product", "product-a", "product-b", "product-c"]), set(self.mgr.tags))
def test_load_data(self):
cached = {
'products': {
'prod1': 'Product 1',
'prod2': 'Product 2'
},
'tags': ['p1', 'p2']
}
mock_file = Mock()
mock_file.read = Mock(return_value=json.dumps(cached))
data = self.mgr._load_data(mock_file)
self.assertEqual(data, cached)
def test_has_changed(self):
cached = {
'products': {
'prod1': 'Product 1',
'prod2': 'Product 2'
},
'tags': ['p1', 'p2']
}
self.mgr._read_cache = Mock(return_value=cached)
self.mgr._cache_exists = Mock(return_value=True)
self.assertTrue(self.mgr.has_changed())
def test_has_changed_with_tags_only(self):
cached = {
'products': {
'a-product': {'productName': 'Product A', 'productId': 'a-product', 'version': '1.0', 'arch': 'x86_64'},
'b-product': {'productName': 'Product B', 'productId': 'b-product', 'version': '1.0', 'arch': 'x86_64'},
'c-product': {'productName': 'Product C', 'productId': 'c-product', 'version': '1.0', 'arch': 'x86_64'}
},
'tags': ['different']
}
self.mgr._read_cache = Mock(return_value=cached)
self.mgr._cache_exists = Mock(return_value=True)
self.assertTrue(self.mgr.has_changed())
def test_old_format_seen_as_invalid(self):
cached = {
'a-product': {'productName': 'Product A', 'productId': 'a-product', 'version': '1.0', 'arch': 'x86_64'},
'b-product': {'productName': 'Product B', 'productId': 'b-product', 'version': '1.0', 'arch': 'x86_64'},
'c-product': {'productName': 'Product C', 'productId': 'c-product', 'version': '1.0', 'arch': 'x86_64'}
}
self.mgr._read_cache = Mock(return_value=cached)
self.mgr._cache_exists = Mock(return_value=True)
self.assertTrue(self.mgr.has_changed())
def test_has_not_changed(self):
cached = {
'products': {
'a-product': {'productName': 'Product A', 'productId': 'a-product', 'version': '1.0', 'arch': 'x86_64'},
'b-product': {'productName': 'Product B', 'productId': 'b-product', 'version': '1.0', 'arch': 'x86_64'},
'c-product': {'productName': 'Product C', 'productId': 'c-product', 'version': '1.0', 'arch': 'x86_64'}
},
'tags': ['product-a', 'product-b', 'product-c', 'product']
}
self.mgr._read_cache = Mock(return_value=cached)
self.mgr._cache_exists = Mock(return_value=True)
self.assertFalse(self.mgr.has_changed())
def test_update_check_no_change(self):
uuid = 'FAKEUUID'
uep = Mock()
uep.updateConsumer = Mock()
self.mgr.has_changed = Mock(return_value=False)
self.mgr.write_cache = Mock()
self.mgr.update_check(uep, uuid)
self.assertEqual(0, uep.updateConsumer.call_count)
self.assertEqual(0, self.mgr.write_cache.call_count)
def test_update_check_has_changed(self):
uuid = 'FAKEUUID'
uep = Mock()
uep.updateConsumer = Mock()
self.mgr.has_changed = Mock(return_value=True)
self.mgr.write_cache = Mock()
self.mgr.update_check(uep, uuid, True)
expected = ["product", "product-a", "product-b", "product-c"]
uep.updateConsumer.assert_called_with(uuid,
content_tags=set(expected),
installed_products=self.mgr.format_for_server())
self.assertEqual(1, self.mgr.write_cache.call_count)
def test_update_check_error_uploading(self):
uuid = 'FAKEUUID'
uep = Mock()
self.mgr.has_changed = Mock(return_value=True)
self.mgr.write_cache = Mock()
# Throw an exception when trying to upload:
uep.updateConsumer = Mock(side_effect=Exception('BOOM!'))
self.assertRaises(Exception, self.mgr.update_check, uep, uuid, True)
expected = ["product", "product-a", "product-b", "product-c"]
uep.updateConsumer.assert_called_with(uuid,
content_tags=set(expected),
installed_products=self.mgr.format_for_server())
self.assertEqual(0, self.mgr.write_cache.call_count)
class TestReleaseStatusCache(SubManFixture):
def setUp(self):
super(TestReleaseStatusCache, self).setUp()
self.release_cache = ReleaseStatusCache()
self.release_cache.write_cache = Mock()
def test_load_from_server(self):
uep = Mock()
dummy_release = {'releaseVer': 'MockServer'}
uep.getRelease = Mock(return_value=dummy_release)
self.release_cache.read_status(uep, "THISISAUUID")
self.assertEqual(dummy_release, self.release_cache.server_status)
def test_server_no_release_call(self):
uep = Mock()
uep.getRelease = Mock(side_effect=RestlibException("boom"))
status = self.release_cache.read_status(uep, "SOMEUUID")
self.assertEqual(None, status)
def test_server_network_error_no_cache(self):
uep = Mock()
uep.getRelease = Mock(side_effect=socket.error("boom"))
self.release_cache._cache_exists = Mock(return_value=False)
self.assertEqual(None, self.release_cache.read_status(uep, "SOMEUUID"))
def test_server_network_error_with_cache(self):
uep = Mock()
uep.getRelease = Mock(side_effect=socket.error("boom"))
dummy_release = {'releaseVer': 'MockServer'}
self.release_cache._read_cache = Mock(return_value=dummy_release)
self.release_cache._cache_exists = Mock(return_value=True)
self.assertEqual(dummy_release, self.release_cache.read_status(uep, "SOMEUUID"))
def test_rate_limit_exceed_with_cache(self):
uep = Mock()
uep.getRelease = Mock(side_effect=RateLimitExceededException(429))
dummy_release = {'releaseVer': 'MockServer'}
self.release_cache._read_cache = Mock(return_value=dummy_release)
self.release_cache._cache_exists = Mock(return_value=True)
self.assertEqual(dummy_release, self.release_cache.read_status(uep, "SOMEUUID"))
def test_server_network_works_with_cache(self):
uep = Mock()
dummy_release = {'releaseVer': 'MockServer'}
uep.getRelease = Mock(return_value=dummy_release)
self.release_cache._cache_exists = Mock(return_value=True)
self.release_cache._read_cache = Mock(return_value=dummy_release)
self.assertEqual(dummy_release, self.release_cache.read_status(uep, "SOMEUUID"))
self.assertEqual(1, self.release_cache.write_cache.call_count)
self.assertEqual(0, self.release_cache._read_cache.call_count)
self.assertEqual(dummy_release, self.release_cache.read_status(uep, "SOMEUUID"))
self.assertEqual(1, uep.getRelease.call_count)
def test_server_network_works_cache_caches(self):
uep = Mock()
dummy_release = {'releaseVer': 'MockServer'}
uep.getRelease = Mock(return_value=dummy_release)
self.release_cache._cache_exists = Mock(return_value=False)
self.release_cache.server_status = None
self.release_cache._read_cache = Mock(return_value=dummy_release)
self.assertEqual(dummy_release, self.release_cache.read_status(uep, "SOMEUUID"))
self.assertEqual(1, self.release_cache.write_cache.call_count)
self.assertEqual(0, self.release_cache._read_cache.call_count)
self.release_cache._cache_exists = Mock(return_value=True)
self.assertEqual(dummy_release, self.release_cache.read_status(uep, "SOMEUUID"))
self.assertEqual(1, self.release_cache.write_cache.call_count)
self.assertEqual(1, uep.getRelease.call_count)
class TestEntitlementStatusCache(SubManFixture):
def setUp(self):
super(TestEntitlementStatusCache, self).setUp()
self.status_cache = EntitlementStatusCache()
self.status_cache.write_cache = Mock()
def test_load_from_server(self):
uep = Mock()
dummy_status = {"a": "1"}
uep.getCompliance = Mock(return_value=dummy_status)
self.status_cache.load_status(uep, "SOMEUUID")
self.assertEqual(dummy_status, self.status_cache.server_status)
self.assertEqual(1, self.status_cache.write_cache.call_count)
def test_load_from_server_on_date_args(self):
uep = Mock()
dummy_status = {"a": "1"}
uep.getCompliance = Mock(return_value=dummy_status)
self.status_cache.load_status(uep, "SOMEUUID", "2199-12-25")
self.assertEqual(dummy_status, self.status_cache.server_status)
self.assertEqual(1, self.status_cache.write_cache.call_count)
def test_load_from_server_on_date_kwargs(self):
uep = Mock()
dummy_status = {"a": "1"}
uep.getCompliance = Mock(return_value=dummy_status)
self.status_cache.load_status(uep, "SOMEUUID", on_date="2199-12-25")
self.assertEqual(dummy_status, self.status_cache.server_status)
self.assertEqual(1, self.status_cache.write_cache.call_count)
def test_server_no_compliance_call(self):
uep = Mock()
uep.getCompliance = Mock(side_effect=RestlibException("boom"))
status = self.status_cache.load_status(uep, "SOMEUUID")
self.assertEqual(None, status)
def test_server_network_error(self):
dummy_status = {"a": "1"}
uep = Mock()
uep.getCompliance = Mock(side_effect=socket.error("boom"))
self.status_cache._cache_exists = Mock(return_value=True)
self.status_cache._read_cache = Mock(return_value=dummy_status)
status = self.status_cache.load_status(uep, "SOMEUUID")
self.assertEqual(dummy_status, status)
self.assertEqual(1, self.status_cache._read_cache.call_count)
# Extremely unlikely but just in case:
def test_server_network_error_no_cache(self):
uep = Mock()
uep.getCompliance = Mock(side_effect=socket.error("boom"))
self.status_cache._cache_exists = Mock(return_value=False)
self.assertEqual(None, self.status_cache.load_status(uep, "SOMEUUID"))
def test_write_cache(self):
mock_server_status = {'fake server status': random.uniform(1, 2 ** 32)}
status_cache = EntitlementStatusCache()
status_cache.server_status = mock_server_status
cache_dir = tempfile.mkdtemp()
cache_file = os.path.join(cache_dir, 'status_cache.json')
status_cache.CACHE_FILE = cache_file
status_cache.write_cache()
        # write_cache() runs in a background thread and we have no way to know
        # when it starts or finishes, so poll the cache file a few times before
        # giving up.
tries = 0
while tries <= 5:
try:
new_status_buf = open(cache_file).read()
new_status = json.loads(new_status_buf)
break
except Exception as e:
log.exception(e)
tries += 1
time.sleep(.1)
continue
shutil.rmtree(cache_dir)
self.assertEqual(new_status, mock_server_status)
def test_unauthorized_exception_handled(self):
uep = Mock()
uep.getCompliance = Mock(side_effect=UnauthorizedException(401, "GET"))
self.assertEqual(None, self.status_cache.load_status(uep, "aaa"))
class TestPoolStatusCache(SubManFixture):
"""
Class for testing PoolStatusCache
"""
def setUp(self):
super(TestPoolStatusCache, self).setUp()
self.pool_status_cache = PoolStatusCache()
self.pool_status_cache.write_cache = Mock()
def test_load_data(self):
cached = {
'pools': {
'pool1': 'Pool 1',
'pool2': 'Pool 2'
},
'tags': ['p1', 'p2']
}
mock_file = Mock()
mock_file.read = Mock(return_value=json.dumps(cached))
data = self.pool_status_cache._load_data(mock_file)
self.assertEqual(data, cached)
def test_load_from_server(self):
uep = Mock()
dummy_pools = {
'pools': {
'pool1': 'Pool 1',
'pool2': 'Pool 2'
},
'tags': ['p1', 'p2']
}
uep.getEntitlementList = Mock(return_value=dummy_pools)
self.pool_status_cache.read_status(uep, "THISISAUUID")
self.assertEqual(dummy_pools, self.pool_status_cache.server_status)
class TestPoolTypeCache(SubManFixture):
"""
Class for testing PoolTypeCache
"""
def setUp(self):
super(TestPoolTypeCache, self).setUp()
self.cp_provider = inj.require(inj.CP_PROVIDER)
self.cp_provider.consumer_auth_cp = Mock()
self.cp = self.cp_provider.consumer_auth_cp
certs = [StubEntitlementCertificate(StubProduct('pid1'), pool=StubPool('someid'))]
self.ent_dir = StubEntitlementDirectory(certificates=certs)
self.pool_cache = inj.require(inj.POOL_STATUS_CACHE)
self.pool_cache.write_cache = Mock()
def test_empty_cache(self):
pooltype_cache = PoolTypeCache()
result = pooltype_cache.get("some id")
self.assertEqual('', result)
def test_get_pooltype(self):
self.cp.getEntitlementList.return_value = [self._build_ent_json('poolid', 'some type')]
pooltype_cache = PoolTypeCache()
pooltype_cache._do_update()
result = pooltype_cache.get("poolid")
self.assertEqual('some type', result)
def test_requires_update(self):
pooltype_cache = PoolTypeCache()
pooltype_cache.ent_dir = self.ent_dir
# Doesn't have data for pool with id 'someid'
self.assertTrue(pooltype_cache.requires_update())
pooltype_cache.pooltype_map['someid'] = 'some type'
        # After adding data for that entitlement's pool, it shouldn't need an update
self.assertFalse(pooltype_cache.requires_update())
def test_update(self):
pooltype_cache = PoolTypeCache()
pooltype_cache.ent_dir = self.ent_dir
self.cp.getEntitlementList.return_value = [
self._build_ent_json('poolid', 'some type'),
self._build_ent_json('poolid2', 'some other type')]
# requires_update should be true, and should allow this method
# to generate a correct mapping
pooltype_cache.update()
self.assertEqual(2, len(pooltype_cache.pooltype_map))
self.assertEqual('some type', pooltype_cache.get('poolid'))
self.assertEqual('some other type', pooltype_cache.get('poolid2'))
# This is populated when available subs are refreshed
def test_update_from_pools(self):
# Input is a map of pool ids to pool json
pools_map = {}
for i in range(5):
pool_id = 'poolid' + str(i)
pools_map[pool_id] = self._build_pool_json(pool_id, 'some type')
pooltype_cache = PoolTypeCache()
pooltype_cache.update_from_pools(pools_map)
self.assertEqual(5, len(pooltype_cache.pooltype_map))
for i in range(5):
expected_id = 'poolid' + str(i)
self.assertEqual('some type', pooltype_cache.get(expected_id))
def test_requires_update_ents_with_no_pool(self):
pooltype_cache = PoolTypeCache()
pooltype_cache.ent_dir = self.ent_dir
for ent in self.ent_dir.certs:
ent.pool = None
# No ents have pools so there is nothing we can update
self.assertFalse(pooltype_cache.requires_update())
def test_reading_pool_type_from_json_cache(self):
pool_status = [self._build_ent_json('poolid', 'some type')]
self.pool_cache.load_status = Mock()
self.pool_cache.server_status = pool_status
pooltype_cache = PoolTypeCache()
pooltype_cache._do_update()
result = pooltype_cache.get("poolid")
self.assertEqual('some type', result)
def _build_ent_json(self, pool_id, pool_type):
result = {}
result['id'] = "1234"
result['pool'] = self._build_pool_json(pool_id, pool_type)
return result
def _build_pool_json(self, pool_id, pool_type):
return {'id': pool_id, 'calculatedAttributes': {'compliance_type': pool_type}}
class TestContentAccessCache(SubManFixture):
MOCK_CONTENT = {
"lastUpdate": "2016-12-01T21:56:35+0000",
"contentListing": {"42": ["cert-part1", "cert-part2"]}
}
MOCK_CONTENT_EMPTY_CONTENT_LISTING = {
"lastUpdate": "2016-12-01T21:56:35+0000",
"contentListing": None
}
MOCK_CERT = """
before
-----BEGIN ENTITLEMENT DATA-----
entitlement data goes here
-----END ENTITLEMENT DATA-----
after
"""
MOCK_OPEN_EMPTY = mock_open()
MOCK_OPEN_CACHE = mock_open(read_data=json.dumps(MOCK_CONTENT))
def setUp(self):
super(TestContentAccessCache, self).setUp()
self.cache = ContentAccessCache()
self.cache.cp_provider = Mock()
self.mock_uep = Mock()
self.mock_uep.getAccessibleContent = Mock(return_value=self.MOCK_CONTENT)
self.cache.cp_provider.get_consumer_auth_cp = Mock(return_value=self.mock_uep)
self.cache.identity = Mock()
self.cert = Mock()
@patch('subscription_manager.cache.open', MOCK_OPEN_EMPTY)
def test_empty_cache(self):
self.assertFalse(self.cache.exists())
@patch('subscription_manager.cache.open', MOCK_OPEN_EMPTY)
def test_writes_to_cache_after_read(self):
self.cache.check_for_update()
self.MOCK_OPEN_EMPTY.assert_any_call(ContentAccessCache.CACHE_FILE, 'w')
self.MOCK_OPEN_EMPTY().write.assert_any_call(json.dumps(self.MOCK_CONTENT))
@patch('subscription_manager.cache.open', MOCK_OPEN_EMPTY)
def test_cert_updated_after_read(self):
self.cert.serial = 42
update_data = self.cache.check_for_update()
self.cache.update_cert(self.cert, update_data)
self.MOCK_OPEN_EMPTY.assert_any_call(self.cert.path, 'w')
self.MOCK_OPEN_EMPTY().write.assert_any_call(''.join(self.MOCK_CONTENT['contentListing']['42']))
@patch('subscription_manager.cache.open', MOCK_OPEN_CACHE)
def test_check_for_update_provides_date(self):
mock_exists = Mock(return_value=True)
with patch('os.path.exists', mock_exists):
self.cache.check_for_update()
date = isodate.parse_date("2016-12-01T21:56:35+0000")
self.mock_uep.getAccessibleContent.assert_called_once_with(self.cache.identity.uuid, if_modified_since=date)
@patch('os.path.exists', Mock(return_value=True))
def test_cache_remove_deletes_file(self):
mock_remove = Mock()
with patch('os.remove', mock_remove):
self.cache.remove()
mock_remove.assert_called_once_with(ContentAccessCache.CACHE_FILE)
@patch('subscription_manager.cache.open', MOCK_OPEN_EMPTY)
def test_cache_handles_empty_content_listing(self):
self.mock_uep.getAccessibleContent = Mock(return_value=self.MOCK_CONTENT_EMPTY_CONTENT_LISTING)
self.cache.check_for_update()
# getting this far means we did not raise an exception :-)
@patch('subscription_manager.cache.open', MOCK_OPEN_EMPTY)
def test_cache_fails_server_issues_gracefully(self):
self.mock_uep.getAccessibleContent = Mock(side_effect=RestlibException(404))
self.cache.check_for_update()
# getting this far means we did not raise an exception :-)
class TestContentAccessModeCache(SubManFixture):
MOCK_CACHE_FILE_CONTENT = '{"7f85da06-5c35-44ba-931d-f11f6e581f89": "entitlement"}'
def setUp(self):
super(TestContentAccessModeCache, self).setUp()
self.cache = ContentAccessModeCache()
def test_reading_nonexisting_cache(self):
data = self.cache.read_cache_only()
self.assertIsNone(data)
def test_reading_existing_cache(self):
temp_cache_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_cache_dir)
self.cache.CACHE_FILE = os.path.join(temp_cache_dir, 'content_access_mode.json')
with open(self.cache.CACHE_FILE, 'w') as cache_file:
cache_file.write(self.MOCK_CACHE_FILE_CONTENT)
data = self.cache.read_cache_only()
self.assertTrue("7f85da06-5c35-44ba-931d-f11f6e581f89" in data)
self.assertEqual(data["7f85da06-5c35-44ba-931d-f11f6e581f89"], "entitlement")
class TestSupportedResourcesCache(SubManFixture):
MOCK_CACHE_FILE_CONTENT = '{"a3f43883-315b-4cc4-bfb5-5771946d56d7": {"": "/", "cdn": "/cdn"}}'
MOCK_SUPPORTED_RESOURCES_RESPONSE = {"pools": "/pools", "roles": "/roles", "users": "/users"}
def setUp(self):
super(TestSupportedResourcesCache, self).setUp()
self.cache = SupportedResourcesCache()
def test_reading_nonexisting_cache(self):
data = self.cache.read_cache_only()
self.assertIsNone(data)
def test_reading_existing_cache(self):
temp_cache_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_cache_dir)
self.cache.CACHE_FILE = os.path.join(temp_cache_dir, 'supported_resources.json')
with open(self.cache.CACHE_FILE, 'w') as cache_file:
cache_file.write(self.MOCK_CACHE_FILE_CONTENT)
data = self.cache.read_cache_only()
self.assertTrue("a3f43883-315b-4cc4-bfb5-5771946d56d7" in data)
self.assertEqual(data["a3f43883-315b-4cc4-bfb5-5771946d56d7"], {"": "/", "cdn": "/cdn"})
def test_cache_is_obsoleted_by_new_identity(self):
temp_cache_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_cache_dir)
self.cache.CACHE_FILE = os.path.join(temp_cache_dir, 'supported_resources.json')
with open(self.cache.CACHE_FILE, 'w') as cache_file:
cache_file.write(self.MOCK_CACHE_FILE_CONTENT)
mock_uep = Mock()
mock_uep.get_supported_resources = Mock(return_value=self.MOCK_SUPPORTED_RESOURCES_RESPONSE)
mock_identity = Mock()
mock_identity.uuid = 'f0000000-aaaa-bbbb-bbbb-5771946d56d8'
data = self.cache.read_data(uep=mock_uep, identity=mock_identity)
self.assertEqual(data, self.MOCK_SUPPORTED_RESOURCES_RESPONSE)
def test_cache_is_obsoleted_by_timeout(self):
old_timeout = self.cache.TIMEOUT
self.cache.TIMEOUT = 1
temp_cache_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_cache_dir)
self.cache.CACHE_FILE = os.path.join(temp_cache_dir, 'supported_resources.json')
with open(self.cache.CACHE_FILE, 'w') as cache_file:
cache_file.write(self.MOCK_CACHE_FILE_CONTENT)
mock_uep = Mock()
mock_uep.get_supported_resources = Mock(return_value=self.MOCK_SUPPORTED_RESOURCES_RESPONSE)
mock_identity = Mock()
mock_identity.uuid = 'a3f43883-315b-4cc4-bfb5-5771946d56d7'
time.sleep(2)
data = self.cache.read_data(uep=mock_uep, identity=mock_identity)
self.assertEqual(data, self.MOCK_SUPPORTED_RESOURCES_RESPONSE)
self.cache.TIMEOUT = old_timeout
class TestAvailableEntitlementsCache(SubManFixture):
MOCK_CACHE_FILE_CONTENT = '''{
"b1002709-6d67-443e-808b-a7afcbe5b47e": {
"filter_options": {
"after_date": null,
"future": null,
"match_installed": null,
"matches": "*fakeos*",
"no_overlap": null,
"on_date": null,
"service_level": null,
"show_all": null
},
"pools": [
{
"addons": null,
"attributes": [],
"contractNumber": "0",
"endDate": "01/16/2021",
"id": "ff8080816fb38f78016fb392d26f0267",
"management_enabled": false,
"pool_type": "Standard",
"productId": "fakeos-bits",
"productName": "Fake OS Bits",
"providedProducts": {
"38072": "Fake OS Bits"
},
"quantity": "5",
"roles": null,
"service_level": null,
"service_type": null,
"startDate": "01/17/2020",
"suggested": 1,
"usage": null
}
],
"timeout": 1579613054.079684
}
}
'''
def setUp(self):
super(TestAvailableEntitlementsCache, self).setUp()
self.cache = AvailableEntitlementsCache()
def test_reading_nonexisting_cache(self):
"""
Test reading cache, when there is no cache file yet
"""
data = self.cache.read_cache_only()
self.assertIsNone(data)
def test_reading_existing_cache(self):
"""
Test reading cache from file
"""
temp_cache_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_cache_dir)
self.cache.CACHE_FILE = os.path.join(temp_cache_dir, 'available_entitlements.json')
with open(self.cache.CACHE_FILE, 'w') as cache_file:
cache_file.write(self.MOCK_CACHE_FILE_CONTENT)
data = self.cache.read_cache_only()
self.assertTrue("b1002709-6d67-443e-808b-a7afcbe5b47e" in data)
self.assertEqual(data["b1002709-6d67-443e-808b-a7afcbe5b47e"]["timeout"], 1579613054.079684)
self.assertEqual(data["b1002709-6d67-443e-808b-a7afcbe5b47e"]["filter_options"]["matches"], "*fakeos*")
self.assertEqual(len(data["b1002709-6d67-443e-808b-a7afcbe5b47e"]["pools"]), 1)
def test_timeout(self):
"""
Test computing timeout of cache based on smoothed response time (SRT)
"""
uep = inj.require(inj.CP_PROVIDER).get_consumer_auth_cp()
uep.conn.smoothed_rt = 3.0
timeout = self.cache.timeout()
self.assertTrue(timeout >= self.cache.LBOUND)
self.assertTrue(timeout <= self.cache.UBOUND)
def test_timeout_no_srt(self):
"""
Test computing timeout, when there is no SRT yet
"""
uep = inj.require(inj.CP_PROVIDER).get_consumer_auth_cp()
uep.conn.smoothed_rt = None
timeout = self.cache.timeout()
self.assertEqual(timeout, self.cache.LBOUND)
def test_min_timeout(self):
"""
        Test computing timeout when SRT is smaller than the lower bound
"""
uep = inj.require(inj.CP_PROVIDER).get_consumer_auth_cp()
uep.conn.smoothed_rt = 0.01
timeout = self.cache.timeout()
self.assertEqual(timeout, self.cache.LBOUND)
def test_max_timeout(self):
"""
        Test computing timeout when SRT is bigger than the upper bound
"""
uep = inj.require(inj.CP_PROVIDER).get_consumer_auth_cp()
uep.conn.smoothed_rt = 20.0
timeout = self.cache.timeout()
self.assertEqual(timeout, self.cache.UBOUND)
|
gpl-2.0
| -9,068,067,173,417,491,000
| 39.500448
| 124
| 0.626627
| false
| 3.519175
| true
| false
| false
|
Stratoscale/upseto
|
upseto/gitwrapper.py
|
1
|
3364
|
import urlparse
import os
import re
from upseto import run
from upseto import gitconfigparser
def originURLBasename(originURL):
originURLBasename = urlparse.urlparse(originURL).path.split("/")[-1]
if originURLBasename.endswith(".git"):
originURLBasename = originURLBasename[: - len(".git")] # pragma: no cover
return originURLBasename
def normalizeOriginURL(originURL):
originURL = re.sub(r'^git@(\S+?):(.*)$', r'https://\1/\2', originURL)
if originURL.endswith(".git"):
originURL = originURL[: - len(".git")] # pragma: no cover
return originURL
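# For illustration (the URL is an arbitrary example):
#   normalizeOriginURL("git@github.com:Stratoscale/upseto.git")
#   -> "https://github.com/Stratoscale/upseto"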
class GitWrapper:
def __init__(self, directory):
self._cachedOriginURL = None
self._directory = directory
if not os.path.isdir(os.path.join(directory, ".git")):
raise Exception(
"Directory '%s' does not look like a git repository (no .git subdirectory)" %
directory)
if self.originURLBasename() != os.path.basename(os.path.abspath(directory)):
raise Exception(
"Directory '%s' must be named exactly like the "
"origin URL '%s' (with no '.git' extension)" % (
directory, self.originURL()))
@classmethod
def existing(cls, originURL, baseDirectory):
basename = originURLBasename(originURL)
directory = os.path.join(baseDirectory, basename)
if not os.path.isdir(directory):
raise Exception("Directory '%s' does not exist" % directory)
existing = cls(directory)
if normalizeOriginURL(existing.originURL()) != normalizeOriginURL(originURL):
raise Exception(
"Existing directory '%s' origin URL is '%s' which is not the expected '%s' "
"(normalized '%s' and '%s')" % (
directory, existing.originURL(), originURL,
normalizeOriginURL(existing.originURL()),
normalizeOriginURL(originURL)))
return existing
@classmethod
def clone(cls, originURL, baseDirectory):
basename = originURLBasename(originURL)
run.run(["git", "clone", originURL, basename], cwd=baseDirectory)
clone = cls(os.path.join(baseDirectory, basename))
clone.checkout('master')
return clone
def directory(self):
return self._directory
def hash(self, branch='HEAD'):
return self._run(["git", "rev-parse", branch]).strip()
def originURL(self):
if self._cachedOriginURL is None:
url = gitconfigparser.GitConfigParser(self._directory).originURL()
parts = list(urlparse.urlparse(url))
netloc = parts[1]
if '@' in netloc:
netloc = netloc.split('@')[1]
parts[1] = netloc
self._cachedOriginURL = urlparse.urlunparse(parts)
return self._cachedOriginURL
def originURLBasename(self):
return originURLBasename(self.originURL())
def fetch(self):
self._run(["git", "fetch", "--prune"])
def checkout(self, branch):
self._run(["git", "checkout", branch])
def shortStatus(self):
return self._run(["git", "status", "-s"])
def run(self, args):
return self._run(["git"] + args)
def _run(self, command):
return run.run(command=command, cwd=self._directory)
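# Minimal usage sketch, assuming git is installed and the clone URL and target
# directory below (illustrative values) are usable:
if __name__ == "__main__":
    wrapper = GitWrapper.clone("https://github.com/Stratoscale/upseto.git", "/tmp")
    print(wrapper.originURL())    # origin URL with any credentials stripped
    print(wrapper.hash())         # hash of the checked-out master branch
    print(wrapper.shortStatus())  # short git status of the fresh clone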
|
apache-2.0
| -6,241,408,017,556,831,000
| 35.172043
| 93
| 0.603448
| false
| 4.194514
| false
| false
| false
|
JohnLZeller/dd-agent
|
tests/test_win32.py
|
1
|
1616
|
# stdlib
import unittest
import logging
import gc
import sys
# 3p
#from nose.plugins.attrib import attr
# project
import checks.system.win32 as w32
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__file__)
AGENT_CONFIG = {} # None of the windows checks use this.
class TestWin32(unittest.TestCase):
def _checkMemoryLeak(self, func):
        # FIXME: This should use @attr('windows') instead of checking for the
# platform, but just importing nose.plugins.attrib causes all the tests
# to fail with uncollected garbage.
if sys.platform != 'win32':
return
gc.set_debug(gc.DEBUG_LEAK)
try:
start = len(gc.garbage)
func()
end = len(gc.garbage)
self.assertEquals(end - start, 0, gc.garbage)
finally:
gc.set_debug(0)
def testDisk(self):
dsk = w32.Disk(log)
self._checkMemoryLeak(lambda: dsk.check(AGENT_CONFIG))
def testIO(self):
io = w32.IO(log)
self._checkMemoryLeak(lambda: io.check(AGENT_CONFIG))
def testProcesses(self):
proc = w32.Processes(log)
self._checkMemoryLeak(lambda: proc.check(AGENT_CONFIG))
def testMemory(self):
mem = w32.Memory(log)
self._checkMemoryLeak(lambda: mem.check(AGENT_CONFIG))
def testNetwork(self):
net = w32.Network(log)
self._checkMemoryLeak(lambda: net.check(AGENT_CONFIG))
def testCPU(self):
cpu = w32.Cpu(log)
self._checkMemoryLeak(lambda: cpu.check(AGENT_CONFIG))
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
| -6,161,906,718,949,827,000
| 25.491803
| 79
| 0.627475
| false
| 3.583149
| true
| false
| false
|
hgwood/codingame
|
shadows_of_the_knight/part_two.py
|
1
|
1627
|
import sys
import random
import math
w, h = map(int, input().split())
jumps = int(input())
x, y = map(int, input().split())
px, py = x, y
search_zone = [(x, y) for x in range(w) for y in range(h)]
def distance(ax, ay, bx, by):
return math.sqrt((bx - ax)**2 + (by - ay)**2)
def around(zone, x, y):
return [(x, y) for (x, y) in (
(x, y - 1),
(x + 1, y - 1),
(x + 1, y),
(x + 1, y + 1),
(x, y + 1),
(x - 1, y + 1),
(x - 1, y),
(x - 1, y - 1)) if (x, y) in zone]
def centroid(zone):
sumx, sumy = (0, 0)
for x, y in zone:
sumx += x
sumy += y
print(sumx / len(zone), sumy / len(zone), file=sys.stderr)
result = round(sumx / len(zone)), round(sumy / len(zone))
if result not in zone: result = random.choice(around(zone, *result))
return result
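# Each turn: prune the search zone with the WARMER/COLDER/SAME hint, which
# compares the bomb's distance to the current jump (x, y) against the previous
# jump (px, py), then jump to the centroid of the remaining candidates.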
while True:
temperature = input()
if temperature == "UNKNOWN": pass
elif temperature == "WARMER":
search_zone = [(szx, szy) for (szx, szy) in search_zone if distance(szx, szy, x, y) < distance(szx, szy, px, py)]
elif temperature == "COLDER":
search_zone = [(szx, szy) for (szx, szy) in search_zone if distance(szx, szy, x, y) > distance(szx, szy, px, py)]
elif temperature == "SAME":
search_zone = [(szx, szy) for (szx, szy) in search_zone if distance(szx, szy, x, y) == distance(szx, szy, px, py)]
px, py = x, y
x, y = centroid(search_zone)
search_zone = [(szx, szy) for (szx, szy) in search_zone if (szx, szy) != (x, y)]
print(w, h, jumps, x, y, temperature, len(search_zone), file=sys.stderr)
print(x, y)
|
gpl-3.0
| 9,150,318,430,111,574,000
| 32.895833
| 122
| 0.540873
| false
| 2.684818
| false
| false
| false
|
ntoll/yotta
|
yotta/init.py
|
1
|
4970
|
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
from __future__ import print_function
import os
import logging
import re
# Component, , represents an installed component, internal
from .lib import component
# version, , represent versions and specifications, internal
from .lib import version
# validate, , validate various things, internal
from .lib import validate
Known_Licenses = {
'isc': 'https://spdx.org/licenses/ISC',
'apache-2.0': 'https://spdx.org/licenses/Apache-2.0',
'mit': 'https://spdx.org/licenses/MIT',
'bsd-3-clause': 'https://spdx.org/licenses/BSD-3-Clause'
}
Git_Repo_RE = re.compile("^(git[+a-zA-Z-]*:.*|.*\.git|.*git@.*github\.com.*)$")
HG_Repo_RE = re.compile("^(hg[+a-zA-Z-]*:.*|.*\.hg)$")
SVN_Repo_RE = re.compile("^svn[+a-zA-Z-]*:.*$")
def getUserInput(question, default=None, type_class=str):
# python 2 + 3 compatibility
try:
global input
input = raw_input
except NameError:
pass
while True:
default_descr = ''
if default is not None:
default_descr = ' <%s> ' % str(default)
value = input(question + default_descr)
if default is not None and not value:
if type_class:
return type_class(default)
else:
return default
try:
typed_value = type_class(value)
break
except:
print('"%s" isn\'t a valid "%s" value' % (value, type_class.__name__))
return typed_value
def yesNo(string):
if string.strip().lower() in ('yes', 'y'):
return True
elif string.strip().lower() in ('no', 'n'):
return False
else:
raise ValueError()
yesNo.__name__ = "Yes/No"
def repoObject(string):
string = string.strip()
if not string:
return None
elif Git_Repo_RE.match(string):
repo_type = 'git'
url = Git_Repo_RE.match(string).group(0)
elif HG_Repo_RE.match(string):
repo_type = 'hg'
url = HG_Repo_RE.match(string).group(0)
elif SVN_Repo_RE.match(string):
repo_type = 'svn'
url = SVN_Repo_RE.match(string).group(0)
else:
raise ValueError()
return {'type':repo_type, 'url':url}
def listOfWords(string):
if isinstance(string, list):
return string
else:
return list(filter(bool, re.split(",|\\s", string)))
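# Small self-contained illustration of the converters above; the sample strings
# are arbitrary examples, not values used by yotta itself.
def _convertersExample():  # pragma: no cover
    assert yesNo("Y") is True and yesNo("no") is False
    assert listOfWords("c sdk, networking") == ['c', 'sdk', 'networking']
    assert repoObject("git@github.com:ARMmbed/yotta.git")['type'] == 'git'
    assert repoObject("") is None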
def addOptions(parser):
pass
def execCommand(args, following_args):
cwd = os.getcwd()
c = component.Component(cwd)
if c:
        logging.info('The current directory already contains a module: the existing description will be modified')
elif os.path.isfile(c.getDescriptionFile()):
logging.error('A module description exists but could not be loaded:')
logging.error(c.error)
return 1
default_name = c.getName()
if not default_name:
default_name = validate.componentNameCoerced(os.path.split(cwd)[1])
c.setName(getUserInput("Enter the module name:", default_name))
c.setVersion(getUserInput("Enter the initial version:", str(c.getVersion() or "0.0.0"), version.Version))
def current(x):
return c.description[x] if x in c.description else None
c.description['description'] = getUserInput("Short description: ", current('description'))
c.description['keywords'] = getUserInput("Keywords: ", ' '.join(current('keywords') or []), listOfWords)
c.description['author'] = getUserInput("Author: ", current('author'))
current_repo_url = current('repository')
if isinstance(current_repo_url, dict):
current_repo_url = current_repo_url['url']
new_repo_url = getUserInput("Repository url: ", current_repo_url, repoObject)
if new_repo_url:
c.description['repository'] = new_repo_url
c.description['homepage'] = getUserInput("Homepage: ", current('homepage'))
if not current('licenses') or current('license'):
license = getUserInput('What is the license for this project (Apache-2.0, ISC, MIT etc.)? ', 'Apache-2.0')
license_url = None
if license.lower().strip() in Known_Licenses:
license_url = Known_Licenses[license.lower().strip()]
c.description['licenses'] = [{'type':license, 'url':license_url}]
else:
c.description['license'] = license
c.description['dependencies'] = current('dependencies') or {}
c.description['targetDependencies'] = current('targetDependencies') or {}
isexe = getUserInput("Is this module an executable?", "no", yesNo)
if isexe:
c.description['bin'] = './source'
# Create folders while initing
folders_to_create = ["./source", "./test", "./" + c.getName()]
for folder_name in folders_to_create:
if not os.path.exists(folder_name):
os.mkdir(folder_name)
c.writeDescription()
|
apache-2.0
| -5,474,865,060,728,439,000
| 32.809524
| 114
| 0.617505
| false
| 3.665192
| false
| false
| false
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/express_route_circuit_sku.py
|
1
|
1581
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitSku(Model):
"""Contains SKU in an ExpressRouteCircuit.
:param name: The name of the SKU.
:type name: str
:param tier: The tier of the SKU. Possible values are 'Standard' and
'Premium'. Possible values include: 'Standard', 'Premium', 'Transport'
:type tier: str or
~azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitSkuTier
:param family: The family of the SKU. Possible values are: 'UnlimitedData'
and 'MeteredData'. Possible values include: 'UnlimitedData', 'MeteredData'
:type family: str or
~azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitSkuFamily
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ExpressRouteCircuitSku, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.tier = kwargs.get('tier', None)
self.family = kwargs.get('family', None)
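# Minimal construction sketch; the SKU values below are illustrative assumptions:
#
#   sku = ExpressRouteCircuitSku(name='Standard_MeteredData',
#                                tier='Standard', family='MeteredData')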
|
mit
| -8,774,509,893,830,967,000
| 38.525
| 79
| 0.602783
| false
| 3.923077
| false
| false
| false
|
ros2/launch
|
launch/launch/substitutions/command.py
|
1
|
5080
|
# Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the Command substitution."""
import os
import shlex
import subprocess
from typing import Iterable
from typing import List
from typing import Text
import launch.logging
from .substitution_failure import SubstitutionFailure
from ..frontend.expose import expose_substitution
from ..launch_context import LaunchContext
from ..some_substitutions_type import SomeSubstitutionsType
from ..substitution import Substitution
@expose_substitution('command')
class Command(Substitution):
"""
Substitution that gets the output of a command as a string.
If the command is not found or fails a `SubstitutionFailure` error is raised.
Behavior on stderr output is configurable, see constructor.
"""
def __init__(
self,
command: SomeSubstitutionsType,
*,
on_stderr: SomeSubstitutionsType = 'fail'
) -> None:
"""
Construct a command substitution.
:param command: command to be executed. The substitutions will be performed, and
`shlex.split` will be used on the result.
:param on_stderr: specifies what to do when there is stderr output.
Can be one of:
        - 'fail': raises `SubstitutionFailure` when stderr output is detected.
- 'ignore': `stderr` output is ignored.
- 'warn': The `stderr` output is ignored, but a warning is logged if detected.
- 'capture': The `stderr` output will be captured, together with stdout.
It can also be a substitution, that results in one of those four options.
"""
super().__init__()
from ..utilities import normalize_to_list_of_substitutions # import here to avoid loop
self.__command = normalize_to_list_of_substitutions(command)
self.__on_stderr = normalize_to_list_of_substitutions(on_stderr)
@classmethod
def parse(cls, data: Iterable[SomeSubstitutionsType]):
"""Parse `Command` substitution."""
if len(data) < 1 or len(data) > 2:
raise ValueError('command substitution expects 1 or 2 arguments')
kwargs = {'command': data[0]}
if len(data) == 2:
kwargs['on_stderr'] = data[1]
return cls, kwargs
@property
def command(self) -> List[Substitution]:
"""Getter for command."""
return self.__command
@property
def on_stderr(self) -> List[Substitution]:
"""Getter for on_stderr."""
return self.__on_stderr
def describe(self) -> Text:
"""Return a description of this substitution as a string."""
return 'Command({})'.format(' + '.join([sub.describe() for sub in self.command]))
def perform(self, context: LaunchContext) -> Text:
"""Perform the substitution by running the command and capturing its output."""
from ..utilities import perform_substitutions # import here to avoid loop
command_str = perform_substitutions(context, self.command)
if os.name != 'nt':
command = shlex.split(command_str)
else:
command = command_str
on_stderr = perform_substitutions(context, self.on_stderr)
if on_stderr not in ('fail', 'ignore', 'warn', 'capture'):
raise SubstitutionFailure(
"expected 'on_stderr' to be one of: 'fail', 'ignore', 'warn' or 'capture'")
stderr = subprocess.PIPE
if on_stderr == 'capture':
stderr = subprocess.STDOUT
try:
result = subprocess.run(
command,
stdout=subprocess.PIPE,
stderr=stderr,
universal_newlines=True)
except FileNotFoundError as ex:
raise SubstitutionFailure(f'file not found: {ex}')
if result.returncode != 0:
on_error_message = f'executed command failed. Command: {command_str}'
if result.stderr:
on_error_message += f'\nCaptured stderr output: {result.stderr}'
raise SubstitutionFailure(on_error_message)
if result.stderr:
on_stderr_message = f'executed command showed stderr output.' \
f' Command: {command_str}\n' \
f'Captured stderr output:\n{result.stderr}'
if on_stderr == 'fail':
raise SubstitutionFailure(on_stderr_message)
elif on_stderr == 'warn':
launch.logging.get_logger().warning(on_stderr_message)
return result.stdout
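# Minimal usage sketch, assuming the launch package is installed; the echoed
# string is an arbitrary example:
#
#   from launch import LaunchContext
#   from launch.substitutions import Command
#   cmd = Command('echo hello', on_stderr='warn')
#   print(cmd.perform(LaunchContext()))   # -> "hello" plus a trailing newline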
|
apache-2.0
| -927,587,593,320,410,900
| 38.379845
| 95
| 0.638976
| false
| 4.413553
| false
| false
| false
|
cherrygirl/micronaet7
|
pickin_import/importation.py
|
1
|
6137
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP module
# Copyright (C) 2010 Micronaet srl (<http://www.micronaet.it>)
#
# Italian OpenERP Community (<http://www.openerp-italia.com>)
#
#############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from datetime import datetime
from openerp.tools.translate import _
import time
class importation_default_location(osv.osv):
    ''' Default stock locations used during import, one record per location type
'''
_name = 'importation.default.location'
_description = 'Default import location'
def get_location(self, cr, uid, name, context = None):
''' Return default value of location
'''
location_ids=self.search(cr, uid, [('name','=',name)])
if location_ids:
return self.read(cr, uid, location_ids)[0]['location_id'][0]
return False
_columns = {
'name':fields.selection([
('customer','Customer'),
('supplier','Supplier'),
('internal','Internal'), ],'Location type', select=True, readonly=False),
'location_id':fields.many2one('stock.location', 'Location stock', required=True),
}
class importation_purchase_order(osv.osv):
''' List of purchase order elements loaded
'''
_name = 'importation.purchase.order'
_description = 'Purchase order import'
_rec_name= 'product_id'
def check_lot(self, cr, uid, product_id, partner_id, purchase_order, context=None):
        ''' Check whether a lot is already recorded for this product on a specific
            purchase order of a specific partner.
            Return the assigned lot_id if one exists, else False
'''
try:
# Search lot for product-partner-order:
item_ids = self.search(cr, uid, [
('product_id', '=', product_id),
('partner_id', '=', partner_id),
('purchase_order', '=', purchase_order),
])
if item_ids: # exist
item_read = self.read(cr, uid, item_ids)[0]
return item_read['lot_id'][0] # lot_id.id
else:
# Search lot for product-partner (no order)
item_ids = self.search(cr, uid, [
('product_id', '=', product_id),
('partner_id', '=', partner_id),
('purchase_order', '=', False),
])
if item_ids: # exist
item_read = self.read(cr, uid, item_ids)[0]
return item_read['lot_id'][0] # lot_id.id
else:
# Search default lot for product
product_proxy = self.pool.get('product.product').browse(
cr, uid, product_id, context=context)
if product_proxy.default_prodlot_id:
return product_proxy.default_prodlot_id.id
except:
pass
return False
def new_lot(self, cr, uid, product_id, partner_id, purchase_order, lot_id, context=None):
        ''' Check whether the key (product_id, partner_id, purchase_order) already exists in the DB:
            if it does, store lot_id as the last lot used;
            if not, create a new record
'''
item_ids = self.search(cr, uid, [
('product_id', '=', product_id),
('partner_id', '=', partner_id),
('purchase_order', '=', purchase_order),
])
if item_ids: # save this lot as last
item_modify = self.write(cr, uid, item_ids, {
'lot_id': lot_id, }, context=context)
else:
item_create = self.create(cr, uid, {
'product_id': product_id,
'partner_id': partner_id,
'purchase_order': purchase_order,
'lot_id': lot_id,
}, context=context)
return
_columns = {
'product_id': fields.many2one('product.product', 'Product', required=True),
'partner_id': fields.many2one('res.partner', 'Partner', required=True),
'purchase_order': fields.char('Purchase order', size=15, required=False, readonly=False, help="ID of PO that generate this pickin list"),
'lot_id': fields.many2one('stock.production.lot', 'Lot', required=False),
}
    _sql_constraints = [  # TODO: does not check the case where the third field (purchase_order) is empty!
('key_element_unique', 'unique(product_id,partner_id,purchase_order)', 'Key element: product, partner, order must be unique!')
]
class stock_picking_extra_fields(osv.osv):
''' Add extra information for import stock.picking
'''
_name = 'stock.picking'
_inherit = 'stock.picking'
_columns = {
'import_document':fields.char('Document n.', size=18, required=False, readonly=False, help="Link to original imported document, format number/year ex.: 8015/2012"),
        'wizard_id': fields.integer('Wizard ID', help="Save the creating wizard's ID so the stock.picking can be opened after importation")
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 859,732,967,655,162,200
| 42.211268
| 172
| 0.5559
| false
| 4.157182
| false
| false
| false
|
lukas-ke/faint-graphics-editor
|
build-sys/code_utils/find_defs.py
|
1
|
3953
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
from core import enumerate_files, get_root_dir
import re
import sys
EXTENSIONS = [
".cpp",
".hh",
".txt",
]
EXCLUDED_DIRS = [
"__pycache__",
"build",
"build-sys",
"code_utils",
"doxygen",
"help",
"installer",
]
def format_filename(root_dir, f):
f = f.replace(root_dir, "")
return f[1:] if f[0] == "\\" else f
def write_defs_summary(file_name, defs):
with open(file_name, 'w') as f:
for d in sorted(defs.keys()):
key = d
path = defs[d][0]
charNum = str(defs[d][2])
label = defs[d][1]
if len(label) == 0:
label = key
f.write(key + "---" +
path + "---" +
charNum + "---" +
label + "===")
def format_def_line(l):
return l.strip().replace("// ", "")
def format_def_content(text):
return "\n".join(format_def_line(l) for l in text.split("\n"))
def write_defs_index(file_name, defs):
with open(file_name, 'w') as f:
f.write("Index of definitions in Faint source files.\n")
f.write("With faint-start-magic, the following entries\n")
f.write("should be clickable links.\n\n")
for key in sorted(defs.keys()):
f.write("\\ref(%s) (%s)\n" % (key, key))
if __name__ == '__main__':
root_dir = sys.argv[1]
out_file = sys.argv[2]
index_file = sys.argv[3]
defs_pattern = re.compile(r"\\def\((.*?)\)(.*?);", re.DOTALL|re.MULTILINE)
refs_pattern = re.compile(r"\\ref\((.*?)\)")
defs = {}
refs = {}
for filename in enumerate_files(root_dir,
extensions=EXTENSIONS,
excluded_dirs=EXCLUDED_DIRS):
with open(filename) as f:
text = f.read()
for entry in re.finditer(defs_pattern, text):
name = entry.group(1)
if name in defs:
print("Duplicate definition: %s" % name)
else:
content = format_def_content(entry.group(2).strip())
defs[name] = [filename, content, entry.span()[0]]
for entry in re.finditer(refs_pattern, text):
name = entry.group(1)
if name not in refs:
refs[name] = []
refs[name].append(filename)
print("Definitions:")
for num, name in enumerate(sorted(defs.keys())):
print(name + ": " + format_filename(root_dir, defs[name][0]))
for f in refs.get(name, []):
print(" " * len(name) + " <- %s" % format_filename(root_dir, f))
if num != len(defs) - 1:
print()
print()
print("References:")
for name in sorted(refs.keys()):
for f in refs[name]:
print(name + ": ", format_filename(root_dir, f))
for name in defs.keys():
if name not in refs:
print("Unreferenced define: %s" % name)
for name in refs.keys():
if name not in defs.keys():
print("Invalid reference: %s %s" % (name, refs[name][0]))
write_defs_summary(out_file, defs)
write_defs_index(index_file, defs)
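# Example of the def/ref convention this script indexes (a hypothetical C++
# source comment):
#
#   // \def(color-span)
#   // A ColorSpan is a run of identical pixels in a row;
#
# and, elsewhere, a clickable reference:
#
#   // see \ref(color-span)
#
# A definition's text runs up to the first ';' and leading "// " is stripped.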
|
apache-2.0
| 6,920,618,032,387,999,000
| 29.373016
| 78
| 0.521882
| false
| 3.646679
| false
| false
| false
|
vitorfs/bootcamp
|
bootcamp/articles/tests/test_views.py
|
1
|
4751
|
import tempfile
from PIL import Image
from django.test import Client, override_settings
from django.urls import reverse
from test_plus.test import TestCase
from bootcamp.articles.models import Article
def get_temp_img():
size = (200, 200)
color = (255, 0, 0, 0)
with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
image = Image.new("RGB", size, color)
image.save(f, "PNG")
return open(f.name, mode="rb")
class ArticlesViewsTest(TestCase):
def setUp(self):
self.user = self.make_user("first_user")
self.other_user = self.make_user("second_user")
self.client = Client()
self.other_client = Client()
self.client.login(username="first_user", password="password")
self.other_client.login(username="second_user", password="password")
self.article = Article.objects.create(
title="A really nice title",
content="This is a really good content",
status="P",
user=self.user,
)
self.not_p_article = Article.objects.create(
title="A really nice to-be title",
content="""This is a really good content, just if somebody
published it, that would be awesome, but no, nobody wants to
publish it, because they know this is just a test, and you
know than nobody wants to publish a test, just a test;
everybody always wants the real deal.""",
user=self.user,
)
self.test_image = get_temp_img()
def tearDown(self):
self.test_image.close()
def test_index_articles(self):
response = self.client.get(reverse("articles:list"))
self.assertEqual(response.status_code, 200)
def test_error_404(self):
response_no_art = self.client.get(
reverse("articles:article", kwargs={"slug": "no-slug"})
)
self.assertEqual(response_no_art.status_code, 404)
@override_settings(MEDIA_ROOT=tempfile.gettempdir())
def test_create_article(self):
response = self.client.post(
reverse("articles:write_new"),
{
"title": "A not that really nice title",
"content": "Whatever works for you",
"tags": "list, lists",
"status": "P",
"image": self.test_image,
},
)
assert response.status_code == 302
@override_settings(MEDIA_ROOT=tempfile.gettempdir())
def test_single_article(self):
current_count = Article.objects.count()
response = self.client.post(
reverse("articles:write_new"),
{
"title": "A not that really nice title",
"content": "Whatever works for you",
"tags": "list, lists",
"status": "P",
"image": self.test_image,
},
)
# response_art = self.client.get(
# reverse("articles:article",
# kwargs={"slug": "a-not-that-really-nice-title"}))
# assert response_art.status_code == 200
assert response.status_code == 302
assert Article.objects.count() == current_count + 1
@override_settings(MEDIA_ROOT=tempfile.gettempdir())
def test_draft_article(self):
response = self.client.post(
reverse("articles:write_new"),
{
"title": "A not that really nice title",
"content": "Whatever works for you",
"tags": "list, lists",
"status": "D",
"image": self.test_image,
},
)
resp = self.client.get(reverse("articles:drafts"))
assert resp.status_code == 200
assert response.status_code == 302
assert (
resp.context["articles"][0].slug
== "first-user-a-not-that-really-nice-title"
)
@override_settings(MEDIA_ROOT=tempfile.gettempdir())
def test_draft_article_change(self):
response = self.client.post(
reverse("articles:edit_article", kwargs={"pk": self.not_p_article.id}),
{
"title": "A really nice changed title",
"content": "Whatever works for you",
"tags": "list, lists",
"status": "D",
"image": self.test_image,
},
)
resp = self.client.get(reverse("articles:drafts"))
assert resp.status_code == 200
assert response.status_code == 302
assert resp.context["articles"][0].title == "A really nice changed title"
assert (
resp.context["articles"][0].slug == "first-user-a-really-nice-to-be-title"
)
|
mit
| 4,451,839,674,186,486,000
| 34.721805
| 86
| 0.55904
| false
| 4.109862
| true
| false
| false
|
xletmjm416/space-trader-galaxy
|
main.py
|
1
|
2578
|
# -*- coding: utf-8 -*-
"""
Main Space Trader Galaxy module.
Created on Thu Jul 6 01:59:38 2017
@author: mjm
"""
import logging
import logging.config
import character as char
import good as good
def trade(seller, buyer, commodity, pay):
"""
Seller sells commodity to the buyer who pays with pay.
Arguments:
seller - type Character
buyer - type Character
commodity - type Good
pay - type Good
Returns:
success (True) or fail (False)
"""
"""
Pseudocode
log init
if seller owns commodity:
if buyer owns pay:
commodity.transfer(new_owner=buyer)
pay.transfer(new_owner=seller)
log
return True
else:
log
return False
else:
log
return False
log end
"""
pass
success = False
if commodity in seller.belongings: #TODO change into owns() by Character
if pay in buyer.belongings: #same here
commodity.transfer(new_owner=buyer)
pay.transfer(new_owner=seller)
pass
success = True
else:
pass
success = False
else:
pass
success = False
pass
return success
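# --- Illustrative sketch, not part of the original module ---
# The pseudocode above marks several "log" points that trade() leaves as
# bare `pass` statements. The variant below shows one way those
# placeholders could be filled in with the already-imported logging
# module; the log messages themselves are only examples.
def _trade_with_logging(seller, buyer, commodity, pay):
    log = logging.getLogger(__name__)
    log.info("Trade start: %s offers %s, %s pays with %s",
             seller, commodity, buyer, pay)
    if commodity in seller.belongings:
        if pay in buyer.belongings:
            commodity.transfer(new_owner=buyer)
            pay.transfer(new_owner=seller)
            log.info("Trade succeeded.")
            success = True
        else:
            log.warning("Buyer does not own the payment good.")
            success = False
    else:
        log.warning("Seller does not own the commodity.")
        success = False
    log.info("Trade finished.")
    return success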
def test_trade():
print """Initialisation"""
matt = char.Character("Matt", [], "Earth")
tony = char.Character("Tony", [], "Mars")
matt1 = good.Good("Matt's item 1", matt)
tony1 = good.Good("Tony's item 1", tony)
tony2 = good.Good("Tony's item 2", tony)
print matt.describe()
print
print tony.describe()
print
print """Trade undertaking:"""
print "- Tony trades with Matt, seller does not have good"
print trade(tony, matt, matt1, matt1)
print
print matt.describe()
print
print tony.describe()
print
print "- Tony trades with Matt, buyer does not have good"
print trade(tony, matt, tony1, tony2)
print
print matt.describe()
print
print tony.describe()
print
print "- Tony trades with Matt, None have the good"
print trade(tony, matt, matt1, tony1)
print
print matt.describe()
print
print tony.describe()
print
    print "- Matt trades with Tony, both have the good"
print trade(matt, tony, matt1, tony1)
print matt.describe()
print
print tony.describe()
print
def main():
logging.config.fileConfig('logs.conf')
logging.info("Program started.")
test_trade()
logging.info("Program finished.")
if __name__ == "__main__":
main()
|
mit
| 8,642,490,935,781,050,000
| 22.66055
| 76
| 0.588441
| false
| 3.625879
| false
| false
| false
|
craffel/mir_eval
|
mir_eval/sonify.py
|
1
|
10893
|
'''
Methods which sonify annotations for "evaluation by ear".
All functions return a raw signal at the specified sampling rate.
'''
import numpy as np
from numpy.lib.stride_tricks import as_strided
from scipy.interpolate import interp1d
from . import util
from . import chord
def clicks(times, fs, click=None, length=None):
"""Returns a signal with the signal 'click' placed at each specified time
Parameters
----------
times : np.ndarray
times to place clicks, in seconds
fs : int
desired sampling rate of the output signal
click : np.ndarray
click signal, defaults to a 1 kHz blip
length : int
desired number of samples in the output signal,
defaults to ``times.max()*fs + click.shape[0] + 1``
Returns
-------
click_signal : np.ndarray
Synthesized click signal
"""
# Create default click signal
if click is None:
# 1 kHz tone, 100ms
click = np.sin(2*np.pi*np.arange(fs*.1)*1000/(1.*fs))
# Exponential decay
click *= np.exp(-np.arange(fs*.1)/(fs*.01))
# Set default length
if length is None:
length = int(times.max()*fs + click.shape[0] + 1)
# Pre-allocate click signal
click_signal = np.zeros(length)
# Place clicks
for time in times:
# Compute the boundaries of the click
start = int(time*fs)
end = start + click.shape[0]
# Make sure we don't try to output past the end of the signal
if start >= length:
break
if end >= length:
click_signal[start:] = click[:length - start]
break
# Normally, just add a click here
click_signal[start:end] = click
return click_signal
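# --- Illustrative usage sketch, not part of the original module ---
# Shows how clicks() might be called to sonify a handful of beat times;
# the beat times and sampling rate are arbitrary example values.
def _example_clicks():
    beat_times = np.array([0.5, 1.0, 1.5, 2.0])
    # 8 kHz signal with the default 1 kHz decaying click at each beat
    return clicks(beat_times, fs=8000)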
def time_frequency(gram, frequencies, times, fs, function=np.sin, length=None,
n_dec=1):
"""Reverse synthesis of a time-frequency representation of a signal
Parameters
----------
gram : np.ndarray
``gram[n, m]`` is the magnitude of ``frequencies[n]``
from ``times[m]`` to ``times[m + 1]``
Non-positive magnitudes are interpreted as silence.
frequencies : np.ndarray
array of size ``gram.shape[0]`` denoting the frequency of
each row of gram
times : np.ndarray, shape= ``(gram.shape[1],)`` or ``(gram.shape[1], 2)``
Either the start time of each column in the gram,
or the time interval corresponding to each column.
fs : int
desired sampling rate of the output signal
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``times[-1]*fs``
n_dec : int
        the number of decimals used to approximate each sonified frequency.
Defaults to 1 decimal place. Higher precision will be slower.
Returns
-------
output : np.ndarray
synthesized version of the piano roll
"""
# Default value for length
if times.ndim == 1:
# Convert to intervals
times = util.boundaries_to_intervals(times)
if length is None:
length = int(times[-1, 1] * fs)
times, _ = util.adjust_intervals(times, t_max=length)
# Truncate times so that the shape matches gram
n_times = gram.shape[1]
times = times[:n_times]
def _fast_synthesize(frequency):
"""A faster way to synthesize a signal.
Generate one cycle, and simulate arbitrary repetitions
using array indexing tricks.
"""
# hack so that we can ensure an integer number of periods and samples
# rounds frequency to 1st decimal, s.t. 10 * frequency will be an int
frequency = np.round(frequency, n_dec)
# Generate 10*frequency periods at this frequency
# Equivalent to n_samples = int(n_periods * fs / frequency)
# n_periods = 10*frequency is the smallest integer that guarantees
# that n_samples will be an integer, since assuming 10*frequency
# is an integer
n_samples = int(10.0**n_dec * fs)
short_signal = function(2.0 * np.pi * np.arange(n_samples) *
frequency / fs)
# Calculate the number of loops we need to fill the duration
n_repeats = int(np.ceil(length/float(short_signal.shape[0])))
# Simulate tiling the short buffer by using stride tricks
long_signal = as_strided(short_signal,
shape=(n_repeats, len(short_signal)),
strides=(0, short_signal.itemsize))
# Use a flatiter to simulate a long 1D buffer
return long_signal.flat
def _const_interpolator(value):
"""Return a function that returns `value`
no matter the input.
"""
def __interpolator(x):
return value
return __interpolator
# Threshold the tfgram to remove non-positive values
gram = np.maximum(gram, 0)
# Pre-allocate output signal
output = np.zeros(length)
time_centers = np.mean(times, axis=1) * float(fs)
for n, frequency in enumerate(frequencies):
# Get a waveform of length samples at this frequency
wave = _fast_synthesize(frequency)
# Interpolate the values in gram over the time grid
if len(time_centers) > 1:
gram_interpolator = interp1d(
time_centers, gram[n, :],
kind='linear', bounds_error=False,
fill_value=(gram[n, 0], gram[n, -1]))
# If only one time point, create constant interpolator
else:
gram_interpolator = _const_interpolator(gram[n, 0])
# Scale each time interval by the piano roll magnitude
for m, (start, end) in enumerate((times * fs).astype(int)):
# Clip the timings to make sure the indices are valid
start, end = max(start, 0), min(end, length)
# add to waveform
output[start:end] += (
wave[start:end] * gram_interpolator(np.arange(start, end)))
# Normalize, but only if there's non-zero values
norm = np.abs(output).max()
if norm >= np.finfo(output.dtype).tiny:
output /= norm
return output
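# --- Illustrative usage sketch, not part of the original module ---
# Synthesizes a toy two-note piano roll with time_frequency(); every
# value below is made-up example data.
def _example_time_frequency():
    gram = np.array([[1.0, 0.0],
                     [0.0, 0.5]])            # two frequencies, two frames
    frequencies = np.array([220.0, 440.0])   # Hz, one per row of gram
    times = np.array([[0.0, 0.5],
                      [0.5, 1.0]])           # interval for each column
    return time_frequency(gram, frequencies, times, fs=8000)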
def pitch_contour(times, frequencies, fs, amplitudes=None, function=np.sin,
length=None, kind='linear'):
'''Sonify a pitch contour.
Parameters
----------
times : np.ndarray
time indices for each frequency measurement, in seconds
frequencies : np.ndarray
frequency measurements, in Hz.
Non-positive measurements will be interpreted as un-voiced samples.
fs : int
desired sampling rate of the output signal
amplitudes : np.ndarray
        amplitude measurements, nonnegative
defaults to ``np.ones((length,))``
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``max(times)*fs``
kind : str
Interpolation mode for the frequency and amplitude values.
See: ``scipy.interpolate.interp1d`` for valid settings.
Returns
-------
output : np.ndarray
synthesized version of the pitch contour
'''
fs = float(fs)
if length is None:
length = int(times.max() * fs)
# Squash the negative frequencies.
# wave(0) = 0, so clipping here will un-voice the corresponding instants
frequencies = np.maximum(frequencies, 0.0)
# Build a frequency interpolator
f_interp = interp1d(times * fs, 2 * np.pi * frequencies / fs, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
# Estimate frequency at sample points
f_est = f_interp(np.arange(length))
if amplitudes is None:
a_est = np.ones((length, ))
else:
# build an amplitude interpolator
a_interp = interp1d(
times * fs, amplitudes, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
a_est = a_interp(np.arange(length))
# Sonify the waveform
return a_est * function(np.cumsum(f_est))
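# --- Illustrative usage sketch, not part of the original module ---
# Sonifies a short made-up pitch track with pitch_contour(); the final
# 0 Hz measurement marks an unvoiced frame.
def _example_pitch_contour():
    times = np.array([0.0, 0.5, 1.0, 1.5])
    frequencies = np.array([220.0, 220.0, 330.0, 0.0])
    return pitch_contour(times, frequencies, fs=8000)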
def chroma(chromagram, times, fs, **kwargs):
"""Reverse synthesis of a chromagram (semitone matrix)
Parameters
----------
chromagram : np.ndarray, shape=(12, times.shape[0])
Chromagram matrix, where each row represents a semitone [C->Bb]
i.e., ``chromagram[3, j]`` is the magnitude of D# from ``times[j]`` to
``times[j + 1]``
times: np.ndarray, shape=(len(chord_labels),) or (len(chord_labels), 2)
Either the start time of each column in the chromagram,
or the time interval corresponding to each column.
fs : int
Sampling rate to synthesize audio data at
kwargs
Additional keyword arguments to pass to
:func:`mir_eval.sonify.time_frequency`
Returns
-------
output : np.ndarray
Synthesized chromagram
"""
# We'll just use time_frequency with a Shepard tone-gram
# To create the Shepard tone-gram, we copy the chromagram across 7 octaves
n_octaves = 7
# starting from C2
base_note = 24
# and weight each octave by a normal distribution
# The normal distribution has mean 72 (one octave above middle C)
# and std 6 (one half octave)
mean = 72
std = 6
notes = np.arange(12*n_octaves) + base_note
shepard_weight = np.exp(-(notes - mean)**2./(2.*std**2.))
# Copy the chromagram matrix vertically n_octaves times
gram = np.tile(chromagram.T, n_octaves).T
# This fixes issues if the supplied chromagram is int type
gram = gram.astype(float)
    # Apply Shepard weighting
gram *= shepard_weight.reshape(-1, 1)
# Compute frequencies
frequencies = 440.0*(2.0**((notes - 69)/12.0))
return time_frequency(gram, frequencies, times, fs, **kwargs)
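# --- Illustrative usage sketch, not part of the original module ---
# Sonifies a C major triad held over two half-second frames; the
# chromagram and timing values are arbitrary example data.
def _example_chroma():
    chromagram = np.zeros((12, 2))
    chromagram[[0, 4, 7], :] = 1.0           # C, E and G semitone rows
    times = np.array([[0.0, 0.5],
                      [0.5, 1.0]])
    return chroma(chromagram, times, fs=8000)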
def chords(chord_labels, intervals, fs, **kwargs):
"""Synthesizes chord labels
Parameters
----------
chord_labels : list of str
List of chord label strings.
intervals : np.ndarray, shape=(len(chord_labels), 2)
Start and end times of each chord label
fs : int
Sampling rate to synthesize at
kwargs
Additional keyword arguments to pass to
:func:`mir_eval.sonify.time_frequency`
Returns
-------
output : np.ndarray
Synthesized chord labels
"""
util.validate_intervals(intervals)
# Convert from labels to chroma
roots, interval_bitmaps, _ = chord.encode_many(chord_labels)
chromagram = np.array([np.roll(interval_bitmap, root)
for (interval_bitmap, root)
in zip(interval_bitmaps, roots)]).T
return chroma(chromagram, intervals, fs, **kwargs)
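# --- Illustrative usage sketch, not part of the original module ---
# Sonifies two labelled chords over one-second intervals; the labels and
# intervals are arbitrary example data.
def _example_chords():
    chord_labels = ['C:maj', 'A:min']
    intervals = np.array([[0.0, 1.0],
                          [1.0, 2.0]])
    return chords(chord_labels, intervals, fs=8000)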
|
mit
| 147,390,526,746,069,660
| 32.109422
| 78
| 0.615074
| false
| 3.919755
| false
| false
| false
|
quattor/aquilon
|
lib/aquilon/worker/commands/add_network_device.py
|
1
|
6514
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq add network_device`."""
from sqlalchemy.orm import subqueryload
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.model import NetworkDevice, Model, Archetype, Chassis, NetworkDeviceChassisSlot
from aquilon.aqdb.model.network import get_net_id_from_ip
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.dns import grab_address
from aquilon.worker.dbwrappers.location import get_location
from aquilon.worker.dbwrappers.interface import (get_or_create_interface,
assign_address,
check_netdev_iftype)
from aquilon.worker.dbwrappers.host import create_host
from aquilon.worker.processes import DSDBRunner
from aquilon.worker.templates.switchdata import PlenarySwitchData
from aquilon.worker.dbwrappers.change_management import ChangeManagement
class CommandAddNetworkDevice(BrokerCommand):
requires_plenaries = True
required_parameters = ["network_device", "model", "type",
"ip", "interface", "iftype"]
def render(self, session, logger, plenaries, network_device, label, model, type, ip,
interface, iftype, mac, vendor, serial, comments, exporter, chassis, slot,
archetype, domain, sandbox, user, justification, reason, **arguments):
dbmodel = Model.get_unique(session, name=model, vendor=vendor,
compel=True)
if not dbmodel.model_type.isNetworkDeviceType():
raise ArgumentError("This command can only be used to "
"add network devices.")
dblocation = get_location(session, query_options=[subqueryload('parents')], **arguments)
if chassis:
dbchassis = Chassis.get_unique(session, chassis, compel=True)
if slot is None:
raise ArgumentError("The --chassis option requires a --slot.")
if dblocation and dblocation != dbchassis.location:
raise ArgumentError("{0} conflicts with chassis location "
"{1}.".format(dblocation, dbchassis.location))
dblocation = dbchassis.location
elif slot is not None:
raise ArgumentError("The --slot option requires a --chassis.")
dbdns_rec, _ = grab_address(session, network_device, ip,
allow_restricted_domain=True,
allow_reserved=True, preclude=True,
exporter=exporter, require_grn=False)
if not label:
label = dbdns_rec.fqdn.name
try:
NetworkDevice.check_label(label)
except ArgumentError:
raise ArgumentError("Could not deduce a valid hardware label "
"from the network device name. Please specify "
"--label.")
# FIXME: What do the error messages for an invalid enum (switch_type)
# look like?
dbnetdev = NetworkDevice(label=label, switch_type=type,
location=dblocation, model=dbmodel,
serial_no=serial, comments=comments)
session.add(dbnetdev)
if chassis:
dbslot = session.query(NetworkDeviceChassisSlot).filter_by(chassis=dbchassis,
slot_number=slot).first()
if dbslot and dbslot.network_device:
raise ArgumentError("{0} slot {1} already has network device "
"{2}.".format(dbchassis, slot,
dbslot.network_device.label))
if not dbslot:
dbslot = NetworkDeviceChassisSlot(chassis=dbchassis, slot_number=slot)
dbslot.network_device = dbnetdev
session.add(dbslot)
dbnetdev.primary_name = dbdns_rec
check_netdev_iftype(iftype)
dbinterface = get_or_create_interface(session, dbnetdev,
name=interface, mac=mac,
interface_type=iftype)
dbnetwork = get_net_id_from_ip(session, ip)
# TODO: should we call check_ip_restrictions() here?
assign_address(dbinterface, ip, dbnetwork, logger=logger)
if not archetype:
hw_section = 'hardware_network_device'
if not self.config.has_option(hw_section, 'default_archetype'):
raise ArgumentError("Cannot determine the archetype for "
"network devices. Please specify "
"--archetype.")
archetype = self.config.get(hw_section, 'default_archetype')
dbarchetype = Archetype.get_unique(session, archetype, compel=True)
dbhost = create_host(session, logger, self.config, dbnetdev,
dbarchetype, domain=domain, sandbox=sandbox,
**arguments)
session.flush()
# Validate ChangeManagement
cm = ChangeManagement(session, user, justification, reason, logger, self.command, **arguments)
cm.consider(dbhost)
cm.validate()
# Add the legacy template separately
plenaries.add(dbnetdev, cls=PlenarySwitchData)
plenaries.add(dbnetdev)
plenaries.add(dbhost)
with plenaries.transaction():
dsdb_runner = DSDBRunner(logger=logger)
dsdb_runner.update_host(dbnetdev, None)
dsdb_runner.commit_or_rollback("Could not add network device to DSDB")
return
|
apache-2.0
| -409,722,923,466,763,400
| 45.863309
| 102
| 0.602241
| false
| 4.336884
| false
| false
| false
|
Eksmo/calibre
|
src/calibre/ebooks/mobi/writer2/resources.py
|
1
|
5456
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import imghdr
from calibre.ebooks.mobi import MAX_THUMB_DIMEN, MAX_THUMB_SIZE
from calibre.ebooks.mobi.utils import (rescale_image, mobify_image,
write_font_record)
from calibre.ebooks import generate_masthead
from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES
PLACEHOLDER_GIF = b'GIF89a\x01\x00\x01\x00\x80\x00\x00\x00\x00\x00\xff\xff\xff!\xf9\x04\x01\x00\x00\x00\x00,\x00\x00\x00\x00\x01\x00\x01\x00@\x02\x01D\x00;'
class Resources(object):
def __init__(self, oeb, opts, is_periodical, add_fonts=False,
process_images=True):
self.oeb, self.log, self.opts = oeb, oeb.log, opts
self.is_periodical = is_periodical
self.process_images = process_images
self.item_map = {}
self.records = []
self.mime_map = {}
self.masthead_offset = 0
self.used_image_indices = set()
self.image_indices = set()
self.cover_offset = self.thumbnail_offset = None
self.add_resources(add_fonts)
def process_image(self, data):
if not self.process_images:
return data
return (mobify_image(data) if self.opts.mobi_keep_original_images else
rescale_image(data))
def add_resources(self, add_fonts):
oeb = self.oeb
oeb.logger.info('Serializing resources...')
index = 1
mh_href = None
if 'masthead' in oeb.guide and oeb.guide['masthead'].href:
mh_href = oeb.guide['masthead'].href
self.records.append(None)
index += 1
self.used_image_indices.add(0)
self.image_indices.add(0)
elif self.is_periodical:
# Generate a default masthead
data = generate_masthead(unicode(self.oeb.metadata['title'][0]))
self.records.append(data)
self.used_image_indices.add(0)
self.image_indices.add(0)
index += 1
cover_href = self.cover_offset = self.thumbnail_offset = None
if (oeb.metadata.cover and
unicode(oeb.metadata.cover[0]) in oeb.manifest.ids):
cover_id = unicode(oeb.metadata.cover[0])
item = oeb.manifest.ids[cover_id]
cover_href = item.href
for item in self.oeb.manifest.values():
if item.media_type not in OEB_RASTER_IMAGES: continue
try:
data = self.process_image(item.data)
except:
self.log.warn('Bad image file %r' % item.href)
continue
else:
if mh_href and item.href == mh_href:
self.records[0] = data
continue
self.image_indices.add(len(self.records))
self.records.append(data)
self.item_map[item.href] = index
self.mime_map[item.href] = 'image/%s'%imghdr.what(None, data)
index += 1
if cover_href and item.href == cover_href:
self.cover_offset = self.item_map[item.href] - 1
self.used_image_indices.add(self.cover_offset)
try:
data = rescale_image(item.data, dimen=MAX_THUMB_DIMEN,
maxsizeb=MAX_THUMB_SIZE)
except:
self.log.warn('Failed to generate thumbnail')
else:
self.image_indices.add(len(self.records))
self.records.append(data)
self.thumbnail_offset = index - 1
self.used_image_indices.add(self.thumbnail_offset)
index += 1
finally:
item.unload_data_from_memory()
if add_fonts:
for item in self.oeb.manifest.values():
if item.href and item.href.rpartition('.')[-1].lower() in {
'ttf', 'otf'} and isinstance(item.data, bytes):
self.records.append(write_font_record(item.data))
self.item_map[item.href] = len(self.records)
def add_extra_images(self):
'''
Add any images that were created after the call to add_resources()
'''
for item in self.oeb.manifest.values():
if (item.media_type not in OEB_RASTER_IMAGES or item.href in
self.item_map): continue
try:
data = self.process_image(item.data)
except:
self.log.warn('Bad image file %r' % item.href)
else:
self.records.append(data)
self.item_map[item.href] = len(self.records)
finally:
item.unload_data_from_memory()
def serialize(self, records, used_images):
used_image_indices = self.used_image_indices | {
v-1 for k, v in self.item_map.iteritems() if k in used_images}
for i in self.image_indices-used_image_indices:
self.records[i] = PLACEHOLDER_GIF
records.extend(self.records)
def __bool__(self):
return bool(self.records)
__nonzero__ = __bool__
|
gpl-3.0
| -8,675,809,270,525,001,000
| 37.971429
| 156
| 0.552786
| false
| 3.606081
| false
| false
| false
|
openstack/octavia
|
octavia/db/migration/alembic_migrations/versions/0fd2c131923f_add_timeout_fields_to_listener.py
|
1
|
1794
|
# Copyright 2018 GoDaddy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add timeout fields to listener
Revision ID: 0fd2c131923f
Revises: ba35e0fb88e1
Create Date: 2018-03-23 03:34:26.657254
"""
from alembic import op
import sqlalchemy as sa
from octavia.common import constants
# revision identifiers, used by Alembic.
revision = '0fd2c131923f'
down_revision = 'ba35e0fb88e1'
def upgrade():
op.add_column('listener',
sa.Column('timeout_client_data',
sa.Integer(), nullable=True,
default=constants.DEFAULT_TIMEOUT_CLIENT_DATA))
op.add_column('listener',
sa.Column('timeout_member_connect',
sa.Integer(), nullable=True,
default=constants.DEFAULT_TIMEOUT_MEMBER_CONNECT))
op.add_column('listener',
sa.Column('timeout_member_data',
sa.Integer(), nullable=True,
default=constants.DEFAULT_TIMEOUT_MEMBER_DATA))
op.add_column('listener',
sa.Column('timeout_tcp_inspect',
sa.Integer(), nullable=True,
default=constants.DEFAULT_TIMEOUT_TCP_INSPECT))
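# --- Illustrative sketch, not part of the original migration ---
# The migration above defines no downgrade; if one were needed it could
# simply drop the columns added in upgrade(), for example:
def downgrade():
    op.drop_column('listener', 'timeout_client_data')
    op.drop_column('listener', 'timeout_member_connect')
    op.drop_column('listener', 'timeout_member_data')
    op.drop_column('listener', 'timeout_tcp_inspect')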
|
apache-2.0
| -9,021,182,196,373,340,000
| 34.88
| 78
| 0.622074
| false
| 4.09589
| false
| false
| false
|
Endika/OpenUpgrade
|
openerp/addons/openupgrade_records/model/openupgrade_record.py
|
1
|
3901
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module Copyright (C) 2012-2014 OpenUpgrade community
# https://launchpad.net/~openupgrade-committers
#
# Contributors:
# Therp BV <http://therp.nl>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
from openerp.osv.orm import Model
from openerp.osv import fields
except ImportError:
from osv.osv import osv as Model
from osv import fields
# Cannot use forward references in 6.0
class openupgrade_record(Model):
_name = 'openupgrade.record'
openupgrade_record()
class openupgrade_attribute(Model):
_name = 'openupgrade.attribute'
_rec_name = 'name'
_columns = {
'name': fields.char(
'Name', size=24,
readonly=True,
),
'value': fields.char(
'Value',
size=4096,
readonly=True,
),
'record_id': fields.many2one(
'openupgrade.record', ondelete='CASCADE',
readonly=True,
),
}
openupgrade_attribute()
class openupgrade_record(Model):
_inherit = 'openupgrade.record'
_columns = {
'name': fields.char('Name', size=256, readonly=True),
'module': fields.char('Module', size=128, readonly=True),
'model': fields.char('Model', size=128, readonly=True),
'field': fields.char('Field', size=128, readonly=True),
'mode': fields.selection(
[('create', 'Create'), ('modify', 'Modify')],
'Mode',
help='Set to Create if a field is newly created '
'in this module. If this module modifies an attribute of an '
            'existing field, set to Modify.',
readonly=True,
),
'type': fields.selection(
[('field', 'Field'), ('xmlid', 'XML ID')],
'Type',
readonly=True,
),
'attribute_ids': fields.one2many(
'openupgrade.attribute', 'record_id', 'Attributes',
readonly=True,
),
}
def field_dump(self, cr, uid, context=None):
keys = [
'module',
'mode',
'model',
'field',
'type',
'isfunction',
'isproperty',
'isrelated',
'relation',
'required',
'selection_keys',
'req_default',
'inherits',
]
template = dict([(x, False) for x in keys])
ids = self.search(cr, uid, [('type', '=', 'field')], context=context)
records = self.browse(cr, uid, ids, context=context)
data = []
for record in records:
repr = template.copy()
repr.update({
'module': record.module,
'model': record.model,
'field': record.field,
'mode': record.mode,
})
repr.update(
dict([(x.name, x.value) for x in record.attribute_ids]))
data.append(repr)
return data
openupgrade_record()
|
agpl-3.0
| 9,089,793,331,484,869,000
| 30.97541
| 78
| 0.530377
| false
| 4.310497
| false
| false
| false
|
blythemusic/ClyphX
|
ClyphXControlSurfaceActions.py
|
1
|
18706
|
"""
# Copyright (C) 2013-2016 Stray <stray411@hotmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Stray <stray411@hotmail.com>
"""
# emacs-mode: -*- python-*-
# -*- coding: utf-8 -*-
import Live
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from _Framework.ControlSurface import ControlSurface
from _Framework.SessionComponent import SessionComponent
from _Framework.MixerComponent import MixerComponent
from _Framework.DeviceComponent import DeviceComponent
from consts import *
if IS_LIVE_9:
from ClyphXPushActions import ClyphXPushActions
from ClyphXPXTActions import ClyphXPXTActions
from ClyphXMXTActions import ClyphXMXTActions
if IS_LIVE_9_5:
from ClyphXArsenalActions import ClyphXArsenalActions
from ableton.v2.control_surface import ControlSurface as CS
else:
from _Framework.ControlSurface import ControlSurface as CS
class ClyphXControlSurfaceActions(ControlSurfaceComponent):
__module__ = __name__
__doc__ = ' Actions related to control surfaces '
def __init__(self, parent):
ControlSurfaceComponent.__init__(self)
self._parent = parent
self._push_actions = None
if IS_LIVE_9:
self._push_actions = ClyphXPushActions(parent)
self._pxt_actions = ClyphXPXTActions(parent)
self._mxt_actions = ClyphXMXTActions(parent)
if IS_LIVE_9_5:
self._arsenal_actions = ClyphXArsenalActions(parent)
self._scripts = {}
def disconnect(self):
self._scripts = {}
self._parent = None
self._arsenal_actions = None
self._push_actions = None
self._pxt_actions = None
self._mxt_actions = None
if IS_LIVE_9:
ControlSurfaceComponent.disconnect(self)
def on_enabled_changed(self):
pass
def update(self):
pass
def connect_script_instances(self, instanciated_scripts):
""" Build dict of connected scripts and their components, doesn't work with non-Framework scripts, but does work with User Remote Scripts """
if IS_LIVE_9:
instanciated_scripts = self._parent._control_surfaces()
self._scripts = {}
for index in range (len(instanciated_scripts)):
script = instanciated_scripts[index]
self._scripts[index] = {'script' : script, 'name' : None, 'repeat' : False, 'mixer' : None, 'device' : None, 'last_ring_pos' : None,
'session' : None, 'track_link' : False, 'scene_link' : False, 'centered_link' : False, 'color' : False}
script_name = script.__class__.__name__
if isinstance (script, (ControlSurface, CS)):
if script_name == 'GenericScript':
script_name = script._suggested_input_port
if script_name.startswith('Arsenal'):
self._arsenal_actions.set_script(script)
if script_name == 'Push' and IS_LIVE_9:
self._push_actions.set_script(script)
if script_name.startswith('PXT_Live') and IS_LIVE_9:
self._pxt_actions.set_script(script)
if script_name == 'MXT_Live' and IS_LIVE_9:
self._mxt_actions.set_script(script)
if not script_name.startswith('ClyphX'):
if (IS_LIVE_9 and script._components == None) or script.components == None:
return
else:
self._scripts[index]['name'] = script_name.upper()
for c in script.components:
if isinstance (c, SessionComponent):
self._scripts[index]['session'] = c
if script_name.startswith('APC'):
self._scripts[index]['color'] = {'GREEN' : (1, 2), 'RED' : (3, 4), 'AMBER' : (5, 6)}
self._scripts[index]['metro'] = {'controls' : c._stop_track_clip_buttons, 'component' : None, 'override' : None}
if script_name == 'Launchpad':
self._scripts[index]['color'] = {'GREEN' : (52, 56), 'RED' : (7, 11), 'AMBER' : (55, 59)}
self._scripts[index]['metro'] = {'controls' : script._selector._side_buttons, 'component' : None, 'override' : script._selector}
if isinstance (c, MixerComponent):
self._scripts[index]['mixer'] = c
if isinstance (c, DeviceComponent):
self._scripts[index]['device'] = c
if IS_LIVE_9_5 and script_name == 'Push':
self._scripts[index]['session'] = script._session_ring
self._scripts[index]['mixer'] = script._mixer
elif script_name == 'Nocturn':
self._scripts[index]['device'] = script.device_controller
script.device_controller.canonical_parent = script
def dispatch_push_action(self, track, xclip, ident, action, args):
""" Dispatch Push-related actions to PushActions. """
if self._push_actions:
self._push_actions.dispatch_action(track, xclip, ident, action, args)
def dispatch_pxt_action(self, track, xclip, ident, action, args):
""" Dispatch PXT-related actions to PXTActions. """
if self._pxt_actions:
self._pxt_actions.dispatch_action(track, xclip, ident, action, args)
def dispatch_mxt_action(self, track, xclip, ident, action, args):
""" Dispatch MXT-related actions to MXTActions. """
if self._mxt_actions:
self._mxt_actions.dispatch_action(track, xclip, ident, action, args)
def dispatch_arsenal_action(self, track, xclip, ident, action, args):
""" Dispatch Arsenal-related actions to ArsenalActions. """
if self._arsenal_actions:
self._arsenal_actions.dispatch_action(track, xclip, ident, action, args)
def dispatch_cs_action(self, track, xclip, ident, action, args):
""" Dispatch appropriate control surface actions """
script = self._get_script_to_operate_on(action)
if script != None:
if 'METRO ' in args and self._scripts[script].has_key('metro'):
self.handle_visual_metro(self._scripts[script], args)
elif 'RINGLINK ' in args and self._scripts[script]['session']:
self.handle_ring_link(self._scripts[script]['session'], script, args[9:])
elif 'RING ' in args and self._scripts[script]['session']:
self.handle_session_offset(script, self._scripts[script]['session'], args[5:])
elif 'COLORS ' in args and self._scripts[script]['session'] and self._scripts[script]['color']:
self.handle_session_colors(self._scripts[script]['session'], self._scripts[script]['color'], args[7:])
elif 'DEV LOCK' in args and self._scripts[script]['device']:
self._scripts[script]['device'].canonical_parent.toggle_lock()
elif 'BANK ' in args and self._scripts[script]['mixer']:
self.handle_track_bank(script, xclip, ident, self._scripts[script]['mixer'], self._scripts[script]['session'], args[5:])
elif 'RPT' in args and IS_LIVE_9:
self.handle_note_repeat(self._scripts[script]['script'], script, args)
else:
if self._scripts[script]['mixer'] and '/' in args[:4]:
self.handle_track_action(self._scripts[script]['mixer'], xclip, ident, args)
def _get_script_to_operate_on(self, script_info):
""" Returns the script index to operate on, which can be specified in terms of its index
or its name. Also, can use SURFACE (legacy) or CS (new) to indicate a surface action. """
script = None
try:
script_spec = None
if 'SURFACE' in script_info:
script_spec = script_info.strip('SURFACE')
elif 'CS' in script_info:
script_spec = script_info.strip('CS')
if len(script_spec) == 1:
script = int(script_spec) - 1
if not self._scripts.has_key(script):
script = None
else:
script_spec = script_spec.strip('"').strip()
for k, v in self._scripts.items():
if v['name'] == script_spec:
script = k
except: script = None
return script
def handle_note_repeat(self, script, script_index, args):
""" Set note repeat for the given surface """
args = args.replace('RPT', '').strip()
if args in REPEAT_STATES:
if args == 'OFF':
script._c_instance.note_repeat.enabled = False
self._scripts[script_index]['repeat'] = False
else:
script._c_instance.note_repeat.repeat_rate = REPEAT_STATES[args]
script._c_instance.note_repeat.enabled = True
self._scripts[script_index]['repeat'] = True
else:
self._scripts[script_index]['repeat'] = not self._scripts[script_index]['repeat']
script._c_instance.note_repeat.enabled = self._scripts[script_index]['repeat']
def handle_track_action(self, mixer, xclip, ident, args):
""" Get control surface track(s) to operate on and call main action dispatch """
track_start = None
track_end = None
track_range = args.split('/')[0]
actions = str(args[args.index('/')+1:].strip()).split()
new_action = actions[0]
new_args = ''
if len(actions) > 1:
new_args = ' '.join(actions[1:])
if 'ALL' in track_range:
track_start = 0
track_end = len(mixer._channel_strips)
elif '-' in track_range:
track_range = track_range.split('-')
try:
track_start = int(track_range[0]) - 1
track_end = int(track_range[1])
except:
track_start = None
track_end = None
else:
try:
track_start = int(track_range) - 1
track_end = track_start + 1
except:
track_start = None
track_end = None
if track_start != None and track_end != None:
if track_start in range (len(mixer._channel_strips) + 1) and track_end in range (len(mixer._channel_strips) + 1) and track_start < track_end:
track_list = []
for index in range (track_start, track_end):
if index + mixer._track_offset in range (len(mixer.tracks_to_use())):
track_list.append(mixer.tracks_to_use()[index + mixer._track_offset])
if track_list:
self._parent.action_dispatch(track_list, xclip, new_action, new_args, ident)
def handle_track_bank(self, script_key, xclip, ident, mixer, session, args):
""" Move track bank (or session bank) and select first track in bank...this works even with controllers without banks like User Remote Scripts """
if IS_LIVE_9_5 and self._scripts[script_key]['name'] == 'PUSH':
t_offset, s_offset = self._push_actions.get_session_offsets(session)
tracks = session.tracks_to_use()
else:
t_offset, s_offset = mixer._track_offset, session._scene_offset if session else None
tracks = mixer.tracks_to_use()
new_offset = None
if args == 'FIRST':
new_offset = 0
elif args == 'LAST':
new_offset = len(tracks) - len(mixer._channel_strips)
else:
try:
offset = int(args)
if offset + t_offset in range (len(tracks)):
new_offset = offset + t_offset
except: new_offset = None
if new_offset >= 0:
if session:
session.set_offsets(new_offset, s_offset)
else:
mixer.set_track_offset(new_offset)
self.handle_track_action(mixer, xclip, ident, '1/SEL')
def handle_session_offset(self, script_key, session, args):
""" Handle moving session offset absolutely or relatively as well as storing/recalling its last position. """
if IS_LIVE_9_5 and self._scripts[script_key]['name'] == 'PUSH':
last_pos = self._push_actions.handle_session_offset(session, self._scripts[script_key]['last_ring_pos'], args, self._parse_ring_spec)
self._scripts[script_key]['last_ring_pos'] = last_pos or None
return
try:
new_track = session._track_offset
new_scene = session._scene_offset
if args.strip() == 'LAST':
last_pos = self._scripts[script_key]['last_ring_pos']
if last_pos:
session.set_offsets(last_pos[0], last_pos[1])
return
else:
self._scripts[script_key]['last_ring_pos'] = (new_track, new_scene)
new_track, args = self._parse_ring_spec('T', args, new_track, self.song().tracks)
new_scene, args = self._parse_ring_spec('S', args, new_scene, self.song().scenes)
if new_track == -1 or new_scene == -1:
return
session.set_offsets(new_track, new_scene)
except: pass
def _parse_ring_spec(self, spec_id, arg_string, default_index, list_to_search):
""" Parses a ring action specification and returns the specified track/scene index
as well as the arg_string without the specification that was parsed. """
index = default_index
arg_array = arg_string.split()
for a in arg_array:
if a.startswith(spec_id):
if a[1].isdigit():
index = int(a.strip(spec_id)) - 1
arg_string = arg_string.replace(a, '', 1).strip()
break
elif a[1] in ('<', '>'):
index += self._parent.get_adjustment_factor(a.strip(spec_id))
arg_string = arg_string.replace(a, '', 1).strip()
break
elif a[1] == '"':
name_start_pos = arg_string.index(spec_id + '"')
name = arg_string[name_start_pos + 2:]
name_end_pos = name.index('"')
name = name[:name_end_pos]
for i, item in enumerate(list_to_search):
if name == item.name.upper():
index = i
break
arg_string = arg_string.replace(spec_id + '"' + name + '"', '', 1).strip()
break
return (index, arg_string)
def handle_ring_link(self, session, script_index, args):
""" Handles linking/unliking session offsets to the selected track or scene with centering if specified. """
self._scripts[script_index]['track_link'] = args == 'T' or 'T ' in args or ' T' in args
self._scripts[script_index]['scene_link'] = 'S' in args
self._scripts[script_index]['centered_link'] = 'CENTER' in args
def handle_session_colors(self, session, colors, args):
""" Handle changing clip launch LED colors """
args = args.split()
if len(args) == 3:
for a in args:
if not a in colors:
return
for scene_index in range(session.height()):
scene = session.scene(scene_index)
for track_index in range(session.width()):
clip_slot = scene.clip_slot(track_index)
clip_slot.set_started_value(colors[args[0]][0])
clip_slot.set_triggered_to_play_value(colors[args[0]][1])
clip_slot.set_recording_value(colors[args[1]][0])
clip_slot.set_triggered_to_record_value(colors[args[1]][1])
clip_slot.set_stopped_value(colors[args[2]][0])
clip_slot.update()
def handle_visual_metro(self, script, args):
""" Handle visual metro for APCs and Launchpad. """
if 'ON' in args and not script['metro']['component']:
m = VisualMetro(self._parent, script['metro']['controls'], script['metro']['override'])
script['metro']['component'] = m
elif 'OFF' in args and script['metro']['component']:
script['metro']['component'].disconnect()
script['metro']['component'] = None
def on_selected_track_changed(self):
""" Moves the track offset of all track linked surfaces to the selected track with centering if specified. """
trk = self.song().view.selected_track
if trk in self.song().tracks:
trk_id = list(self.song().visible_tracks).index(trk)
for k, v in self._scripts.items():
if v['track_link']:
new_trk_id = trk_id
try:
session = self._scripts[k]['session']
if IS_LIVE_9_5 and v['name'] == 'PUSH':
width = self._push_actions.get_session_dimensions(session)[0]
t_offset, s_offset = self._push_actions.get_session_offsets(session)
else:
width = session.width()
t_offset, s_offset = session._track_offset, session._scene_offset
if self._scripts[k]['centered_link']:
mid_point = (width / 2)
if new_trk_id < mid_point:
if t_offset <= new_trk_id:
return
else:
new_trk_id = 0
else:
centered_id = new_trk_id - mid_point
if centered_id in range(len(self.song().visible_tracks)):
new_trk_id = centered_id
session.set_offsets(new_trk_id, s_offset)
except: pass
def on_selected_scene_changed(self):
""" Moves the scene offset of all scene linked surfaces to the selected scene with centering if specified. """
scn_id = list(self.song().scenes).index(self.song().view.selected_scene)
for k, v in self._scripts.items():
if v['scene_link']:
new_scn_id = scn_id
try:
session = self._scripts[k]['session']
if IS_LIVE_9_5 and v['name'] == 'PUSH':
height = self._push_actions.get_session_dimensions(session)[1]
t_offset, s_offset = self._push_actions.get_session_offsets(session)
else:
height = session.height()
t_offset, s_offset = session._track_offset, session._scene_offset
if self._scripts[k]['centered_link']:
mid_point = (height / 2)
if new_scn_id < mid_point:
if s_offset <= new_scn_id:
return
else:
new_scn_id = 0
else:
centered_id = new_scn_id - mid_point
if centered_id in range(len(self.song().scenes)):
new_scn_id = centered_id
session.set_offsets(t_offset, new_scn_id)
except: pass
class VisualMetro(ControlSurfaceComponent):
__module__ = __name__
__doc__ = ' Visual metro for APCs and Launchpad '
def __init__(self, parent, controls, override):
ControlSurfaceComponent.__init__(self)
self._parent = parent
self._controls = controls
self._override = override
self._last_beat = -1
self.song().add_current_song_time_listener(self.on_time_changed)
self.song().add_is_playing_listener(self.on_time_changed)
def disconnect(self):
if self._controls:
self.clear()
self._controls = None
self.song().remove_current_song_time_listener(self.on_time_changed)
self.song().remove_is_playing_listener(self.on_time_changed)
self._override = None
self._parent = None
if IS_LIVE_9:
ControlSurfaceComponent.disconnect(self)
def on_enabled_changed(self):
pass
def update(self):
pass
def on_time_changed(self):
""" Show visual metronome via control LEDs upon beat changes (will not be shown if in Launchpad User 1) """
if self.song().is_playing and (not self._override or (self._override and self._override._mode_index != 1)):
time = str(self.song().get_current_beats_song_time()).split('.')
if self._last_beat != int(time[1])-1:
self._last_beat = int(time[1])-1
self.clear()
if self._last_beat < len(self._controls):
self._controls[self._last_beat].turn_on()
else:
self._controls[len(self._controls)-1].turn_on()
else:
self.clear()
def clear(self):
""" Clear all control LEDs """
for c in self._controls:
c.turn_off()
# local variables:
# tab-width: 4
|
lgpl-2.1
| 3,540,569,321,692,649,500
| 37.809129
| 147
| 0.655512
| false
| 3.265712
| false
| false
| false
|
OCA/event
|
partner_event/models/event_registration.py
|
1
|
2816
|
# Copyright 2014 Tecnativa S.L. - Pedro M. Baeza
# Copyright 2015 Tecnativa S.L. - Javier Iniesta
# Copyright 2016 Tecnativa S.L. - Antonio Espinosa
# Copyright 2016 Tecnativa S.L. - Vicent Cubells
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
class EventRegistration(models.Model):
_inherit = "event.registration"
partner_id = fields.Many2one(
ondelete='restrict',
)
attendee_partner_id = fields.Many2one(
comodel_name='res.partner',
string='Attendee Partner',
ondelete='restrict',
copy=False,
)
def _prepare_partner(self, vals):
return {
'name': vals.get('name') or vals.get('email'),
'email': vals.get('email', False),
'phone': vals.get('phone', False),
}
@api.model
def create(self, vals):
if not vals.get('attendee_partner_id') and vals.get('email'):
Partner = self.env['res.partner']
Event = self.env['event.event']
# Look for a partner with that email
email = vals.get('email').replace('%', '').replace('_', '\\_')
attendee_partner = Partner.search([
('email', '=ilike', email)
], limit=1)
event = Event.browse(vals['event_id'])
if attendee_partner:
vals['name'] = vals.setdefault('name', attendee_partner.name)
vals['phone'] = vals.setdefault(
'phone', attendee_partner.phone)
elif event.create_partner:
# Create partner
attendee_partner = Partner.sudo().create(
self._prepare_partner(vals))
vals['attendee_partner_id'] = attendee_partner.id
return super(EventRegistration, self).create(vals)
@api.multi
def partner_data_update(self, data):
reg_data = dict((k, v) for k, v in
data.items() if k in ['name', 'email', 'phone'])
if reg_data:
# Only update registration data if this event is not old
registrations = self.filtered(
lambda x: x.event_end_date >= fields.Datetime.now())
registrations.write(reg_data)
@api.onchange('attendee_partner_id', 'partner_id')
def _onchange_partner(self):
if self.attendee_partner_id:
if not self.partner_id:
self.partner_id = self.attendee_partner_id
get_attendee_partner_address = {
'get_attendee_partner_address': self.attendee_partner_id,
}
return super(EventRegistration, self.with_context(
**get_attendee_partner_address))._onchange_partner()
return super(EventRegistration, self)._onchange_partner()
|
agpl-3.0
| -2,625,753,482,065,121,300
| 38.111111
| 77
| 0.572798
| false
| 3.905687
| false
| false
| false
|
Jonathan-Livingston-Seagull/cerebro-dl
|
cerebro/models/hidden_layer_model.py
|
1
|
2771
|
import theano
import theano.tensor as T
import numpy
class HiddenLayerModel(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# end-snippet-1
# `W` is initialized with `W_values` which is uniformely sampled
# from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
# for tanh activation function
# the output of uniform if converted using asarray to dtype
# theano.config.floatX so that the code is runable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other function, so we use the same as
# tanh.
if W is None:
W_values = numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
# parameters of the model
self.params = [self.W, self.b]
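# --- Illustrative usage sketch, not part of the original module ---
# Wires the layer to a symbolic minibatch and compiles a function that
# returns the hidden activations; the layer sizes are arbitrary example
# values.
def _example_hidden_layer():
    rng = numpy.random.RandomState(1234)
    x = T.matrix('x')  # (n_examples, n_in) minibatch
    layer = HiddenLayerModel(rng, input=x, n_in=784, n_out=500,
                             activation=T.tanh)
    forward = theano.function([x], layer.output)
    return forward(numpy.ones((2, 784), dtype=theano.config.floatX))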
|
bsd-3-clause
| 6,810,689,489,409,649,000
| 34.525641
| 79
| 0.562252
| false
| 4.021771
| false
| false
| false
|
tqchen/tvm
|
tutorials/frontend/from_onnx.py
|
1
|
3929
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile ONNX Models
===================
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_
This article is an introductory tutorial to deploy ONNX models with Relay.
To begin, the ONNX package must be installed.
A quick solution is to install the protobuf compiler, and
.. code-block:: bash
    pip install onnx --user
or refer to the official site.
https://github.com/onnx/onnx
"""
import onnx
import numpy as np
import tvm
from tvm import te
import tvm.relay as relay
from tvm.contrib.download import download_testdata
######################################################################
# Load pretrained ONNX model
# ---------------------------------------------
# The example super resolution model used here is exactly the same model in onnx tutorial
# http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
# we skip the pytorch model construction part, and download the saved onnx model
model_url = "".join(
[
"https://gist.github.com/zhreshold/",
"bcda4716699ac97ea44f791c24310193/raw/",
"93672b029103648953c4e5ad3ac3aadf346a4cdc/",
"super_resolution_0.2.onnx",
]
)
model_path = download_testdata(model_url, "super_resolution.onnx", module="onnx")
# now you have super_resolution.onnx on disk
onnx_model = onnx.load(model_path)
######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples!
from PIL import Image
img_url = "https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
img_ycbcr = img.convert("YCbCr") # convert to YCbCr
img_y, img_cb, img_cr = img_ycbcr.split()
x = np.array(img_y)[np.newaxis, np.newaxis, :, :]
######################################################################
# Compile the model with relay
# ---------------------------------------------
target = "llvm"
input_name = "1"
shape_dict = {input_name: x.shape}
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
with tvm.transform.PassContext(opt_level=1):
intrp = relay.build_module.create_executor("graph", mod, tvm.cpu(0), target)
######################################################################
# Execute on TVM
# ---------------------------------------------
dtype = "float32"
tvm_output = intrp.evaluate()(tvm.nd.array(x.astype(dtype)), **params).asnumpy()
######################################################################
# Display results
# ---------------------------------------------
# We put input and output image neck to neck
from matplotlib import pyplot as plt
out_y = Image.fromarray(np.uint8((tvm_output[0, 0]).clip(0, 255)), mode="L")
out_cb = img_cb.resize(out_y.size, Image.BICUBIC)
out_cr = img_cr.resize(out_y.size, Image.BICUBIC)
result = Image.merge("YCbCr", [out_y, out_cb, out_cr]).convert("RGB")
canvas = np.full((672, 672 * 2, 3), 255)
canvas[0:224, 0:224, :] = np.asarray(img)
canvas[:, 672:, :] = np.asarray(result)
plt.imshow(canvas.astype(np.uint8))
plt.show()
|
apache-2.0
| -3,683,840,951,492,471,000
| 36.419048
| 89
| 0.614915
| false
| 3.601283
| false
| false
| false
|
kgeorge/kgeorge-cv
|
samples/skindetect/authoring/python/checkImages.py
|
1
|
1352
|
__author__ = 'kgeorge'
from optparse import OptionParser
from PIL import Image
import os
def main():
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="imagefilename", metavar="FILE")
(options, args) = parser.parse_args()
print options.filename
srcBaseDir = r'/Users/kgeorge/Documents/projects/kgeorge-cv/samples/skindetect/authoring/image/'
maskBaseDir = r'/Users/kgeorge/Downloads'
maskFilename = os.path.splitext(options.filename)[0] + '.png'
im = Image.open(os.path.join(srcBaseDir, options.filename))
im2 = Image.open(os.path.join(maskBaseDir, 'skindetect-' + maskFilename))
print im.size, im2.size
if(im2.size[0] >= im.size[0] and im2.size[1] >= im.size[1]):
        im2 = im2.crop((0, 0, im.size[0], im.size[1]))  # crop mask to the source image size
#im.paste(im2, (0,0))
im2 = im2.convert('L')
im2 = im2.convert('1')
pass
elif (im2.size[0] <= im.size[0] and im2.size[1] <= im.size[1]):
print 'mask smaller than image'
pass
else:
raise IOError
im.paste(im2, (0,0))
maskFilename = os.path.splitext(options.filename)[0] + '_mask' + '.png'
im.save(os.path.join(srcBaseDir, maskFilename))
print options.filename, im.size
print options.filename, im2.size
pass
if __name__ == '__main__':
main()
|
bsd-3-clause
| 8,474,431,141,774,026,000
| 29.727273
| 100
| 0.617604
| false
| 3.093822
| false
| false
| false
|
rollbar/pyrollbar
|
rollbar/examples/fastapi/app_logger.py
|
1
|
1747
|
#!/usr/bin/env python
# This example uses Uvicorn package that must be installed. However, it can be
# replaced with any other ASGI-compliant server.
#
# NOTE: Python 3.6 requires aiocontextvars package to be installed.
# Optional asynchronous reporting requires HTTPX package to be installed.
#
# Run: python app_logger.py
import logging
import fastapi
import rollbar
import uvicorn
from rollbar.contrib.fastapi import LoggerMiddleware
from rollbar.logger import RollbarHandler
# Initialize Rollbar SDK with your server-side ACCESS_TOKEN
rollbar.init(
'ACCESS_TOKEN',
environment='staging',
handler='async', # For asynchronous reporting use: default, async or httpx
)
# Set root logger to log DEBUG and above
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Report ERROR and above to Rollbar
rollbar_handler = RollbarHandler()
rollbar_handler.setLevel(logging.ERROR)
# Attach Rollbar handler to the root logger
logger.addHandler(rollbar_handler)
# Integrate Rollbar with FastAPI application
app = fastapi.FastAPI()
app.add_middleware(LoggerMiddleware) # should be added as the last middleware
# GET query params will be sent to Rollbar and available in the UI
# $ curl http://localhost:8888?param1=hello&param2=world
@app.get('/')
async def read_root():
# Report log entries
logger.critical('Critical message sent to Rollbar')
logger.error('Error message sent to Rollbar')
# Ignore log entries
logger.warning('Warning message is not sent to Rollbar')
logger.info('Info message is not sent to Rollbar')
logger.debug('Debug message is not sent to Rollbar')
return {'hello': 'world'}
if __name__ == '__main__':
uvicorn.run(app, host='localhost', port=8888)
|
mit
| 5,014,467,864,173,894,000
| 28.116667
| 79
| 0.746422
| false
| 3.662474
| false
| false
| false
|
lorensen/VTKExamples
|
src/Python/Filtering/ConnectivityFilter.py
|
1
|
1442
|
#!/usr/bin/env python
import vtk
def main():
sphereSource1 = vtk.vtkSphereSource()
sphereSource1.Update()
delaunay1 = vtk.vtkDelaunay3D()
delaunay1.SetInputConnection(sphereSource1.GetOutputPort())
delaunay1.Update()
sphereSource2 = vtk.vtkSphereSource()
sphereSource2.SetCenter(5,0,0)
sphereSource2.Update()
delaunay2 = vtk.vtkDelaunay3D()
delaunay2.SetInputConnection(sphereSource2.GetOutputPort())
delaunay2.Update()
appendFilter = vtk.vtkAppendFilter()
appendFilter.AddInputConnection(delaunay1.GetOutputPort())
appendFilter.AddInputConnection(delaunay2.GetOutputPort())
appendFilter.Update()
connectivityFilter = vtk.vtkConnectivityFilter()
connectivityFilter.SetInputConnection(appendFilter.GetOutputPort())
connectivityFilter.SetExtractionModeToAllRegions()
connectivityFilter.ColorRegionsOn()
connectivityFilter.Update()
# Visualize
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(connectivityFilter.GetOutputPort())
mapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renWindow = vtk.vtkRenderWindow()
renWindow.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWindow)
iren.Initialize()
iren.Start()
if __name__ == '__main__':
main()
|
apache-2.0
| -5,614,666,063,002,479,000
| 27.27451
| 71
| 0.715673
| false
| 3.845333
| false
| false
| false
|
frontendphil/analyzr
|
analyzr/settings.py
|
1
|
6077
|
from os.path import abspath, dirname
# Django settings for analyzr project.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
PROJECT_PATH = abspath('%s/..' % dirname(abspath(__file__)))
CHECKOUT_PATH = '%s/repos' % PROJECT_PATH
RESULT_PATH = '%s/results' % PROJECT_PATH
CONFIG_PATH = '%s/templates/config' % PROJECT_PATH
CONTRIBUTORS_PER_PAGE = 10
ANONYMIZE = True
# defines hardness of the squale aggregation algorithm
# LOW = 3, MEDIUM = 9, HARD = 30
LAMBDA = 9.0
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '%s/analyzr.db' % PROJECT_PATH, # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
EMAIL = {
"host": "", # smtp server
"account": "", # email account name
"password": "" # account password
}
SEND_EMAILS = True
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = "%s/static" % PROJECT_PATH
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
LOGIN_URL = "/login"
COMPRESS_ENABLED = not DEBUG
COMPRESS_URL = STATIC_URL
COMPRESS_ROOT = STATIC_ROOT
COMPRESS_OUTPUT_DIR = "cache"
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
'compressor.finders.CompressorFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'analyzr.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'analyzr.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"%s/templates" % PROJECT_PATH,
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'south',
'parsr',
'timezone_field',
'annoying',
'compressor',
'django_extensions',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
try:
from local_settings import *
except:
pass
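# Hypothetical example of what a local_settings.py override might contain
# (the file and its values are assumptions, not part of this repository):
#
# DEBUG = True
# ALLOWED_HOSTS = ["localhost"]
# EMAIL = {"host": "smtp.example.com", "account": "analyzr", "password": "secret"}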
|
mit
| -6,614,729,428,346,790,000
| 30.487047
| 127
| 0.682245
| false
| 3.6808
| false
| false
| false
|
bhautikj/vrProjector
|
vrProjectorWrapper.py
|
1
|
3828
|
# Copyright 2016 Bhautik J Joshi
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import vrProjector
def main():
parser = argparse.ArgumentParser(description='Reproject photospheres')
parser.add_argument('--sourceProjection', required=True, help='Type of source projection. Valid values are: Equirectangular, Cubemap, SideBySideFisheye')
parser.add_argument('--sourceImage', required=True, help='Source image[s]. List multiple images in double quotes like so "front.png right.png back.png left.png top.png bottom.png"')
parser.add_argument('--useBilnear', required=False, help='Use bilinear interpolation when reprojecting. Valid values are true and false.')
parser.add_argument('--outProjection', required=True, help='Type of output projection. Valid values are: Equirectangular, Cubemap, SideBySideFisheye, Fisheye')
parser.add_argument('--outImage', required=True, help='output image[s]. List multiple images in double quotes like so "front.png right.png back.png left.png top.png bottom.png"')
parser.add_argument('--outWidth', required=True, help='output image[s] width in pixels')
parser.add_argument('--outHeight', required=True, help='output image[s] height in pixels')
args = parser.parse_args()
source = None
if args.sourceProjection.lower() == "Equirectangular".lower():
source = vrProjector.EquirectangularProjection()
source.loadImage(args.sourceImage)
elif args.sourceProjection.lower() == "SideBySideFisheye".lower():
source = vrProjector.SideBySideFisheyeProjection()
source.loadImage(args.sourceImage)
elif args.sourceProjection.lower() == "Cubemap".lower():
source = vrProjector.CubemapProjection()
imageList = args.sourceImage.split(' ')
source.loadImages(imageList[0], imageList[1], imageList[2], imageList[3], imageList[4], imageList[5])
elif args.sourceProjection.lower() == "Fisheye".lower():
source = vrProjector.FisheyeProjection()
source.loadImage(args.sourceImage)
else:
print("Quitting because unsupported source projection type: ", args.sourceProjection)
return
if args.useBilnear is not None:
if args.useBilnear.lower() == "true":
source.set_use_bilinear(True)
out = None
if args.outProjection.lower() == "Equirectangular".lower():
out = vrProjector.EquirectangularProjection()
out.initImage(int(args.outWidth), int(args.outHeight))
elif args.outProjection.lower() == "SideBySideFisheye".lower():
out = vrProjector.SideBySideFisheyeProjection()
out.initImage(int(args.outWidth), int(args.outHeight))
elif args.outProjection.lower() == "Cubemap".lower():
out = vrProjector.CubemapProjection()
out.initImages(int(args.outWidth), int(args.outHeight))
elif args.outProjection.lower() == "Fisheye".lower():
out = vrProjector.FisheyeProjection()
out.initImage(int(args.outWidth), int(args.outHeight))
else:
print("Quitting because unsupported output projection type: ", args.outProjection)
return
out.reprojectToThis(source)
# out.reprojectToThisThreaded(source, 16)
if args.outProjection.lower() == "Cubemap".lower():
imageList = args.outImage.split(' ')
out.saveImages(imageList[0], imageList[1], imageList[2], imageList[3], imageList[4], imageList[5])
else:
out.saveImage(args.outImage)
if __name__ == "__main__":
main()
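# Hypothetical invocation (file names are made up, not part of the project):
#   python vrProjectorWrapper.py --sourceProjection Equirectangular \
#     --sourceImage equirect.png --outProjection Cubemap \
#     --outImage "front.png right.png back.png left.png top.png bottom.png" \
#     --outWidth 1024 --outHeight 1024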
|
apache-2.0
| 3,129,478,283,443,361,000
| 47.468354
| 183
| 0.738506
| false
| 3.57423
| false
| false
| false
|
Urinx/SomeCodes
|
Bioinformatics/other_code/Traindata.py
|
1
|
2902
|
#!/usr/bin/env python
# coding: utf-8
from time import time
import random
class Traindata():
"""docstring for Traindata"""
posi = []
nega = []
cv = []
k = 3
def __init__(self):
pass
def load_intm(self, filename, isPosi):
startTime = time()
dataset = self.posi if isPosi else self.nega
state = 'Positive' if isPosi else 'Negative'
with open(filename) as f:
f.readline()
i = 0
while 1:
line = f.readline().replace('\t',' ')
if not line: break
pp = line.split(' ')[1].replace('|',' ')
dataset.append(pp)
i += 1
totalTime = time()-startTime
print '[*] Load '+state+' PPIs data('+str(i)+') from \''+filename+'\' in '+str(totalTime)+'s'
def load_txt(self, filename, isPosi):
startTime = time()
dataset = self.posi if isPosi else self.nega
state = 'Positive' if isPosi else 'Negative'
with open(filename) as f:
line = ' '
i = 0
while line:
line = f.readline().replace('\r\n','')
dataset.append(' '.join(line.split('\t')))
i += 1
dataset.pop()
i -= 1
totalTime = time()-startTime
print '[*] Load '+state+' PPIs data('+str(i)+') from \''+filename+'\' in '+str(totalTime)+'s'
def load_mitab(self, filename, isPosi):
startTime = time()
dataset = self.posi if isPosi else self.nega
state = 'Positive' if isPosi else 'Negative'
with open(filename) as f:
i = 0
while 1:
line = f.readline().replace('\n','')
if not line: break
p1, p2 = line.replace('uniprotkb:','').split('\t')[:2]
dataset.append(' '.join([p1,p2]))
i += 1
totalTime = time()-startTime
print '[*] Load '+state+' PPIs data('+str(i)+') from \''+filename+'\' in '+str(totalTime)+'s'
# K-fold Cross Validation
def KCV(self, k):
startTime = time()
self.k = k
self.cv = []
p = len(self.posi)
n = len(self.nega)
prange = range(p)
nrange = range(n)
random.shuffle(prange)
random.shuffle(nrange)
dp, mp = p / k, p % k
dn, mn = n / k, n %k
for i in xrange(k):
tmp = []
for jp in prange[i*dp:(i+1)*dp]:
tmp.append('+ '+self.posi[jp])
if i < mp:
tmp.append('+ '+self.posi[prange[-(i+1)]])
for jn in nrange[i*dn:(i+1)*dn]:
tmp.append('- '+self.nega[jn])
if i >= k - mn:
tmp.append('- '+self.nega[nrange[-(k-i)]])
self.cv.append(tmp)
totalTime = time()-startTime
print '[*] Set cross validation data (k='+str(k)+') in '+str(totalTime)+'s'
def done(self):
p = len(self.posi)
n = len(self.nega)
print '[*] Positive data: '+str(p)+', Negative data: '+str(n)+', Total: '+str(p+n)
def unittest(self):
pass
if __name__=='__main__':
traindata = Traindata()
traindata.load_intm('yeast.db.all.200908.intm', True)
traindata.load_intm('human.db.all.201008.intm', True)
traindata.load_intm('human.db.all.201108-201008.intm', True)
traindata.load_txt('combined.txt', False)
traindata.load_mitab('18509523_neg.mitab', False)
traindata.KCV(10)
traindata.done()
|
gpl-2.0
| -9,218,771,574,054,769,000
| 26.121495
| 95
| 0.601309
| false
| 2.612061
| false
| false
| false
|
galad-loth/LearnDescriptor
|
patchmatch/train_matchnet.py
|
1
|
1806
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 11:28:35 2018
@author: galad-loth
"""
import mxnet as mx
import logging
import sys
from metric_net import match_net
from data import get_UBC_patch_dataiter
logging.basicConfig(level=logging.INFO)
root_logger = logging.getLogger()
stdout_handler = logging.StreamHandler(sys.stdout)
root_logger.addHandler(stdout_handler)
root_logger.setLevel(logging.INFO)
def train_match_net():
datadir="D:\\_Datasets\\UBCPatch"
dataset="liberty"
gt_file="m50_100000_100000_0.txt"
batch_size=50
train_iter,val_iter=get_UBC_patch_dataiter(datadir, dataset,gt_file,
batch_size,"siam",True, 0.05)
model_prefix="checkpoint\\matchnet"
checkpoint = mx.callback.do_checkpoint(model_prefix)
eval_metric=mx.metric.Accuracy()
train_net=match_net(512,256)
train_mod = mx.mod.Module(train_net,context=mx.gpu(),
data_names=['data1','data2'],label_names=["loss_label"])
train_mod.bind(data_shapes=train_iter.provide_data,
label_shapes=train_iter.provide_label)
# train_mod.init_params()
train_mod.fit(train_data=train_iter,
eval_data=val_iter,
initializer =mx.initializer.Xavier(),
optimizer='sgd',
optimizer_params={'learning_rate':0.01,
"momentum":0.9,
"wd":0.005,
"lr_scheduler":mx.lr_scheduler.FactorScheduler(8000,0.9)},
eval_metric=eval_metric,
epoch_end_callback=checkpoint,
num_epoch=10)
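# Hypothetical sketch (not part of the original script): reload the checkpoint
# written by the training loop above for inference.  The 64x64 single-channel
# patch shape and the epoch number are assumptions about the UBC patch data.
def load_trained_match_net(epoch=10, batch_size=50):
    sym, arg_params, aux_params = mx.model.load_checkpoint("checkpoint\\matchnet", epoch)
    mod = mx.mod.Module(sym, context=mx.gpu(),
                        data_names=['data1', 'data2'], label_names=["loss_label"])
    mod.bind(data_shapes=[('data1', (batch_size, 1, 64, 64)),
                          ('data2', (batch_size, 1, 64, 64))],
             for_training=False)
    mod.set_params(arg_params, aux_params)
    return mod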
if __name__=="__main__":
train_match_net()
|
apache-2.0
| -2,724,554,486,041,508,400
| 32.075472
| 92
| 0.566445
| false
| 3.56213
| false
| false
| false
|
nmayorov/scipy
|
scipy/integrate/_quad_vec.py
|
3
|
20742
|
import sys
import copy
import heapq
import collections
import functools
import numpy as np
from scipy._lib._util import MapWrapper
class LRUDict(collections.OrderedDict):
def __init__(self, max_size):
self.__max_size = max_size
def __setitem__(self, key, value):
existing_key = (key in self)
super(LRUDict, self).__setitem__(key, value)
if existing_key:
self.move_to_end(key)
elif len(self) > self.__max_size:
self.popitem(last=False)
def update(self, other):
# Not needed below
raise NotImplementedError()
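# Hypothetical illustration (not part of SciPy): show the eviction behaviour
# of the LRUDict cache defined above.
def _lru_dict_demo():
    d = LRUDict(max_size=2)
    d['a'] = 1
    d['b'] = 2
    d['c'] = 3      # exceeds max_size, so the oldest key 'a' is evicted
    return list(d)  # ['b', 'c']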
class SemiInfiniteFunc(object):
"""
Argument transform from (start, +-oo) to (0, 1)
"""
def __init__(self, func, start, infty):
self._func = func
self._start = start
self._sgn = -1 if infty < 0 else 1
# Overflow threshold for the 1/t**2 factor
self._tmin = sys.float_info.min**0.5
def get_t(self, x):
z = self._sgn * (x - self._start) + 1
if z == 0:
# Can happen only if point not in range
return np.inf
return 1 / z
def __call__(self, t):
if t < self._tmin:
return 0.0
else:
x = self._start + self._sgn * (1 - t) / t
f = self._func(x)
return self._sgn * (f / t) / t
class DoubleInfiniteFunc(object):
"""
Argument transform from (-oo, oo) to (-1, 1)
"""
def __init__(self, func):
self._func = func
# Overflow threshold for the 1/t**2 factor
self._tmin = sys.float_info.min**0.5
def get_t(self, x):
s = -1 if x < 0 else 1
return s / (abs(x) + 1)
def __call__(self, t):
if abs(t) < self._tmin:
return 0.0
else:
x = (1 - abs(t)) / t
f = self._func(x)
return (f / t) / t
def _max_norm(x):
return np.amax(abs(x))
def _get_sizeof(obj):
try:
return sys.getsizeof(obj)
except TypeError:
# occurs on pypy
if hasattr(obj, '__sizeof__'):
return int(obj.__sizeof__())
return 64
class _Bunch(object):
def __init__(self, **kwargs):
self.__keys = kwargs.keys()
self.__dict__.update(**kwargs)
def __repr__(self):
return "_Bunch({})".format(", ".join("{}={}".format(k, repr(self.__dict__[k]))
for k in self.__keys))
def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6, limit=10000,
workers=1, points=None, quadrature=None, full_output=False):
r"""Adaptive integration of a vector-valued function.
Parameters
----------
f : callable
Vector-valued function f(x) to integrate.
a : float
Initial point.
b : float
Final point.
epsabs : float, optional
Absolute tolerance.
epsrel : float, optional
Relative tolerance.
norm : {'max', '2'}, optional
Vector norm to use for error estimation.
cache_size : int, optional
Number of bytes to use for memoization.
workers : int or map-like callable, optional
If `workers` is an integer, part of the computation is done in
parallel subdivided to this many tasks (using
:class:`python:multiprocessing.pool.Pool`).
Supply `-1` to use all cores available to the Process.
Alternatively, supply a map-like callable, such as
:meth:`python:multiprocessing.pool.Pool.map` for evaluating the
population in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
points : list, optional
List of additional breakpoints.
quadrature : {'gk21', 'gk15', 'trapezoid'}, optional
Quadrature rule to use on subintervals.
Options: 'gk21' (Gauss-Kronrod 21-point rule),
'gk15' (Gauss-Kronrod 15-point rule),
'trapezoid' (composite trapezoid rule).
Default: 'gk21' for finite intervals and 'gk15' for (semi-)infinite intervals.
full_output : bool, optional
Return an additional ``info`` dictionary.
Returns
-------
res : {float, array-like}
Estimate for the result
err : float
Error estimate for the result in the given norm
info : dict
Returned only when ``full_output=True``.
Info dictionary. Is an object with the attributes:
success : bool
Whether integration reached target precision.
status : int
Indicator for convergence, success (0),
failure (1), and failure due to rounding error (2).
neval : int
Number of function evaluations.
intervals : ndarray, shape (num_intervals, 2)
Start and end points of subdivision intervals.
integrals : ndarray, shape (num_intervals, ...)
Integral for each interval.
Note that at most ``cache_size`` values are recorded,
and the array may contain *nan* for missing items.
errors : ndarray, shape (num_intervals,)
Estimated integration error for each interval.
Notes
-----
The algorithm mainly follows the implementation of QUADPACK's
DQAG* algorithms, implementing global error control and adaptive
subdivision.
The algorithm here has some differences to the QUADPACK approach:
Instead of subdividing one interval at a time, the algorithm
subdivides N intervals with largest errors at once. This enables
(partial) parallelization of the integration.
The logic of subdividing "next largest" intervals first is then
not implemented, and we rely on the above extension to avoid
concentrating on "small" intervals only.
The Wynn epsilon table extrapolation is not used (QUADPACK uses it
for infinite intervals). This is because the algorithm here is
supposed to work on vector-valued functions, in a user-specified
norm, and the extension of the epsilon algorithm to this case does
not appear to be widely agreed. For max-norm, using elementwise
Wynn epsilon could be possible, but we do not do this here with
the hope that the epsilon extrapolation is mainly useful in
special cases.
References
----------
[1] R. Piessens, E. de Doncker, QUADPACK (1983).
Examples
--------
We can compute integrations of a vector-valued function:
>>> from scipy.integrate import quad_vec
>>> import matplotlib.pyplot as plt
>>> alpha = np.linspace(0.0, 2.0, num=30)
>>> f = lambda x: x**alpha
>>> x0, x1 = 0, 2
>>> y, err = quad_vec(f, x0, x1)
>>> plt.plot(alpha, y)
>>> plt.xlabel(r"$\alpha$")
>>> plt.ylabel(r"$\int_{0}^{2} x^\alpha dx$")
>>> plt.show()
"""
a = float(a)
b = float(b)
# Use simple transformations to deal with integrals over infinite
# intervals.
kwargs = dict(epsabs=epsabs,
epsrel=epsrel,
norm=norm,
cache_size=cache_size,
limit=limit,
workers=workers,
points=points,
quadrature='gk15' if quadrature is None else quadrature,
full_output=full_output)
if np.isfinite(a) and np.isinf(b):
f2 = SemiInfiniteFunc(f, start=a, infty=b)
if points is not None:
kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
return quad_vec(f2, 0, 1, **kwargs)
elif np.isfinite(b) and np.isinf(a):
f2 = SemiInfiniteFunc(f, start=b, infty=a)
if points is not None:
kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
res = quad_vec(f2, 0, 1, **kwargs)
return (-res[0],) + res[1:]
elif np.isinf(a) and np.isinf(b):
sgn = -1 if b < a else 1
# NB. explicitly split integral at t=0, which separates
# the positive and negative sides
f2 = DoubleInfiniteFunc(f)
if points is not None:
kwargs['points'] = (0,) + tuple(f2.get_t(xp) for xp in points)
else:
kwargs['points'] = (0,)
if a != b:
res = quad_vec(f2, -1, 1, **kwargs)
else:
res = quad_vec(f2, 1, 1, **kwargs)
return (res[0]*sgn,) + res[1:]
elif not (np.isfinite(a) and np.isfinite(b)):
raise ValueError("invalid integration bounds a={}, b={}".format(a, b))
norm_funcs = {
None: _max_norm,
'max': _max_norm,
'2': np.linalg.norm
}
if callable(norm):
norm_func = norm
else:
norm_func = norm_funcs[norm]
mapwrapper = MapWrapper(workers)
parallel_count = 128
min_intervals = 2
try:
_quadrature = {None: _quadrature_gk21,
'gk21': _quadrature_gk21,
'gk15': _quadrature_gk15,
'trapz': _quadrature_trapezoid, # alias for backcompat
'trapezoid': _quadrature_trapezoid}[quadrature]
except KeyError as e:
raise ValueError("unknown quadrature {!r}".format(quadrature)) from e
# Initial interval set
if points is None:
initial_intervals = [(a, b)]
else:
prev = a
initial_intervals = []
for p in sorted(points):
p = float(p)
if not (a < p < b) or p == prev:
continue
initial_intervals.append((prev, p))
prev = p
initial_intervals.append((prev, b))
global_integral = None
global_error = None
rounding_error = None
interval_cache = None
intervals = []
neval = 0
for x1, x2 in initial_intervals:
ig, err, rnd = _quadrature(x1, x2, f, norm_func)
neval += _quadrature.num_eval
if global_integral is None:
if isinstance(ig, (float, complex)):
# Specialize for scalars
if norm_func in (_max_norm, np.linalg.norm):
norm_func = abs
global_integral = ig
global_error = float(err)
rounding_error = float(rnd)
cache_count = cache_size // _get_sizeof(ig)
interval_cache = LRUDict(cache_count)
else:
global_integral += ig
global_error += err
rounding_error += rnd
interval_cache[(x1, x2)] = copy.copy(ig)
intervals.append((-err, x1, x2))
heapq.heapify(intervals)
CONVERGED = 0
NOT_CONVERGED = 1
ROUNDING_ERROR = 2
NOT_A_NUMBER = 3
status_msg = {
CONVERGED: "Target precision reached.",
NOT_CONVERGED: "Target precision not reached.",
ROUNDING_ERROR: "Target precision could not be reached due to rounding error.",
NOT_A_NUMBER: "Non-finite values encountered."
}
# Process intervals
with mapwrapper:
ier = NOT_CONVERGED
while intervals and len(intervals) < limit:
# Select intervals with largest errors for subdivision
tol = max(epsabs, epsrel*norm_func(global_integral))
to_process = []
err_sum = 0
for j in range(parallel_count):
if not intervals:
break
if j > 0 and err_sum > global_error - tol/8:
# avoid unnecessary parallel splitting
break
interval = heapq.heappop(intervals)
neg_old_err, a, b = interval
old_int = interval_cache.pop((a, b), None)
to_process.append(((-neg_old_err, a, b, old_int), f, norm_func, _quadrature))
err_sum += -neg_old_err
# Subdivide intervals
for dint, derr, dround_err, subint, dneval in mapwrapper(_subdivide_interval, to_process):
neval += dneval
global_integral += dint
global_error += derr
rounding_error += dround_err
for x in subint:
x1, x2, ig, err = x
interval_cache[(x1, x2)] = ig
heapq.heappush(intervals, (-err, x1, x2))
# Termination check
if len(intervals) >= min_intervals:
tol = max(epsabs, epsrel*norm_func(global_integral))
if global_error < tol/8:
ier = CONVERGED
break
if global_error < rounding_error:
ier = ROUNDING_ERROR
break
if not (np.isfinite(global_error) and np.isfinite(rounding_error)):
ier = NOT_A_NUMBER
break
res = global_integral
err = global_error + rounding_error
if full_output:
res_arr = np.asarray(res)
dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
integrals = np.array([interval_cache.get((z[1], z[2]), dummy)
for z in intervals], dtype=res_arr.dtype)
errors = np.array([-z[0] for z in intervals])
intervals = np.array([[z[1], z[2]] for z in intervals])
info = _Bunch(neval=neval,
success=(ier == CONVERGED),
status=ier,
message=status_msg[ier],
intervals=intervals,
integrals=integrals,
errors=errors)
return (res, err, info)
else:
return (res, err)
def _subdivide_interval(args):
interval, f, norm_func, _quadrature = args
old_err, a, b, old_int = interval
c = 0.5 * (a + b)
# Left-hand side
if getattr(_quadrature, 'cache_size', 0) > 0:
f = functools.lru_cache(_quadrature.cache_size)(f)
s1, err1, round1 = _quadrature(a, c, f, norm_func)
dneval = _quadrature.num_eval
s2, err2, round2 = _quadrature(c, b, f, norm_func)
dneval += _quadrature.num_eval
if old_int is None:
old_int, _, _ = _quadrature(a, b, f, norm_func)
dneval += _quadrature.num_eval
if getattr(_quadrature, 'cache_size', 0) > 0:
dneval = f.cache_info().misses
dint = s1 + s2 - old_int
derr = err1 + err2 - old_err
dround_err = round1 + round2
subintervals = ((a, c, s1, err1), (c, b, s2, err2))
return dint, derr, dround_err, subintervals, dneval
def _quadrature_trapezoid(x1, x2, f, norm_func):
"""
Composite trapezoid quadrature
"""
x3 = 0.5*(x1 + x2)
f1 = f(x1)
f2 = f(x2)
f3 = f(x3)
s2 = 0.25 * (x2 - x1) * (f1 + 2*f3 + f2)
round_err = 0.25 * abs(x2 - x1) * (float(norm_func(f1))
+ 2*float(norm_func(f3))
+ float(norm_func(f2))) * 2e-16
s1 = 0.5 * (x2 - x1) * (f1 + f2)
err = 1/3 * float(norm_func(s1 - s2))
return s2, err, round_err
_quadrature_trapezoid.cache_size = 3 * 3
_quadrature_trapezoid.num_eval = 3
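# Hypothetical illustration (not part of SciPy): the quadrature helpers return
# an (integral, error_estimate, rounding_error) triple.  For f(x) = x**2 on
# [0, 1] the 3-point composite trapezoid rule above gives 0.375 (exact: 1/3).
def _trapezoid_demo():
    return _quadrature_trapezoid(0.0, 1.0, lambda x: x * x, abs)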
def _quadrature_gk(a, b, f, norm_func, x, w, v):
"""
Generic Gauss-Kronrod quadrature
"""
fv = [0.0]*len(x)
c = 0.5 * (a + b)
h = 0.5 * (b - a)
# Gauss-Kronrod
s_k = 0.0
s_k_abs = 0.0
for i in range(len(x)):
ff = f(c + h*x[i])
fv[i] = ff
vv = v[i]
# \int f(x)
s_k += vv * ff
# \int |f(x)|
s_k_abs += vv * abs(ff)
# Gauss
s_g = 0.0
for i in range(len(w)):
s_g += w[i] * fv[2*i + 1]
# Quadrature of abs-deviation from average
s_k_dabs = 0.0
y0 = s_k / 2.0
for i in range(len(x)):
# \int |f(x) - y0|
s_k_dabs += v[i] * abs(fv[i] - y0)
# Use similar error estimation as quadpack
err = float(norm_func((s_k - s_g) * h))
dabs = float(norm_func(s_k_dabs * h))
if dabs != 0 and err != 0:
err = dabs * min(1.0, (200 * err / dabs)**1.5)
eps = sys.float_info.epsilon
round_err = float(norm_func(50 * eps * h * s_k_abs))
if round_err > sys.float_info.min:
err = max(err, round_err)
return h * s_k, err, round_err
def _quadrature_gk21(a, b, f, norm_func):
"""
Gauss-Kronrod 21 quadrature with error estimate
"""
# Gauss-Kronrod points
x = (0.995657163025808080735527280689003,
0.973906528517171720077964012084452,
0.930157491355708226001207180059508,
0.865063366688984510732096688423493,
0.780817726586416897063717578345042,
0.679409568299024406234327365114874,
0.562757134668604683339000099272694,
0.433395394129247190799265943165784,
0.294392862701460198131126603103866,
0.148874338981631210884826001129720,
0,
-0.148874338981631210884826001129720,
-0.294392862701460198131126603103866,
-0.433395394129247190799265943165784,
-0.562757134668604683339000099272694,
-0.679409568299024406234327365114874,
-0.780817726586416897063717578345042,
-0.865063366688984510732096688423493,
-0.930157491355708226001207180059508,
-0.973906528517171720077964012084452,
-0.995657163025808080735527280689003)
# 10-point weights
w = (0.066671344308688137593568809893332,
0.149451349150580593145776339657697,
0.219086362515982043995534934228163,
0.269266719309996355091226921569469,
0.295524224714752870173892994651338,
0.295524224714752870173892994651338,
0.269266719309996355091226921569469,
0.219086362515982043995534934228163,
0.149451349150580593145776339657697,
0.066671344308688137593568809893332)
# 21-point weights
v = (0.011694638867371874278064396062192,
0.032558162307964727478818972459390,
0.054755896574351996031381300244580,
0.075039674810919952767043140916190,
0.093125454583697605535065465083366,
0.109387158802297641899210590325805,
0.123491976262065851077958109831074,
0.134709217311473325928054001771707,
0.142775938577060080797094273138717,
0.147739104901338491374841515972068,
0.149445554002916905664936468389821,
0.147739104901338491374841515972068,
0.142775938577060080797094273138717,
0.134709217311473325928054001771707,
0.123491976262065851077958109831074,
0.109387158802297641899210590325805,
0.093125454583697605535065465083366,
0.075039674810919952767043140916190,
0.054755896574351996031381300244580,
0.032558162307964727478818972459390,
0.011694638867371874278064396062192)
return _quadrature_gk(a, b, f, norm_func, x, w, v)
_quadrature_gk21.num_eval = 21
def _quadrature_gk15(a, b, f, norm_func):
"""
Gauss-Kronrod 15 quadrature with error estimate
"""
# Gauss-Kronrod points
x = (0.991455371120812639206854697526329,
0.949107912342758524526189684047851,
0.864864423359769072789712788640926,
0.741531185599394439863864773280788,
0.586087235467691130294144838258730,
0.405845151377397166906606412076961,
0.207784955007898467600689403773245,
0.000000000000000000000000000000000,
-0.207784955007898467600689403773245,
-0.405845151377397166906606412076961,
-0.586087235467691130294144838258730,
-0.741531185599394439863864773280788,
-0.864864423359769072789712788640926,
-0.949107912342758524526189684047851,
-0.991455371120812639206854697526329)
# 7-point weights
w = (0.129484966168869693270611432679082,
0.279705391489276667901467771423780,
0.381830050505118944950369775488975,
0.417959183673469387755102040816327,
0.381830050505118944950369775488975,
0.279705391489276667901467771423780,
0.129484966168869693270611432679082)
# 15-point weights
v = (0.022935322010529224963732008058970,
0.063092092629978553290700663189204,
0.104790010322250183839876322541518,
0.140653259715525918745189590510238,
0.169004726639267902826583426598550,
0.190350578064785409913256402421014,
0.204432940075298892414161999234649,
0.209482141084727828012999174891714,
0.204432940075298892414161999234649,
0.190350578064785409913256402421014,
0.169004726639267902826583426598550,
0.140653259715525918745189590510238,
0.104790010322250183839876322541518,
0.063092092629978553290700663189204,
0.022935322010529224963732008058970)
return _quadrature_gk(a, b, f, norm_func, x, w, v)
_quadrature_gk15.num_eval = 15
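# Hypothetical usage sketch (not part of SciPy): integrate the vector-valued
# function f(x) = [sin(x), cos(x)] over [0, pi]; the exact result is [2, 0].
def _quad_vec_demo():
    res, err = quad_vec(lambda x: np.array([np.sin(x), np.cos(x)]), 0.0, np.pi)
    return res, err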
|
bsd-3-clause
| -1,173,841,174,148,520,200
| 31.460094
| 102
| 0.584177
| false
| 3.290814
| false
| false
| false
|
marcelocure/octopusapi
|
octopusapi/middleware.py
|
1
|
1905
|
import json
import logging
import falcon
class StorageError(Exception):
@staticmethod
def handle(ex, req, resp, params):
description = ('Sorry, could not store the message, it worked on my machine')
raise falcon.HTTPError(falcon.HTTP_725, 'Database Error', description)
class AuthMiddleware(object):
def process_request(self, req, resp):
pass
class RequireJSON(object):
def process_request(self, req, resp):
if not req.client_accepts_json:
raise falcon.HTTPNotAcceptable('This API only supports responses encoded as JSON.')
if req.method in ('POST'):
if 'application/json' not in req.content_type:
raise falcon.HTTPUnsupportedMediaType('This API only supports requests encoded as JSON.')
class JSONTranslator(object):
def process_request(self, req, resp):
if req.content_length in (None, 0):
return
body = req.stream.read()
if not body:
raise falcon.HTTPBadRequest('Empty request body', 'A valid JSON document is required.')
try:
req.context['doc'] = json.loads(body.decode('utf-8'))
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPError(falcon.HTTP_753, 'Malformed JSON', 'Could not decode the request body. The JSON was incorrect or not encoded as UTF-8.')
def process_response(self, req, resp, resource):
if 'result' not in req.context:
return
resp.body = json.dumps(req.context['result'])
def max_body(limit):
def hook(req, resp, resource, params):
length = req.content_length
if length is not None and length > limit:
msg = ('The size of the request is too large. The body must not exceed ' + str(limit) + ' bytes in length.')
raise falcon.HTTPRequestEntityTooLarge('Request body is too large', msg)
return hook
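# Hypothetical wiring sketch (not part of this module): how the middleware and
# error handler defined above might be attached to a falcon application.  The
# resource classes are assumed to live elsewhere.
def make_api():
    api = falcon.API(middleware=[AuthMiddleware(), RequireJSON(), JSONTranslator()])
    api.add_error_handler(StorageError, StorageError.handle)
    return api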
|
mit
| -8,678,748,957,230,808,000
| 33.017857
| 155
| 0.651969
| false
| 4.096774
| false
| false
| false
|
deepmind/slim-dataset
|
reader.py
|
1
|
10070
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reader for dataset used in the SLIM paper.
Example usage:
filenames, iterator, next_element = make_dataset(batch_size=16)
with tf.Session() as sess:
# Initialize `iterator` with train data.
# training_filenames = ["/var/data/train_1.tfrecord", ...]
sess.run(iterator.initializer, feed_dict={filenames: training_filenames})
ne_value = sess.run(next_element)
# Initialize `iterator` with validation data.
# validation_filenames = ["/var/data/train_1.tfrecord", ...]
# sess.run(iterator.initializer, feed_dict={filenames: validation_filenames})
ne_value = sess.run(next_element)
`next_element` is a tuple containing the query, the target, and the raw data.
The query is a tuple where the first element is the
sequence of 9 (images, cameras, captions) which can be given to the model
as context. The second element in the query is the camera angle of the
viewpoint to reconstruct. The target contains the image corresponding to the
queried viewpoint, the text description from that viewpoint and an image of
the scene viewed from above.
The raw data is a dictionary with all the fields as read from the tf.Record as
described in the documentation for `_parse_proto`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_NUM_VIEWS = 10
_NUM_RAW_CAMERA_PARAMS = 3
_IMAGE_SCALE = 0.25
_USE_SIMPLIFIED_CAPTIONS = False
_PARSE_METADATA = False
def _parse_proto(buf):
"""Parse binary protocol buffer into tensors.
The protocol buffer is expected to contain the following fields:
* frames: 10 views of the scene rendered as images.
* top_down_frame: single view of the scene from above rendered as an image.
* cameras: 10 vectors describing the camera position from which the frames
have been rendered
* captions: A string description of the scene. For the natural language
dataset, contains descriptions written by human annotators. For
synthetic data contains a string describing each relation between
objects in the scene exactly once.
* simplified_captions: A string description of the scene. For the natural
language dataset contains a string describing each relation between
objects in the scene exactly once. For synthetic datasets contains
a string describing every possible pairwise relation between objects in
the scene.
* meta_shape: A vector of strings describing the object shapes.
* meta_color: A vector of strings describing the object colors.
* meta_size: A vector of strings describing the object sizes.
* meta_obj_positions: A matrix of floats describing the position of each
object in the scene.
* meta_obj_rotations: A matrix of floats describing the rotation of each
object in the scene.
* meta_obj_colors: A matrix of floats describing the color of each
object in the scene as RGBA in the range [0, 1].
Args:
buf: A string containing the serialized protocol buffer.
Returns:
A dictionary containing tensors for each of the fields in the protocol
buffer. If _PARSE_METADATA is False, will omit fields starting with 'meta_'.
"""
feature_map = {
"frames":
tf.FixedLenFeature(shape=[_NUM_VIEWS], dtype=tf.string),
"top_down_frame":
tf.FixedLenFeature(shape=[1], dtype=tf.string),
"cameras":
tf.FixedLenFeature(
shape=[_NUM_VIEWS * _NUM_RAW_CAMERA_PARAMS], dtype=tf.float32),
"captions":
tf.VarLenFeature(dtype=tf.string),
"simplified_captions":
tf.VarLenFeature(dtype=tf.string),
"meta_shape":
tf.VarLenFeature(dtype=tf.string),
"meta_color":
tf.VarLenFeature(dtype=tf.string),
"meta_size":
tf.VarLenFeature(dtype=tf.string),
"meta_obj_positions":
tf.VarLenFeature(dtype=tf.float32),
"meta_obj_rotations":
tf.VarLenFeature(dtype=tf.float32),
"meta_obj_colors":
tf.VarLenFeature(dtype=tf.float32),
}
example = tf.parse_single_example(buf, feature_map)
images = tf.concat(example["frames"], axis=0)
images = tf.map_fn(
tf.image.decode_jpeg,
tf.reshape(images, [-1]),
dtype=tf.uint8,
back_prop=False)
top_down = tf.image.decode_jpeg(tf.squeeze(example["top_down_frame"]))
cameras = tf.reshape(example["cameras"], shape=[-1, _NUM_RAW_CAMERA_PARAMS])
captions = tf.sparse_tensor_to_dense(example["captions"], default_value="")
simplified_captions = tf.sparse_tensor_to_dense(
example["simplified_captions"], default_value="")
meta_shape = tf.sparse_tensor_to_dense(
example["meta_shape"], default_value="")
meta_color = tf.sparse_tensor_to_dense(
example["meta_color"], default_value="")
meta_size = tf.sparse_tensor_to_dense(example["meta_size"], default_value="")
meta_obj_positions = tf.sparse_tensor_to_dense(
example["meta_obj_positions"], default_value=0)
meta_obj_positions = tf.reshape(meta_obj_positions, shape=[-1, 3])
meta_obj_rotations = tf.sparse_tensor_to_dense(
example["meta_obj_rotations"], default_value=0)
meta_obj_rotations = tf.reshape(meta_obj_rotations, shape=[-1, 4])
meta_obj_colors = tf.sparse_tensor_to_dense(
example["meta_obj_colors"], default_value=0)
meta_obj_colors = tf.reshape(meta_obj_colors, shape=[-1, 4])
data_tensors = {
"images": images,
"cameras": cameras,
"captions": captions,
"simplified_captions": simplified_captions,
"top_down": top_down
}
if _PARSE_METADATA:
data_tensors.update({
"meta_shape": meta_shape,
"meta_color": meta_color,
"meta_size": meta_size,
"meta_obj_positions": meta_obj_positions,
"meta_obj_rotations": meta_obj_rotations,
"meta_obj_colors": meta_obj_colors
})
return data_tensors
def _make_indices():
indices = tf.range(0, _NUM_VIEWS)
indices = tf.random_shuffle(indices)
return indices
def _convert_and_resize_images(images, old_size):
images = tf.image.convert_image_dtype(images, dtype=tf.float32)
new_size = tf.cast(old_size, tf.float32) * _IMAGE_SCALE
new_size = tf.cast(new_size, tf.int32)
images = tf.image.resize_images(images, new_size, align_corners=True)
return images
def _preprocess_images(images, indices):
images_processed = tf.gather(images, indices)
old_size = tf.shape(images_processed)[1:3]
images_processed = _convert_and_resize_images(images_processed, old_size)
return images_processed
def _preprocess_td(td_image):
old_size = tf.shape(td_image)[0:2]
td_image = _convert_and_resize_images(td_image, old_size)
return td_image
def _preprocess_cameras(raw_cameras, indices):
"""Apply a nonlinear transformation to the vector of camera angles."""
raw_cameras = tf.gather(raw_cameras, indices)
azimuth = raw_cameras[:, 0]
pos = raw_cameras[:, 1:]
cameras = tf.concat(
[
pos,
tf.expand_dims(tf.sin(azimuth), -1),
tf.expand_dims(tf.cos(azimuth), -1)
],
axis=1)
return cameras
def _preprocess_captions(raw_caption, indices):
return tf.gather(raw_caption, indices)
def _preprocess_data(raw_data):
"""Randomly shuffle viewpoints and apply preprocessing to each modality."""
indices = _make_indices()
images = _preprocess_images(raw_data["images"], indices)
cameras = _preprocess_cameras(raw_data["cameras"], indices)
top_down = _preprocess_td(raw_data["top_down"])
if _USE_SIMPLIFIED_CAPTIONS:
captions = _preprocess_captions(raw_data["simplified_captions"], indices)
else:
captions = _preprocess_captions(raw_data["captions"], indices)
return [images, cameras, top_down, captions]
def _split_scene(images, cameras, top_down, captions):
"""Splits scene into query and target.
Args:
images: A tensor containing images.
cameras: A tensor containing cameras.
top_down: A tensor containing the scene seen from top.
captions: A tensor containing captions.
Returns:
A tuple query, target. The query is a tuple where the first element is the
sequence of 9 (images, cameras, captions) which can be given to the model
as context. The second element in the query is the camera angle of the
viewpoint to reconstruct. The target contains the image corresponding to the
queried viewpoint, the text description from that viewpoint and an image of
the scene viewed from above.
"""
context_image = images[:-1, :, :, :]
context_camera = cameras[:-1, :]
context_caption = captions[:-1]
target_image = images[-1, :, :, :]
target_camera = cameras[-1, :]
target_caption = captions[-1]
query = ((context_image, context_camera, context_caption), target_camera)
target = (target_image, target_caption, top_down)
return query, target
def _parse_function(buf):
raw_data = _parse_proto(buf)
scene_data = _preprocess_data(raw_data)
query, target = _split_scene(*scene_data)
return query, target, raw_data
def make_dataset(batch_size):
"""Returns a tf.data.Dataset object with the dataset."""
filenames = tf.placeholder(tf.string, shape=[None])
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(_parse_function)
dataset = dataset.repeat()
dataset = dataset.shuffle(128)
dataset = dataset.batch(batch_size)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
return filenames, iterator, next_element
|
apache-2.0
| 6,471,769,179,025,854,000
| 36.022059
| 80
| 0.697319
| false
| 3.632756
| false
| false
| false
|
mhils/mitmproxy
|
mitmproxy/proxy2/layers/http/_http1.py
|
1
|
17108
|
import abc
from typing import Union, Optional, Callable, Type
import h11
from h11._readers import ChunkedReader, ContentLengthReader, Http10Reader
from h11._receivebuffer import ReceiveBuffer
from mitmproxy import exceptions, http
from mitmproxy.net import http as net_http
from mitmproxy.net.http import http1, status_codes
from mitmproxy.net.http.http1 import read_sansio as http1_sansio
from mitmproxy.proxy2 import commands, events, layer
from mitmproxy.proxy2.context import Connection, ConnectionState, Context
from mitmproxy.proxy2.layers.http._base import ReceiveHttp, StreamId
from mitmproxy.proxy2.utils import expect
from mitmproxy.utils import human
from ._base import HttpConnection
from ._events import HttpEvent, RequestData, RequestEndOfMessage, RequestHeaders, RequestProtocolError, ResponseData, \
ResponseEndOfMessage, ResponseHeaders, ResponseProtocolError
TBodyReader = Union[ChunkedReader, Http10Reader, ContentLengthReader]
class Http1Connection(HttpConnection, metaclass=abc.ABCMeta):
stream_id: Optional[StreamId] = None
request: Optional[http.HTTPRequest] = None
response: Optional[http.HTTPResponse] = None
request_done: bool = False
response_done: bool = False
# this is a bit of a hack to make both mypy and PyCharm happy.
state: Union[Callable[[events.Event], layer.CommandGenerator[None]], Callable]
body_reader: TBodyReader
buf: ReceiveBuffer
ReceiveProtocolError: Type[Union[RequestProtocolError, ResponseProtocolError]]
ReceiveData: Type[Union[RequestData, ResponseData]]
ReceiveEndOfMessage: Type[Union[RequestEndOfMessage, ResponseEndOfMessage]]
def __init__(self, context: Context, conn: Connection):
super().__init__(context, conn)
self.buf = ReceiveBuffer()
@abc.abstractmethod
def send(self, event: HttpEvent) -> layer.CommandGenerator[None]:
yield from () # pragma: no cover
@abc.abstractmethod
def read_headers(self, event: events.ConnectionEvent) -> layer.CommandGenerator[None]:
yield from () # pragma: no cover
def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
if isinstance(event, HttpEvent):
yield from self.send(event)
else:
if isinstance(event, events.DataReceived) and self.state != self.passthrough:
self.buf += event.data
yield from self.state(event)
@expect(events.Start)
def start(self, _) -> layer.CommandGenerator[None]:
self.state = self.read_headers
yield from ()
state = start
def read_body(self, event: events.Event) -> layer.CommandGenerator[None]:
assert self.stream_id
while True:
try:
if isinstance(event, events.DataReceived):
h11_event = self.body_reader(self.buf)
elif isinstance(event, events.ConnectionClosed):
h11_event = self.body_reader.read_eof()
else:
raise AssertionError(f"Unexpected event: {event}")
except h11.ProtocolError as e:
yield commands.CloseConnection(self.conn)
yield ReceiveHttp(self.ReceiveProtocolError(self.stream_id, f"HTTP/1 protocol error: {e}"))
return
if h11_event is None:
return
elif isinstance(h11_event, h11.Data):
data: bytes = bytes(h11_event.data)
if data:
yield ReceiveHttp(self.ReceiveData(self.stream_id, data))
elif isinstance(h11_event, h11.EndOfMessage):
assert self.request
if h11_event.headers:
raise NotImplementedError(f"HTTP trailers are not implemented yet.")
if self.request.data.method.upper() != b"CONNECT":
yield ReceiveHttp(self.ReceiveEndOfMessage(self.stream_id))
is_request = isinstance(self, Http1Server)
yield from self.mark_done(
request=is_request,
response=not is_request
)
return
def wait(self, event: events.Event) -> layer.CommandGenerator[None]:
"""
We wait for the current flow to be finished before parsing the next message,
as we may want to upgrade to WebSocket or plain TCP before that.
"""
assert self.stream_id
if isinstance(event, events.DataReceived):
return
elif isinstance(event, events.ConnectionClosed):
# for practical purposes, we assume that a peer which sent at least a FIN
# is not interested in any more data from us,
# see https://github.com/httpwg/http-core/issues/22
if event.connection.state is not ConnectionState.CLOSED:
yield commands.CloseConnection(event.connection)
yield ReceiveHttp(self.ReceiveProtocolError(self.stream_id, f"Client disconnected.",
code=status_codes.CLIENT_CLOSED_REQUEST))
else: # pragma: no cover
raise AssertionError(f"Unexpected event: {event}")
def done(self, event: events.ConnectionEvent) -> layer.CommandGenerator[None]:
yield from () # pragma: no cover
def make_pipe(self) -> layer.CommandGenerator[None]:
self.state = self.passthrough
if self.buf:
already_received = self.buf.maybe_extract_at_most(len(self.buf))
yield from self.state(events.DataReceived(self.conn, already_received))
self.buf.compress()
def passthrough(self, event: events.Event) -> layer.CommandGenerator[None]:
assert self.stream_id
if isinstance(event, events.DataReceived):
yield ReceiveHttp(self.ReceiveData(self.stream_id, event.data))
elif isinstance(event, events.ConnectionClosed):
if isinstance(self, Http1Server):
yield ReceiveHttp(RequestEndOfMessage(self.stream_id))
else:
yield ReceiveHttp(ResponseEndOfMessage(self.stream_id))
def mark_done(self, *, request: bool = False, response: bool = False) -> layer.CommandGenerator[None]:
if request:
self.request_done = True
if response:
self.response_done = True
if self.request_done and self.response_done:
assert self.request
assert self.response
if should_make_pipe(self.request, self.response):
yield from self.make_pipe()
return
connection_done = (
http1_sansio.expected_http_body_size(self.request, self.response) == -1
or http1.connection_close(self.request.http_version, self.request.headers)
or http1.connection_close(self.response.http_version, self.response.headers)
# If we proxy HTTP/2 to HTTP/1, we only use upstream connections for one request.
# This simplifies our connection management quite a bit as we can rely on
# the proxyserver's max-connection-per-server throttling.
or (self.request.is_http2 and isinstance(self, Http1Client))
)
if connection_done:
yield commands.CloseConnection(self.conn)
self.state = self.done
return
self.request_done = self.response_done = False
self.request = self.response = None
if isinstance(self, Http1Server):
self.stream_id += 2
else:
self.stream_id = None
self.state = self.read_headers
if self.buf:
yield from self.state(events.DataReceived(self.conn, b""))
class Http1Server(Http1Connection):
"""A simple HTTP/1 server with no pipelining support."""
ReceiveProtocolError = RequestProtocolError
ReceiveData = RequestData
ReceiveEndOfMessage = RequestEndOfMessage
stream_id: int
def __init__(self, context: Context):
super().__init__(context, context.client)
self.stream_id = 1
def send(self, event: HttpEvent) -> layer.CommandGenerator[None]:
assert event.stream_id == self.stream_id
if isinstance(event, ResponseHeaders):
self.response = response = event.response
if response.is_http2:
response = response.copy()
# Convert to an HTTP/1 response.
response.http_version = "HTTP/1.1"
# not everyone supports empty reason phrases, so we better make up one.
response.reason = status_codes.RESPONSES.get(response.status_code, "")
# Shall we set a Content-Length header here if there is none?
# For now, let's try to modify as little as possible.
raw = http1.assemble_response_head(response)
yield commands.SendData(self.conn, raw)
elif isinstance(event, ResponseData):
assert self.response
if "chunked" in self.response.headers.get("transfer-encoding", "").lower():
raw = b"%x\r\n%s\r\n" % (len(event.data), event.data)
else:
raw = event.data
if raw:
yield commands.SendData(self.conn, raw)
elif isinstance(event, ResponseEndOfMessage):
assert self.response
if "chunked" in self.response.headers.get("transfer-encoding", "").lower():
yield commands.SendData(self.conn, b"0\r\n\r\n")
yield from self.mark_done(response=True)
elif isinstance(event, ResponseProtocolError):
if not self.response:
resp = http.make_error_response(event.code, event.message)
raw = http1.assemble_response(resp)
yield commands.SendData(self.conn, raw)
yield commands.CloseConnection(self.conn)
else:
raise AssertionError(f"Unexpected event: {event}")
def read_headers(self, event: events.ConnectionEvent) -> layer.CommandGenerator[None]:
if isinstance(event, events.DataReceived):
request_head = self.buf.maybe_extract_lines()
if request_head:
request_head = [bytes(x) for x in request_head] # TODO: Make url.parse compatible with bytearrays
try:
self.request = http1_sansio.read_request_head(request_head)
expected_body_size = http1_sansio.expected_http_body_size(self.request, expect_continue_as_0=False)
except (ValueError, exceptions.HttpSyntaxException) as e:
yield commands.Log(f"{human.format_address(self.conn.peername)}: {e}")
yield commands.CloseConnection(self.conn)
self.state = self.done
return
yield ReceiveHttp(RequestHeaders(self.stream_id, self.request, expected_body_size == 0))
self.body_reader = make_body_reader(expected_body_size)
self.state = self.read_body
yield from self.state(event)
else:
pass # FIXME: protect against header size DoS
elif isinstance(event, events.ConnectionClosed):
buf = bytes(self.buf)
if buf.strip():
yield commands.Log(f"Client closed connection before completing request headers: {buf!r}")
yield commands.CloseConnection(self.conn)
else:
raise AssertionError(f"Unexpected event: {event}")
def mark_done(self, *, request: bool = False, response: bool = False) -> layer.CommandGenerator[None]:
yield from super().mark_done(request=request, response=response)
if self.request_done and not self.response_done:
self.state = self.wait
class Http1Client(Http1Connection):
"""A simple HTTP/1 client with no pipelining support."""
ReceiveProtocolError = ResponseProtocolError
ReceiveData = ResponseData
ReceiveEndOfMessage = ResponseEndOfMessage
def __init__(self, context: Context):
super().__init__(context, context.server)
def send(self, event: HttpEvent) -> layer.CommandGenerator[None]:
if not self.stream_id:
assert isinstance(event, RequestHeaders)
self.stream_id = event.stream_id
self.request = event.request
assert self.stream_id == event.stream_id
if isinstance(event, RequestHeaders):
request = event.request
if request.is_http2:
# Convert to an HTTP/1 request.
request = request.copy() # (we could probably be a bit more efficient here.)
request.http_version = "HTTP/1.1"
if "Host" not in request.headers and request.authority:
request.headers.insert(0, "Host", request.authority)
request.authority = ""
raw = http1.assemble_request_head(request)
yield commands.SendData(self.conn, raw)
elif isinstance(event, RequestData):
assert self.request
if "chunked" in self.request.headers.get("transfer-encoding", "").lower():
raw = b"%x\r\n%s\r\n" % (len(event.data), event.data)
else:
raw = event.data
if raw:
yield commands.SendData(self.conn, raw)
elif isinstance(event, RequestEndOfMessage):
assert self.request
if "chunked" in self.request.headers.get("transfer-encoding", "").lower():
yield commands.SendData(self.conn, b"0\r\n\r\n")
elif http1_sansio.expected_http_body_size(self.request, self.response) == -1:
yield commands.CloseConnection(self.conn, half_close=True)
yield from self.mark_done(request=True)
elif isinstance(event, RequestProtocolError):
yield commands.CloseConnection(self.conn)
return
else:
raise AssertionError(f"Unexpected event: {event}")
def read_headers(self, event: events.ConnectionEvent) -> layer.CommandGenerator[None]:
if isinstance(event, events.DataReceived):
if not self.request:
# we just received some data for an unknown request.
yield commands.Log(f"Unexpected data from server: {bytes(self.buf)!r}")
yield commands.CloseConnection(self.conn)
return
assert self.stream_id
response_head = self.buf.maybe_extract_lines()
if response_head:
response_head = [bytes(x) for x in response_head] # TODO: Make url.parse compatible with bytearrays
try:
self.response = http1_sansio.read_response_head(response_head)
expected_size = http1_sansio.expected_http_body_size(self.request, self.response)
except (ValueError, exceptions.HttpSyntaxException) as e:
yield commands.CloseConnection(self.conn)
yield ReceiveHttp(ResponseProtocolError(self.stream_id, f"Cannot parse HTTP response: {e}"))
return
yield ReceiveHttp(ResponseHeaders(self.stream_id, self.response, expected_size == 0))
self.body_reader = make_body_reader(expected_size)
self.state = self.read_body
yield from self.state(event)
else:
pass # FIXME: protect against header size DoS
elif isinstance(event, events.ConnectionClosed):
if self.conn.state & ConnectionState.CAN_WRITE:
yield commands.CloseConnection(self.conn)
if self.stream_id:
if self.buf:
yield ReceiveHttp(ResponseProtocolError(self.stream_id,
f"unexpected server response: {bytes(self.buf)!r}"))
else:
# The server has closed the connection to prevent us from continuing.
# We need to signal that to the stream.
# https://tools.ietf.org/html/rfc7231#section-6.5.11
yield ReceiveHttp(ResponseProtocolError(self.stream_id, "server closed connection"))
else:
return
else:
raise AssertionError(f"Unexpected event: {event}")
def should_make_pipe(request: net_http.Request, response: net_http.Response) -> bool:
if response.status_code == 101:
return True
elif response.status_code == 200 and request.method.upper() == "CONNECT":
return True
else:
return False
def make_body_reader(expected_size: Optional[int]) -> TBodyReader:
if expected_size is None:
return ChunkedReader()
elif expected_size == -1:
return Http10Reader()
else:
return ContentLengthReader(expected_size)
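# Hypothetical illustration (not part of mitmproxy): how the expected body size
# maps onto the h11 reader classes imported above.
def _body_reader_examples():
    chunked = make_body_reader(None)   # Transfer-Encoding: chunked
    until_eof = make_body_reader(-1)   # HTTP/1.0-style read-until-close
    fixed = make_body_reader(42)       # Content-Length: 42
    return (isinstance(chunked, ChunkedReader)
            and isinstance(until_eof, Http10Reader)
            and isinstance(fixed, ContentLengthReader))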
__all__ = [
"Http1Client",
"Http1Server",
]
|
mit
| 300,163,260,257,517,600
| 45.113208
| 119
| 0.613573
| false
| 4.322385
| false
| false
| false
|
delph-in/pydelphin
|
delphin/eds/_operations.py
|
1
|
9714
|
"""
Operations on EDS
"""
import warnings
from itertools import count
from delphin import variable
from delphin import scope
from delphin import eds
from delphin import util
def from_mrs(m, predicate_modifiers=True, unique_ids=True,
representative_priority=None):
"""
Create an EDS by converting from MRS *m*.
In order for MRS to EDS conversion to work, the MRS must satisfy
the intrinsic variable property (see
:func:`delphin.mrs.has_intrinsic_variable_property`).
Args:
m: the input MRS
predicate_modifiers: if `True`, include predicate-modifier
edges; if `False`, only include basic dependencies; if a
callable, then call on the converted EDS before creating
unique ids (if `unique_ids=True`)
unique_ids: if `True`, recompute node identifiers to be unique
by the LKB's method; note that ids from *m* should already
be unique by PyDelphin's method
representative_priority: a function for ranking candidate
representative nodes; see :func:`scope.representatives`
Returns:
EDS
Raises:
EDSError: when conversion fails.
"""
# EP id to node id map; create now to keep ids consistent
hcmap = {hc.hi: hc for hc in m.hcons}
reps = scope.representatives(m, priority=representative_priority)
ivmap = {p.iv: (p, q)
for p, q in m.quantification_pairs()
if p is not None}
top = _mrs_get_top(m.top, hcmap, reps, m.index, ivmap)
deps = _mrs_args_to_basic_deps(m, hcmap, ivmap, reps)
nodes = _mrs_to_nodes(m, deps)
e = eds.EDS(
top=top,
nodes=nodes,
lnk=m.lnk,
surface=m.surface,
identifier=m.identifier)
if predicate_modifiers is True:
predicate_modifiers = find_predicate_modifiers
if predicate_modifiers:
addl_deps = predicate_modifiers(e, m, representatives=reps)
for id, node_deps in addl_deps.items():
e[id].edges.update(node_deps)
if unique_ids:
make_ids_unique(e, m)
return e
def _mrs_get_top(top, hcmap, reps, index, ivmap):
if top in hcmap and hcmap[top].lo in reps:
lbl = hcmap[top].lo
top = reps[lbl][0].id
else:
if top in hcmap:
warnings.warn(
f'broken handle constraint: {hcmap[top]}',
eds.EDSWarning
)
if top in reps:
top = reps[top][0].id
elif index in ivmap and ivmap[index][0].label in reps:
lbl = ivmap[index][0].label
top = reps[lbl][0].id
else:
warnings.warn('unable to find a suitable TOP', eds.EDSWarning)
top = None
return top
def _mrs_args_to_basic_deps(m, hcmap, ivmap, reps):
edges = {}
for src, roleargs in m.arguments().items():
if src in ivmap:
p, q = ivmap[src]
# non-quantifier EPs
edges[src] = {}
for role, tgt in roleargs:
# qeq
if tgt in hcmap:
lbl = hcmap[tgt].lo
if lbl in reps:
tgt = reps[lbl][0].id
else:
warnings.warn(
f'broken handle constraint: {hcmap[tgt]}',
eds.EDSWarning
)
continue
# label arg
elif tgt in reps:
tgt = reps[tgt][0].id
# regular arg
elif tgt in ivmap:
tgt = ivmap[tgt][0].id
# other (e.g., BODY, dropped arguments, etc.)
else:
continue
edges[src][role] = tgt
# add BV if the EP has a quantifier
if q is not None:
edges[q.id] = {eds.BOUND_VARIABLE_ROLE: src}
return edges
def _mrs_to_nodes(m, edges):
nodes = []
for ep in m.rels:
properties, type = None, None
if not ep.is_quantifier():
iv = ep.iv
properties = m.properties(iv)
type = variable.type(iv)
nodes.append(
eds.Node(ep.id,
ep.predicate,
type,
edges.get(ep.id, {}),
properties,
ep.carg,
ep.lnk,
ep.surface,
ep.base))
return nodes
def find_predicate_modifiers(e, m, representatives=None):
"""
Return an argument structure mapping for predicate-modifier edges.
In EDS, predicate modifiers are edges that describe a relation
between predications in the original MRS that is not evident on
the regular and scopal arguments. In practice these are EPs that
share a scope but do not select any other EPs within their scope,
such as when quantifiers are modified ("nearly every...") or with
relative clauses ("the chef whose soup spilled..."). These are
almost the same as the MOD/EQ links of DMRS, except that predicate
modifiers have more restrictions on their usage, mainly due to
their using a standard role (`ARG1`) instead of an
idiosyncratic one.
    Generally users won't call this function directly; it is invoked by
    calling :func:`from_mrs` with `predicate_modifiers=True`. It is
    visible here in case users want to inspect its results separately
from MRS-to-EDS conversion. Note that when calling it separately,
*e* should use the same predication ids as *m* (by calling
:func:`from_mrs` with `unique_ids=False`). Also, users may define
their own function with the same signature and return type and use
it in place of this one. See :func:`from_mrs` for details.
Args:
e: the EDS converted from *m* as by calling :func:`from_mrs`
with `predicate_modifiers=False` and `unique_ids=False`,
used to determine if parts of the graph are connected
m: the source MRS
representatives: the scope representatives; this argument is
mainly to prevent :func:`delphin.scope.representatives`
from being called twice on *m*
Returns:
A dictionary mapping source node identifiers to
role-to-argument dictionaries of any additional
predicate-modifier edges.
Examples:
>>> e = eds.from_mrs(m, predicate_modifiers=False)
        >>> print(eds.find_predicate_modifiers(e, m))
{'e5': {'ARG1': '_1'}}
"""
if representatives is None:
representatives = scope.representatives(m)
role = eds.PREDICATE_MODIFIER_ROLE
# find connected components so predicate modifiers only connect
# separate components
ids = {ep.id for ep in m.rels}
edges = []
for node in e.nodes:
for _, tgt in node.edges.items():
edges.append((node.id, tgt))
components = util._connected_components(ids, edges)
ccmap = {}
for i, component in enumerate(components):
for id in component:
ccmap[id] = i
addl = {}
if len(components) > 1:
for label, eps in representatives.items():
if len(eps) > 1:
first = eps[0]
joined = set([ccmap[first.id]])
for other in eps[1:]:
occ = ccmap[other.id]
type = variable.type(other.args.get(role, 'u0'))
needs_edge = occ not in joined
edge_available = type.lower() == 'u'
if needs_edge and edge_available:
addl.setdefault(other.id, {})[role] = first.id
joined.add(occ)
return addl
def make_ids_unique(e, m):
"""
Recompute the node identifiers in EDS *e* to be unique.
MRS objects used in conversion to EDS already have unique
predication ids, but they are created according to PyDelphin's
method rather than the LKB's method, namely with regard to
quantifiers and MRSs that do not have the intrinsic variable
property. This function recomputes unique EDS node identifiers by
the LKB's method.
.. note::
This function works in-place on *e* and returns nothing.
Args:
e: an EDS converted from MRS *m*, as from :func:`from_mrs`
with `unique_ids=False`
m: the MRS from which *e* was converted
"""
# deps can be used to single out ep from set sharing ARG0s
new_ids = (f'_{i}' for i in count(start=1))
nids = {}
used = {}
# initially only make new ids for quantifiers and those with no IV
for ep in m.rels:
nid = ep.iv
if nid is None or ep.is_quantifier():
nid = next(new_ids)
nids[ep.id] = nid
used.setdefault(nid, set()).add(ep.id)
# for ill-formed MRSs, more than one non-quantifier EP may have
# the same IV. Select a winner like selecting a scope
# representatives: the one not taking others in its group as an
# argument.
deps = {node.id: node.edges.items() for node in e.nodes}
for nid, ep_ids in used.items():
if len(ep_ids) > 1:
ep_ids = sorted(
ep_ids,
key=lambda n: any(d in ep_ids for _, d in deps.get(n, []))
)
for nid in ep_ids[1:]:
nids[nid] = next(new_ids)
# now use the unique ID mapping for reassignment
if e.top is not None:
e.top = nids[e.top]
for node in e.nodes:
node.id = nids[node.id]
edges = {role: nids[arg] for role, arg in node.edges.items()}
node.edges = edges
|
mit
| 6,722,001,387,782,816,000
| 34.323636
| 74
| 0.574532
| false
| 3.87012
| false
| false
| false
|
jrecuero/jc2li
|
jc2li/base.py
|
1
|
26215
|
__docformat__ = 'restructuredtext en'
# -----------------------------------------------------------------------------
# _ _
# (_)_ __ ___ _ __ ___ _ __| |_ ___
# | | '_ ` _ \| '_ \ / _ \| '__| __/ __|
# | | | | | | | |_) | (_) | | | |_\__ \
# |_|_| |_| |_| .__/ \___/|_| \__|___/
# |_|
# -----------------------------------------------------------------------------
#
# from __future__ import unicode_literals
from functools import wraps, partial
import sys
import inspect
import json
# import shlex
import jc2li.loggerator as loggerator
from prompt_toolkit import prompt
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import Completer, Completion
# from prompt_toolkit.validation import Validator, ValidationError
from prompt_toolkit.token import Token
from prompt_toolkit.styles import style_from_dict
from jc2li.common import TREE_ATTR, SYNTAX_ATTR, ARGOS_ATTR
from jc2li.journal import Journal
# -----------------------------------------------------------------------------
#
# ___ ___ _ __ ___| |_ __ _ _ __ | |_ ___
# / __/ _ \| '_ \/ __| __/ _` | '_ \| __/ __|
# | (_| (_) | | | \__ \ || (_| | | | | |_\__ \
# \___\___/|_| |_|___/\__\__,_|_| |_|\__|___/
#
# -----------------------------------------------------------------------------
#
MODULE = 'CLI.base'
LOGGER = loggerator.getLoggerator(MODULE)
# -----------------------------------------------------------------------------
# _ _ __ _ _ _ _
# ___| | __ _ ___ ___ __| | ___ / _(_)_ __ (_) |_(_) ___ _ __ ___
# / __| |/ _` / __/ __| / _` |/ _ \ |_| | '_ \| | __| |/ _ \| '_ \/ __|
# | (__| | (_| \__ \__ \ | (_| | __/ _| | | | | | |_| | (_) | | | \__ \
# \___|_|\__,_|___/___/ \__,_|\___|_| |_|_| |_|_|\__|_|\___/|_| |_|___/
#
# -----------------------------------------------------------------------------
#
class CliBase(object):
"""CliBase class is the base class for any class that will implement
commands to be used by the command line interface.
Attributes:
_WALL (:any:`dict`) : Internal dictionary used to update commands defined\
in derived classes.
CLI_STYLE (:any:`dict`) : Dictionary with default styles to be used in the\
command line.
"""
_WALL = {}
CLI_STYLE = style_from_dict({Token.Toolbar: '#ffffff italic bg:#007777',
Token.RPrompt: 'bg:#ff0066 #ffffff', })
__MODES = []
class CliCompleter(Completer):
"""CliCompleter class provide completion to any entry in the command line.
This class should make use of every completer for command arguments.
"""
def __init__(self, cli):
"""CliCompleter initialization method.
Args:
cli (CliBase) : Cli instance.
"""
self._nodepath = None
self._cli = cli
def get_completions(self, document, complete_event):
"""Method that provides completion for any input in the command line.
Args:
document (:class:`Document`) : Document instance with command line input data.
                complete_event (:class:`CompleteEvent`) : Event with input information
Returns:
:class:`Completion` : Completion instance with data to be completed.
"""
# self._nodepath = None
word_before_cursor = document.get_word_before_cursor(WORD=True)
if ' ' not in document.text:
matches = [m for m in self._cli.commands if m.startswith(word_before_cursor)]
for m in matches:
yield Completion(m, start_position=-len(word_before_cursor))
else:
line_as_list = document.text.split()
if len(line_as_list) == 0:
return
last_token = line_as_list[-1] if document.text[-1] != ' ' else ' '
cmdlabel = line_as_list[0]
command = self._cli.get_command_cb(cmdlabel)
if command is not None:
# Required for partial methods
if hasattr(command, 'func'):
command = command.func
root = getattr(command, TREE_ATTR, None)
journal = self._cli.journal
_, cli_argos = journal.get_cmd_and_cli_args(command, None, " ".join(line_as_list[1:]))
nodepath = None
children_nodes = None
try:
nodepath = root.find_path(cli_argos)
except Exception as ex:
LOGGER.error('{0}, {1} | {2}'.format(ex, ex.__traceback__.tb_lineno, self._nodepath))
if not nodepath and self._nodepath is None:
# if there is not path being found and there is not any
# previous path, just get the completion under the root.
self._nodepath = [root, ]
elif nodepath and document.text[-1] == ' ':
# if there is a path found and the last character
# entered is a space, use that path.
self._nodepath = nodepath
if self._nodepath:
                    # Get children from the path found or the last path
children_nodes = self._nodepath[-1].get_children_nodes() if self._nodepath[-1] else None
else:
# if there was not path or any last path, get children
# from the root.
children_nodes = root.get_children_nodes()
if children_nodes:
helps = [c.completer.help(last_token) for c in children_nodes]
self._cli.toolbar_str = " | ".join(helps)
for child in children_nodes:
LOGGER.debug('child is: {0}'.format(child.label))
matches = child.completer.complete(document, last_token)
if matches is None:
continue
for i, m in enumerate(matches):
yield Completion(m, start_position=-len(word_before_cursor))
# TODO: Remove help displayed in the completer
# yield Completion(m, start_position=-len(word_before_cursor), display_meta=helps[i])
# TODO: Trace and debug information to be removed or optimized.
LOGGER.debug('completer command: {0}'.format(command))
LOGGER.debug('document text is "{}"'.format(document.text))
LOGGER.debug('last document text is [{}]'.format(line_as_list[-1]))
LOGGER.debug('children nodes are {}'.format(children_nodes))
if children_nodes:
LOGGER.debug('children nodes are {}'.format([x.name for x in children_nodes]))
LOGGER.debug('nodepath is {}'.format(nodepath))
if nodepath:
LOGGER.debug('nodepath is {}'.format([x.name for x in nodepath]))
if self._nodepath and self._nodepath[-1] is not None:
LOGGER.debug('self._nodepath is {}'.format(self._nodepath))
LOGGER.debug('self._nodepath is {}'.format([x.name for x in self._nodepath]))
def __init__(self):
"""CliBase class initialization method.
"""
self.command = None
self.last_cmd = None
self.toolbar_str = ''
self.rprompt_str = ''
self.prompt_str = "> "
self.__commands = {}
self.journal = Journal()
self.setup_commands()
self.__recording = False
self.__record_data = []
@property
def commands(self):
"""Get property that returns keys for _cmdDict attribute
Returns:
:any:`list` : List with all command labels.
"""
return self.__commands.keys()
@property
def mode_stack(self):
return CliBase.__MODES
def get_command_cb(self, command):
"""Get the command callback for the given command label.
Args:
command (str) : String with the command label.
Returns:
:any:`function` : callback function for the given command.
"""
command_entry = self.__commands.get(command, (None, None))
return command_entry[0]
def get_command_desc(self, command):
"""Get the command description for the given command label.
Args:
command (str) : String with the command label.
Returns:
str : description for the given command.
"""
command_entry = self.__commands.get(command, (None, None))
return command_entry[1]
def is_command(self, command):
"""Returns if the given command label is found in the list of available
commands.
Args:
            command (str) : Command label to check as an available command.
Returns:
bool : True if command label is found, False else.
"""
return command in self.commands
def add_command(self, command, command_cb, desc=""):
"""Adds a new entry to the command dictionary.
Args:
command (str) : String with the command label.
command_cb (:any:`function`) : Function with the command callback.
Returns:
bool : True if command was added.
"""
if self.is_command(command):
LOGGER.warning('[{}] Command [{}] already present.'.format(MODULE, command))
self.__commands[command] = (command_cb, desc)
# At this point, inject the context in every argument attributes using
# command_cb.func._arguments._arguments[#].completer. That should work
# only for those with _arguments attribute inside command_cb.func.
if hasattr(command_cb, 'func') and hasattr(command_cb.func, ARGOS_ATTR):
for argument in getattr(command_cb.func, ARGOS_ATTR).arguments:
argument.journal = self.journal
return True
def exec_command(self, command, user_input):
"""Executes the command callback for the given command label.
Args:
command (str) : Command label for the command to execute.
user_input (str) : String with the command line input.
Returns:
object : value returned by the command callback.
"""
command_cb = self.get_command_cb(command)
if command_cb:
return command_cb(user_input)
def empty_line(self):
"""Method that don't provide any action when <CR> is entered in an
empty line.
By default, the same command is executed when just <CR> is entered,
but we don't want that behavior.
Returns:
:any:`None`
"""
pass
def precmd(self, command, line):
"""Method to be called before any command is being processed.
Args:
command (str) : String with new command entered.
line (str): string entered in the command line.
Returns:
bool : False will skip command execution.
"""
return True
def onecmd(self, line):
"""Method to be called when any command is being processed.
Args:
line (str): string entered in the command line.
Returns:
bool : False will exit command loop.
"""
return True
def postcmd(self, command, line):
"""Method to be called after any command is being processed.
Args:
command (str) : String with new command entered.
line (str): string entered in the command line.
Returns:
bool : False will exit command loop.
"""
return True
def get_bottom_toolbar_tokens(self, cli):
"""Method that provides data and format to be displayed in the ToolBar.
Args:
cli (:class:`CommandLineInterface`) : CommandLineInterface instance.
Returns:
:any:`list` : list with data to be displayed in the ToolBar.
"""
return [(Token.Toolbar, '{}'.format(self.toolbar_str)), ]
def get_rprompt_tokens(self, cli):
"""Returns tokens for command line right prompt.
Args:
cli (:class:`CommandLineInterface`) : CommandLineInterface instance.
Returns:
            :any:`list` : list with data to be displayed in the right prompt.
"""
return [(Token.RPrompt, '{}'.format(self.rprompt_str)), ]
def get_prompt_tokens(self, cli):
"""Returns tokens for command line prompt.
Args:
cli (:class:`CommandLineInterface`) : CommandLineInterface instance.
Returns:
:any:`list` : list with data to be displayed in the prompt.
"""
return [(Token.Prompt, '{}'.format(self.prompt_str)), ]
def extend_commands_from_class(self, classname):
"""Extends commands defined in a class to be included in the full
command line.
This is required only for commands defined in a class that is being
derived, and the derived class is the one being used in the command
line. This method allows to include all commands from the base
class.
Args:
            classname (str) : String with class name for the class whose\
methods should be imported.
Returns:
None
"""
for name, func_cb, desc in self._WALL.get(classname, []):
self.add_command(name, partial(func_cb, self), desc)
def setup_commands(self):
"""Register all commands to be used by the command line interface.
Returns:
None
"""
classname = self.__class__.__name__
calls = self._WALL.get(classname, [])
for name, func_cb, desc in calls:
LOGGER.debug('{0}::setup_commands add command {1}::{2}'.format(classname, name, func_cb))
self.add_command(name, partial(func_cb, self), desc)
def run_prompt(self, **kwargs):
"""Execute the command line.
Args:
prompt (:any:`str` or :any:`function`) : string or callback with prompt value
toolbar (:any:`str` or :any:`function`) : string or callback with toolbar value.
rprompt (:any:`str` or :any:`function`) : string or callback with right prompt value.
Returns:
str : String with the input entered by the user.
"""
toolbar = kwargs.get('toolbar', 'Enter a valid command')
self.toolbar_str = toolbar if isinstance(toolbar, str) else toolbar()
_prompt = kwargs.get('prompt', self.prompt_str)
self.prompt_str = _prompt if isinstance(_prompt, str) else _prompt()
rprompt = kwargs.get('rprompt', None)
if rprompt is not None:
self.rprompt_str = rprompt if isinstance(rprompt, str) else rprompt()
user_input = prompt(history=FileHistory('history.txt'),
auto_suggest=AutoSuggestFromHistory(),
completer=CliBase.CliCompleter(self),
# lexer=SqlLexer,
get_bottom_toolbar_tokens=self.get_bottom_toolbar_tokens,
get_rprompt_tokens=self.get_rprompt_tokens,
get_prompt_tokens=self.get_prompt_tokens,
style=self.CLI_STYLE,
# validator=CliValidator(),
refresh_interval=1)
return user_input
def start_recording(self):
"""Starts recording commands input in the command line.
Returns:
None
"""
self.__recording = True
def stop_recording(self):
"""Stops recording commands input in the command line.
Returns:
None
"""
self.__recording = False
if self.__record_data:
del self.__record_data[-1]
def clear_recording(self, from_record=None, to_record=None):
"""Clears the range of records recorded from the given range.
Args:
from_record (int) : First record to clear. Set to 0 if None.
to_record (int): Last record to clear. Set to last if None
"""
if from_record is None and to_record is None:
self.__record_data.clear()
elif from_record is None and to_record is not None:
if to_record < len(self.__record_data):
del self.__record_data[:to_record + 1]
elif from_record is not None and to_record is None:
if 0 <= from_record <= len(self.__record_data):
del self.__record_data[from_record:]
elif (0 <= from_record <= len(self.__record_data)) and\
to_record < len(self.__record_data) and\
from_record <= to_record:
del self.__record_data[from_record:to_record + 1]
else:
pass
def select_recording(self, from_record=None, to_record=None):
"""Selects the range of records recorded from the given range.
Args:
from_record (int) : First record to select. Set to 0 if None.
to_record (int): Last record to select. Set to last if None
Returns:
list : List of selected records.
"""
if from_record is None and to_record is None:
return self.__record_data
elif from_record is None and to_record is not None:
if to_record < len(self.__record_data):
return self.__record_data[:to_record + 1]
elif from_record is not None and to_record is None:
if 0 <= from_record <= len(self.__record_data):
return self.__record_data[from_record:]
elif (0 <= from_record <= len(self.__record_data)) and\
to_record < len(self.__record_data) and\
from_record < to_record:
return self.__record_data[from_record:to_record + 1]
else:
return []
def display_recording(self, from_record=None, to_record=None):
"""Displays the range of records recorded from the given range.
Args:
from_record (int) : First record to display. Set to 0 if None.
to_record (int): Last record to display. Set to last if None
Returns:
None
"""
records = self.select_recording(from_record, to_record)
for i, record in enumerate(records):
LOGGER.display('{0}: {1}'.format(i, record))
def save_recording(self, filename, from_record=None, to_record=None):
"""
"""
records = self.select_recording(from_record, to_record)
to_save = []
for record in records:
to_save.append({'command': record})
if to_save:
with open(filename, 'w') as f:
json.dump(to_save, f)
def record_command(self, user_input):
"""Saves in a JSON file the range of records recorded from the given
range.
Args:
from_record (int) : First record to save. Set to 0 if None.
to_record (int): Last record to save. Set to last if None
Returns:
None
"""
if self.__recording:
self.__record_data.append(user_input)
def exec_user_input(self, user_input, **kwargs):
"""Executes the string with the user input.
Args:
user_input (str) : String with the input entered by the user.
Keyword Args:
            precmd (bool) : True if precmd should be called.
            postcmd (bool) : True if postcmd should be called.
        Returns:
            bool : True if the application should continue, False otherwise.
"""
pre_return = True
cb_return = True
post_return = True
if user_input:
line_as_list = user_input.split()
if len(line_as_list) == 0:
return True
command = line_as_list[0]
if self.is_command(command):
if kwargs.get('precmd', False):
pre_return = self.precmd(command, user_input)
# precmd callback return value can be used to skip command
                    # if it returns False.
if pre_return:
self.record_command(user_input)
cb_return = self.exec_command(command, ' '.join(line_as_list[1:]))
# postcmd callback return value can be used to exit the
                    # command loop if it returns False.
if kwargs.get('postcmd', False):
post_return = self.postcmd(command, user_input)
self.last_cmd = user_input
else:
post_return = self.onecmd(user_input)
return post_return if cb_return is not False else cb_return
def cmdloop(self, **kwargs):
"""Method that is called to wait for any user input.
Keyword Args:
prompt (:any:`str` or :any:`function`) : string or callback with prompt value
toolbar (:class:`str` or :any:`function`) : string or callback with toolbar value.
rprompt (:any:`str` or :any:`function`) : string or callback with right prompt value.
            echo (bool) : True if the command should be echoed.
            precmd (bool) : True if precmd should be called.
postcmd (bool) : True if postcmd should be called.
Returns:
None
"""
while True:
user_input = self.run_prompt(**kwargs)
if kwargs.get('echo', False):
LOGGER.display(user_input)
if not self.exec_user_input(user_input, **kwargs):
return
def run(self, **kwargs):
"""Runs the command line interface for the given cli class.
Keyword Args:
prompt (:any:`str` or :any:`function`) : string or callback with prompt value
toolbar (:class:`str` or :any:`function`) : string or callback with toolbar value.
rprompt (:any:`str` or :any:`function`) : string or callback with right prompt value.
            echo (bool) : True if the command should be echoed.
            precmd (bool) : True if precmd should be called.
postcmd (bool) : True if postcmd should be called.
Returns:
None
"""
try:
self.cmdloop(**kwargs)
except KeyboardInterrupt:
LOGGER.display("")
pass
def run_mode(self, **kwargs):
"""Enters in a new mode.
In a new mode, parent commands are not available and the new scope
is for commands defined in the created mode.
Returns:
None
"""
mode_name = self.__class__.__name__
CliBase.__MODES.append(mode_name)
self.run(**kwargs)
def leave_mode(self, **kwargs):
"""Exits the running mode.
Returns:
str : Mode exiting name.
"""
if CliBase.__MODES:
return CliBase.__MODES.pop()
return None
def load_commands_from_json(self, json_data):
"""Loads CLI commands from a JSON variable.
The content of the JSON data should be a list of dictionaries, where
        every dictionary should contain at least a field called 'command'
        which contains the command to be executed.
Args:
json_data (json) : Variable with JSON data.
Returns:
None
"""
lista = json.loads(json_data)
for entry in lista:
self.exec_user_input(entry['command'])
def load_commands_from_file(self, filename):
"""Loads a file with the given filename with CLI commands in
JSON format.
Args:
filename (string) : String with the filename that contains\
the json data.
Returns:
None
"""
try:
with open(filename, 'r') as f:
data = json.load(f)
self.load_commands_from_json(json.dumps(data))
except OSError:
LOGGER.error('File not found {}'.format(filename), out=True)
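    # Illustrative JSON content accepted by load_commands_from_json and
    # load_commands_from_file (the commands shown are hypothetical):
    #
    #     [{"command": "greet world"},
    #      {"command": "exit"}]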
@staticmethod
def command(label=None, desc=None):
"""Decorator that setup a function as a command.
Args:
label (str) : command label that identifies the command in the\
command line (optional). If no value is entered, label is\
taken from the @syntax decorator.
desc (str) : command description (optional).
Returns:
func : Function wrapper.
"""
def f_command(f):
@wraps(f)
def _wrapper(self, *args, **kwargs):
return f(self, *args, **kwargs)
LOGGER.debug(f, "YELLOW")
module_name = sys._getframe(1).f_code.co_name
CliBase._WALL.setdefault(module_name, [])
if desc is not None:
_desc = desc
else:
# if the wrapper is not a <method> or a <function> it is a
# partial function, so the __doc__ is inside 'func' attribute.
if inspect.ismethod(_wrapper) or inspect.isfunction(_wrapper):
_desc = _wrapper.__doc__
else:
_desc = _wrapper.func.__doc__
label_from_syntax = getattr(f, SYNTAX_ATTR, None)
_label = f.__name__ if label_from_syntax is None else label_from_syntax.split()[0]
CliBase._WALL[module_name].append((label if label else _label, _wrapper, _desc))
return _wrapper
return f_command
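# Illustrative usage of CliBase (a minimal sketch, not part of the original
# module; the class, the 'greet' command and the prompt string below are
# hypothetical):
#
#     class MyCli(CliBase):
#
#         @CliBase.command(label='greet', desc='Say hello to somebody')
#         def do_greet(self, line):
#             print('hello {0}'.format(line))
#
#     MyCli().run(prompt='mycli> ', echo=True)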
|
mit
| 9,156,060,067,053,533,000
| 36.131728
| 117
| 0.529239
| false
| 4.41553
| false
| false
| false
|
ProjectQ-Framework/ProjectQ
|
projectq/setups/decompositions/cnot2cz.py
|
1
|
1282
|
# -*- coding: utf-8 -*-
# Copyright 2018 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Registers a decomposition for a CNOT gate in terms of CZ and Hadamard gates.
"""
from projectq.cengines import DecompositionRule
from projectq.meta import Compute, get_control_count, Uncompute
from projectq.ops import CZ, H, X
def _decompose_cnot(cmd):
"""Decompose CNOT gates."""
ctrl = cmd.control_qubits
eng = cmd.engine
with Compute(eng):
H | cmd.qubits[0]
CZ | (ctrl[0], cmd.qubits[0][0])
Uncompute(eng)
def _recognize_cnot(cmd):
return get_control_count(cmd) == 1
#: Decomposition rules
all_defined_decomposition_rules = [DecompositionRule(X.__class__, _decompose_cnot, _recognize_cnot)]
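# Illustrative use of the rule above (a sketch with a hypothetical engine
# setup; a decomposition-aware engine list is assumed): a CNOT is rewritten
# as H on the target, CZ on (control, target), then H again, with the
# Hadamards wrapped in Compute/Uncompute.
#
#     from projectq import MainEngine
#     from projectq.ops import CNOT
#     eng = MainEngine()
#     ctrl = eng.allocate_qubit()
#     target = eng.allocate_qubit()
#     CNOT | (ctrl, target)
#     eng.flush()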
|
apache-2.0
| 4,183,703,901,696,003,600
| 31.871795
| 100
| 0.711388
| false
| 3.474255
| false
| false
| false
|
haphaeu/yoshimi
|
EulerProject/061.py
|
1
|
5270
|
'''
THIS CODE IS WRONG - LOOK TO THE .C ONE
Problem 61
16 January 2004
Triangle, square, pentagonal, hexagonal, heptagonal, and octagonal numbers are
all figurate (polygonal) numbers and are generated by the following formulae:
Triangle P3,n = n(n+1)/2 1, 3, 6, 10, 15, ...
Square P4,n = n^2 1, 4, 9, 16, 25, ...
Pentagonal P5,n = n(3n-1)/2 1, 5, 12, 22, 35, ...
Hexagonal P6,n = n(2n-1) 1, 6, 15, 28, 45, ...
Heptagonal P7,n = n(5n-3)/2 1, 7, 18, 34, 55, ...
Octagonal P8,n = n(3n-2) 1, 8, 21, 40, 65, ...
The ordered set of three 4-digit numbers: 8128, 2882, 8281, has three
interesting properties.
1.The set is cyclic, in that the last two digits of each number is the first
two digits of the next number (including the last number with the first).
2.Each polygonal type: triangle (P3,127=8128), square (P4,91=8281), and
pentagonal (P5,44=2882), is represented by a different number in the set.
3.This is the only set of 4-digit numbers with this property.
Find the sum of the only ordered set of six cyclic 4-digit numbers for which
each polygonal type: triangle, square, pentagonal, hexagonal, heptagonal, and
octagonal, is represented by a different number in the set.
'''
def create_sets():
triang = [n*(n+1)/2 for n in range(1,150) if (n*(n+1)/2 > 999 and n*(n+1)/2 <= 9999) ]
square = [n*n for n in range(1,150) if (n*n > 999 and n*n <= 9999) ]
penta = [n*(3*n-1)/2 for n in range(1,150) if (n*(3*n-1)/2 > 999 and n*(3*n-1)/2 <= 9999) ]
hexa = [n*(2*n-1) for n in range(1,150) if (n*(2*n-1) > 999 and n*(2*n-1) <= 9999) ]
hepta = [n*(5*n-3)/2 for n in range(1,150) if (n*(5*n-3)/2 > 999 and n*(5*n-3)/2 <= 9999) ]
octa = [n*(3*n-2) for n in range(1,150) if (n*(3*n-2) > 999 and n*(3*n-2) <= 9999) ]
return [triang, square, penta, hexa, hepta, octa]
def check(nums,sets):
l1=set([x/100 for x in nums])
l2=set([x-100*(x/100) for x in nums])
if l1==l2:
if check_types(nums,sets): return True
return False
def check_types(nums,sets):
if set(nums) & set(sets[0]) != set() and \
set(nums) & set(sets[1]) != set() and \
set(nums) & set(sets[2]) != set() and \
set(nums) & set(sets[3]) != set() and \
set(nums) & set(sets[4]) != set() and \
set(nums) & set(sets[5]) != set():
return True
return False
from sys import stdout
from time import time
sets=create_sets()
it=len(sets[5])
jt=len(sets[4])
kt=len(sets[3])
intertot=it*jt*kt
print "Octa Hept Hexa"
stt=time()
for i, p5 in enumerate(sets[5]):
for j, p4 in enumerate(sets[4]):
for k, p3 in enumerate(sets[3]):
for p2 in sets[2]:
for p1 in sets[1]:
for p0 in sets[0]:
nums=[p5, p4, p3, p2, p1, p0]
if check(nums,sets):
print nums
et=time()-stt
rt=intertot/(k+1+j*kt+i*jt*kt) * et / 3600
stdout.write("%d %d %d - %.3fh remaining\r" % (p5,p4,p3,rt))
stdout.flush()
'''
Octa Hept Hexa
[1045, 2512, 1225, 4510, 1225, 1225]
[1045, 2512, 1225, 4510, 1225, 5050]
[1045, 2512, 1225, 4510, 1225, 5151]
[1045, 2512, 1225, 4510, 5625, 2556]
[1045, 2512, 2556, 4510, 5625, 1225]
[1045, 2512, 5151, 4510, 1225, 1225]
[1045, 2512, 5151, 4510, 1225, 5050]
[1045, 2512, 5151, 4510, 1225, 5151]
[1045, 2512, 5565, 4510, 1225, 6555]
[1045, 2839, 8128, 4510, 1681, 3916]
[1045, 4141, 2556, 4510, 5625, 2556]
[1045, 4141, 2556, 4510, 5625, 5050]
[1045, 4141, 2556, 4510, 5625, 5151]
[1045, 4141, 5151, 4510, 5041, 5050]
[1045, 4141, 5151, 4510, 5625, 2556]
[1045, 8910, 5151, 4510, 1089, 5050]
[1045, 8910, 5151, 4510, 1089, 5151]
[1045, 8910, 5565, 4510, 1089, 6555]
[1281, 2512, 1225, 2882, 8281, 8128]
[1281, 2512, 8128, 2882, 5625, 8256]
[1281, 2512, 8128, 2882, 8281, 1225]
[2133, 1651, 3321, 1717, 2116, 5151]
[2133, 1651, 3321, 5192, 9216, 3321]
[2133, 1651, 3321, 5192, 9216, 5050]
[2133, 1651, 3321, 5192, 9216, 5151]
[2133, 1651, 5151, 1717, 2116, 3321]
[2133, 1651, 5151, 5192, 9216, 3321]
[2133, 2512, 1225, 1717, 1225, 3321]
[2133, 2512, 3321, 1717, 1225, 1225]
[2133, 2512, 3321, 1717, 1225, 3321]
[2133, 2512, 3321, 1717, 1225, 5050]
[2133, 2512, 3321, 1717, 1225, 5151]
[2133, 2512, 5151, 1717, 1225, 3321]
[2133, 4141, 2556, 1717, 5625, 3321]
[2133, 4141, 3321, 1717, 1764, 6441]
[2133, 4141, 3321, 1717, 3364, 6441]
[2133, 4141, 3321, 1717, 5041, 5050]
[2133, 4141, 3321, 1717, 5625, 2556]
[2133, 4141, 3321, 2882, 8281, 8128]
[2133, 4141, 6441, 1717, 1764, 3321]
[2133, 4141, 6441, 1717, 3364, 3321]
[2133, 4141, 8128, 2882, 8281, 3321]
[2133, 8910, 2145, 4510, 1089, 3321]
[2133, 8910, 3321, 1717, 1089, 3321]
[2133, 8910, 3321, 1717, 1089, 5050]
[2133, 8910, 3321, 1717, 1089, 5151]
[2133, 8910, 3321, 4510, 1089, 2145]
[2133, 8910, 5151, 1717, 1089, 3321]
[2821, 4141, 1128, 1617, 2116, 1711]
[4033, 3367, 1540, 1717, 6724, 2415]
[4033, 3367, 2415, 1717, 6724, 1540]
[4720, 2512, 1225, 1247, 2025, 1225]
[4720, 2512, 1225, 1247, 2025, 5050]
[4720, 2512, 1225, 1247, 2025, 5151]
[4720, 2512, 5151, 1247, 2025, 1225]
[4720, 2512, 5151, 1247, 2025, 5050]
[4720, 2512, 5151, 1247, 2025, 5151]
[4720, 2512, 5565, 1247, 2025, 6555]
[4720, 8037, 5151, 2147, 3721, 2080]
[5985, 2059, 2556, 1520, 5625, 8515]
[8965, 8910, 5151, 5551, 1089, 6555]
[8965, 8910, 5565, 1717, 1089, 6555]
[9633, 3367, 2415, 1717, 6724, 1596]
|
lgpl-3.0
| -4,745,148,190,800,327,000
| 34.85034
| 93
| 0.624288
| false
| 2.113919
| false
| false
| false
|
rolandgeider/OpenSlides
|
openslides/utils/views.py
|
1
|
4506
|
from io import BytesIO
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy
from django.views import generic as django_views
from django.views.decorators.csrf import ensure_csrf_cookie
from reportlab.lib.units import cm
from reportlab.platypus import SimpleDocTemplate, Spacer
from rest_framework.response import Response
from rest_framework.views import APIView as _APIView
from .pdf import firstPage, laterPages
View = django_views.View
class SingleObjectMixin(django_views.detail.SingleObjectMixin):
"""
Mixin for single objects from the database.
"""
def dispatch(self, *args, **kwargs):
if not hasattr(self, 'object'):
# Save the object not only in the cache but in the public
# attribute self.object because Django expects this later.
# Because get_object() has an internal cache this line is not a
# performance problem.
self.object = self.get_object()
return super().dispatch(*args, **kwargs)
def get_object(self, *args, **kwargs):
"""
Returns the single object from database or cache.
"""
try:
obj = self._object
except AttributeError:
obj = super().get_object(*args, **kwargs)
self._object = obj
return obj
class CSRFMixin:
"""
Adds the csrf cookie to the response.
"""
@classmethod
def as_view(cls, *args, **kwargs):
view = super().as_view(*args, **kwargs)
return ensure_csrf_cookie(view)
class PDFView(View):
"""
    View to generate a PDF.
"""
filename = ugettext_lazy('undefined-filename')
top_space = 3
document_title = None
required_permission = None
def check_permission(self, request, *args, **kwargs):
"""
Checks if the user has the required permission.
"""
if self.required_permission is None:
return True
else:
return request.user.has_perm(self.required_permission)
def dispatch(self, request, *args, **kwargs):
"""
        Check if the user has the required permission.
        If the permission check fails, a PermissionDenied exception is raised.
"""
if not self.check_permission(request, *args, **kwargs):
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get_top_space(self):
return self.top_space
def get_document_title(self):
if self.document_title:
return str(self.document_title)
else:
return ''
def get_filename(self):
return self.filename
def get_template(self, buffer):
return SimpleDocTemplate(buffer)
def build_document(self, pdf_document, story):
pdf_document.build(
story, onFirstPage=firstPage, onLaterPages=laterPages)
def render_to_response(self, filename):
response = HttpResponse(content_type='application/pdf')
filename = 'filename=%s.pdf;' % self.get_filename()
response['Content-Disposition'] = filename.encode('utf-8')
buffer = BytesIO()
pdf_document = self.get_template(buffer)
pdf_document.title = self.get_document_title()
story = [Spacer(1, self.get_top_space() * cm)]
self.append_to_pdf(story)
self.build_document(pdf_document, story)
pdf = buffer.getvalue()
buffer.close()
response.write(pdf)
return response
def get(self, request, *args, **kwargs):
return self.render_to_response(self.get_filename())
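# Illustrative PDFView subclass (a minimal sketch; the view below is
# hypothetical, and append_to_pdf -- which render_to_response calls -- must
# be provided by every concrete subclass):
#
#     class AgendaPDFView(PDFView):
#         required_permission = 'agenda.can_see'
#         filename = ugettext_lazy('agenda')
#         document_title = ugettext_lazy('Agenda')
#
#         def append_to_pdf(self, story):
#             story.append(Spacer(1, 1 * cm))  # append reportlab flowables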
class APIView(_APIView):
"""
The Django Rest framework APIView with improvements for OpenSlides.
"""
http_method_names = []
"""
The allowed actions have to be explicitly defined.
    Django allows the following:
http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace']
"""
def get_context_data(self, **context):
"""
Returns the context for the response.
"""
return context
def method_call(self, request, *args, **kwargs):
"""
Http method that returns the response object with the context data.
"""
return Response(self.get_context_data())
# Add the http-methods and delete the method "method_call"
get = post = put = patch = delete = head = options = trace = method_call
del method_call
|
mit
| -4,697,838,992,160,653,000
| 28.644737
| 93
| 0.628051
| false
| 4.227017
| false
| false
| false
|
probcomp/bdbcontrib
|
src/parallel.py
|
1
|
8878
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Speed up BDB queries by parallelizing them.
Intuition
---------
``ESTIMATE SIMILARITY FROM PAIRWISE t`` will run in ``O(n^2 m v)`` time, where
``n`` is the number of rows, ``m`` is the number of models, and ``v`` is the
average number of views per model. While this query is reasonable for small
datasets, on medium-large datasets the query slows down significantly and can
become intractable. Splitting the processing up among multiple cores can
greatly reduce computation time; this module provides functionality to assist
this multiprocessing.
Currently, a multiprocessing equivalent is provided only for
``ESTIMATE PAIRWISE SIMILARITY``. In fact, this is a query that is most likely
to require multiprocessing, as datasets frequently have many more rows than
columns.
Example
-------
Following are (very) informal timing statistics with a 200 rows by 4 column
.cvs file, run on a late 2012 MacBook Pro with a 2.5 GHz 2-core Intel Core i5::
id,one,two,three,four
0,2,3,4,two
1,1,5,4,three
2,5,1,5,one
...
197,0,5,0,five
198,5,3,0,three
199,4,5,2,three
After inserting this .csv data into a table ``t`` and analyzing it quickly::
bdb.execute('''
CREATE GENERATOR t_cc FOR t USING crosscat (
GUESS(*),
id IGNORE
)
''')
bdb.execute('INITIALIZE 3 MODELS FOR t_cc')
bdb.execute('ANALYZE t_cc MODELS 0-2 FOR 10 ITERATIONS WAIT')
The corresponding similarity table thus has 200^2 = 40000 rows::
In [72]: %timeit -n 10 cursor_to_df(bdb.execute('ESTIMATE SIMILARITY FROM PAIRWISE t_cc'))
10 loops, best of 3: 9.56 s per loop
In [73]: %timeit -n 10 parallel.estimate_pairwise_similarity(bdb_file.name, 't', 't_cc', overwrite=True)
10 loops, best of 3: 5.16 s per loop # And values are located in the t_similarity table.
The approximate 2x speed up is what would be expected from dividing the work
among two cores. Further speed increases are likely with more powerful
machines.
----
"""
from bayeslite.exception import BayesLiteException as BLE
from bdbcontrib.bql_utils import cursor_to_df
import multiprocessing as mp
from bayeslite import bayesdb_open, bql_quote_name
from bayeslite.util import cursor_value
def _query_into_queue(query_string, params, queue, bdb_file):
"""
Estimate pairwise similarity of a certain subset of the bdb according to
query_string; place it in the multiprocessing Manager.Queue().
    For two technical reasons, this function is defined as a toplevel function and
independently creates a bdb handle:
1) Multiprocessing workers must be pickleable, and thus must be
declared as toplevel functions;
2) Multiple threads cannot access the same bdb handle, lest concurrency
issues arise with corrupt data.
Parameters
----------
query_string : str
        The parametrized query to execute, built by estimate_pairwise_similarity.
    params : tuple
        Parameter values (LIMIT and OFFSET) bound to the query.
queue : multiprocessing.Manager.Queue
Queue to place results into
bdb_file : str
File location of the BayesDB database. This function will
independently open a new BayesDB handler.
"""
bdb = bayesdb_open(pathname=bdb_file)
res = bdb.execute(query_string, params)
queue.put(cursor_to_df(res))
def _chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i+n]
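# Illustrative behaviour of _chunks (an informal doctest, hypothetical values):
#
#     >>> list(_chunks([1, 2, 3, 4, 5], 2))
#     [[1, 2], [3, 4], [5]]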
def estimate_pairwise_similarity(bdb_file, table, model, sim_table=None,
cores=None, N=None, overwrite=False):
"""
Estimate pairwise similarity from the given model, splitting processing
across multiple processors, and save results into sim_table.
Because called methods in this function must also open up separate BayesDB
instances, this function accepts a BayesDB filename, rather than an actual
bayeslite.BayesDB object.
Parameters
----------
bdb_file : str
File location of the BayesDB database object. This function will
handle opening the file with bayeslite.bayesdb_open.
table : str
Name of the table containing the raw data.
model : str
Name of the metamodel to estimate from.
sim_table : str
Name of the table to insert similarity results into. Defaults to
table name + '_similarity'.
cores : int
Number of processors to use. Defaults to the number of cores as
        identified by multiprocessing.cpu_count().
N : int
Number of rows for which to estimate pairwise similarities (so
N^2 calculations are done). Should be used just to test small
batches; currently, there is no control over what specific pairwise
similarities are estimated with this parameter.
overwrite : bool
Whether to overwrite the sim_table if it already exists. If
overwrite=False and the table exists, function will raise
        sqlite3.OperationalError. Default False.
"""
bdb = bayesdb_open(pathname=bdb_file)
if cores is None:
cores = mp.cpu_count()
if cores < 1:
raise BLE(ValueError(
"Invalid number of cores {}".format(cores)))
if sim_table is None:
sim_table = table + '_similarity'
# Get number of occurrences in the database
count_cursor = bdb.execute(
'SELECT COUNT(*) FROM {}'.format(bql_quote_name(table))
)
table_count = cursor_value(count_cursor)
if N is None:
N = table_count
elif N > table_count:
raise BLE(ValueError(
"Asked for N={} rows but {} rows in table".format(N, table_count)))
# Calculate the size (# of similarities to compute) and
# offset (where to start) calculation for each worker query.
# Divide sizes evenly, and make the last job finish the remainder
sizes = [(N * N) / cores for i in range(cores)]
sizes[-1] += (N * N) % cores
total = 0
offsets = [total]
for size in sizes[:-1]:
total += size
offsets.append(total)
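    # For example (hypothetical values, using Python 2 integer division):
    # with N=10 and cores=3, sizes == [33, 33, 34] and offsets == [0, 33, 66],
    # so the three workers together cover all 100 row pairs exactly once.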
# Create the similarity table. Assumes original table has rowid column.
# XXX: tables don't necessarily have an autoincrementing primary key
# other than rowid, which is implicit and can't be set as a foreign key.
# We ought to ask for an optional user-specified foreign key, but
# ESTIMATE SIMILARITY returns numerical values rather than row names, so
# changing numerical rownames into that foreign key would be finicky. For
# now, we eliminate REFERENCE {table}(foreign_key) from the rowid0 and
# rowid1 specs.
sim_table_q = bql_quote_name(sim_table)
if overwrite:
bdb.sql_execute('DROP TABLE IF EXISTS {}'.format(sim_table_q))
bdb.sql_execute('''
CREATE TABLE {} (
rowid0 INTEGER NOT NULL,
rowid1 INTEGER NOT NULL,
value DOUBLE NOT NULL
)
'''.format(sim_table_q))
# Define the helper which inserts data into table in batches
def insert_into_sim(df):
"""
Use the main thread bdb handle to successively insert results of
ESTIMATEs into the table.
"""
rows = map(list, df.values)
insert_sql = '''
INSERT INTO {} (rowid0, rowid1, value) VALUES (?, ?, ?)
'''.format(sim_table_q)
# Avoid sqlite3 500-insert limit by grouping insert statements
# into one transaction.
with bdb.transaction():
for row in rows:
bdb.sql_execute(insert_sql, row)
pool = mp.Pool(processes=cores)
manager = mp.Manager()
queue = manager.Queue()
# Construct the estimate query template.
q_template = '''
ESTIMATE SIMILARITY FROM PAIRWISE {} LIMIT ? OFFSET ?
''' .format(bql_quote_name(model))
for so in zip(sizes, offsets):
pool.apply_async(
_query_into_queue, args=(q_template, so, queue, bdb_file)
)
# Close pool and wait for processes to finish
# FIXME: This waits for all processes to finish before inserting
# into the table, which means that memory usage is potentially very
# high!
pool.close()
pool.join()
# Process returned results
while not queue.empty():
df = queue.get()
insert_into_sim(df)
|
apache-2.0
| -2,968,221,592,813,170,700
| 34.654618
| 108
| 0.669633
| false
| 3.895568
| false
| false
| false
|
brianwc/courtlistener
|
cl/users/management/commands/cl_account_management.py
|
1
|
4884
|
import datetime
import hashlib
import random
from cl.users.models import UserProfile
from cl.users.utils import emails
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.core.management import BaseCommand
from django.utils.timezone import now
class Command(BaseCommand):
help = ('Notify users of unconfirmed accounts and delete accounts that '
'were never confirmed')
def add_arguments(self, parser):
parser.add_argument(
'--notify',
action='store_true',
default=False,
help='Notify users with unconfirmed accounts older than five days, '
'and delete orphaned profiles.'
)
parser.add_argument(
'--delete',
action='store_true',
default=False,
help='Delete unconfirmed accounts older than two months'
)
parser.add_argument(
'--simulate',
action='store_true',
default=False,
help='Simulate the emails that would be sent, using the console '
'backend. Do not delete accounts.'
)
parser.add_argument(
'--verbose',
action='store_true',
default=False,
help="Create more output."
)
def handle(self, *args, **options):
self.options = options
if options['delete']:
self.delete_old_accounts()
if options['notify']:
self.notify_unconfirmed_accounts()
if options['simulate']:
print "**************************************"
print "* NO EMAILS SENT OR ACCOUNTS DELETED *"
print "**************************************"
def delete_old_accounts(self):
"""Find accounts older than roughly two months that have not been
confirmed, and delete them. Should be run once a month, or so.
"""
two_months_ago = now() - datetime.timedelta(60)
unconfirmed_ups = UserProfile.objects.filter(
email_confirmed=False,
user__date_joined__lte=two_months_ago,
stub_account=False,
)
for up in unconfirmed_ups:
user = up.user.username
if self.options['verbose']:
print "User %s deleted" % user
if not self.options['simulate']:
# Gather their foreign keys, delete those
up.alert.all().delete()
up.donation.all().delete()
up.favorite.all().delete()
# delete the user then the profile.
up.user.delete()
up.delete()
def notify_unconfirmed_accounts(self):
"""This function will notify people who have not confirmed their
accounts that they must do so for fear of deletion.
This function should be run once a week, or so.
Because it updates the expiration date of the user's key, and also uses
that field to determine if the user should be notified in the first
place, the first week, a user will have an old enough key, and will be
notified, but the next week their key will have a very recent
expiration date (because it was just updated the prior week). This
means that they won't be selected the next week, but the one after,
their key will be old again, and they will be selected. It's not ideal,
but it's OK.
"""
# if your account is more than a week old, and you have not confirmed
# it, we will send you a notification, requesting that you confirm it.
a_week_ago = now() - datetime.timedelta(7)
unconfirmed_ups = UserProfile.objects.filter(
email_confirmed=False,
key_expires__lte=a_week_ago,
stub_account=False
)
for up in unconfirmed_ups:
if self.options['verbose']:
print "User %s will be notified" % up.user
if not self.options['simulate']:
# Build and save a new activation key for the account.
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
activation_key = hashlib.sha1(
salt + up.user.username).hexdigest()
key_expires = now() + datetime.timedelta(5)
up.activation_key = activation_key
up.key_expires = key_expires
up.save()
# Send the email.
current_site = Site.objects.get_current()
email = emails['email_not_confirmed']
send_mail(
email['subject'] % current_site.name,
email['body'] % (up.user.username, up.activation_key),
email['from'],
[up.user.email]
)
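# Illustrative invocations (a sketch; scheduling these via cron or a similar
# mechanism is an assumption, not part of this module):
#
#     ./manage.py cl_account_management --notify --verbose
#     ./manage.py cl_account_management --delete
#     ./manage.py cl_account_management --notify --delete --simulate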
|
agpl-3.0
| 8,706,912,930,454,461,000
| 37.456693
| 80
| 0.558149
| false
| 4.732558
| false
| false
| false
|
jiaphuan/models
|
research/astronet/light_curve_util/util.py
|
1
|
7461
|
# Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Light curve utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import numpy as np
from six.moves import range # pylint:disable=redefined-builtin
def phase_fold_time(time, period, t0):
"""Creates a phase-folded time vector.
result[i] is the unique number in [-period / 2, period / 2)
such that result[i] = time[i] - t0 + k_i * period, for some integer k_i.
Args:
time: 1D numpy array of time values.
period: A positive real scalar; the period to fold over.
t0: The center of the resulting folded vector; this value is mapped to 0.
Returns:
A 1D numpy array.
"""
half_period = period / 2
result = np.mod(time + (half_period - t0), period)
result -= half_period
return result
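# Illustrative behaviour of phase_fold_time (an informal doctest with
# hypothetical values; the last element is subject to floating point
# rounding):
#
#     >>> phase_fold_time(np.array([0.0, 1.0, 2.5, 4.2]), period=2.0, t0=1.0)
#     array([-1. ,  0. , -0.5, -0.8])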
def split(all_time, all_flux, gap_width=0.75):
"""Splits a light curve on discontinuities (gaps).
This function accepts a light curve that is either a single segment, or is
  piecewise defined (e.g. split by quarter breaks or gaps in the data).
Args:
all_time: Numpy array or list of numpy arrays; each is a sequence of time
values.
all_flux: Numpy array or list of numpy arrays; each is a sequence of flux
values of the corresponding time array.
gap_width: Minimum gap size (in time units) for a split.
Returns:
out_time: List of numpy arrays; the split time arrays.
out_flux: List of numpy arrays; the split flux arrays.
"""
# Handle single-segment inputs.
# We must use an explicit length test on all_time because implicit conversion
# to bool fails if all_time is a numpy array, and all_time.size is not defined
# if all_time is a list of numpy arrays.
if len(all_time) > 0 and not isinstance(all_time[0], collections.Iterable): # pylint:disable=g-explicit-length-test
all_time = [all_time]
all_flux = [all_flux]
out_time = []
out_flux = []
for time, flux in itertools.izip(all_time, all_flux):
start = 0
for end in range(1, len(time) + 1):
# Choose the largest endpoint such that time[start:end] has no gaps.
if end == len(time) or time[end] - time[end - 1] > gap_width:
out_time.append(time[start:end])
out_flux.append(flux[start:end])
start = end
return out_time, out_flux
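# Illustrative behaviour of split (hypothetical values): with the default
# gap_width of 0.75, the jump from 1.0 to 5.0 triggers a split.
#
#     >>> t = np.array([0.0, 0.5, 1.0, 5.0, 5.5])
#     >>> f = np.ones_like(t)
#     >>> out_t, out_f = split(t, f, gap_width=0.75)
#     >>> [list(seg) for seg in out_t]
#     [[0.0, 0.5, 1.0], [5.0, 5.5]]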
def remove_events(all_time, all_flux, events, width_factor=1.0):
"""Removes events from a light curve.
This function accepts either a single-segment or piecewise-defined light
  curve (e.g. one that is split by quarter breaks or gaps in the data).
Args:
all_time: Numpy array or list of numpy arrays; each is a sequence of time
values.
all_flux: Numpy array or list of numpy arrays; each is a sequence of flux
values of the corresponding time array.
events: List of Event objects to remove.
width_factor: Fractional multiplier of the duration of each event to remove.
Returns:
output_time: Numpy array or list of numpy arrays; the time arrays with
events removed.
output_flux: Numpy array or list of numpy arrays; the flux arrays with
events removed.
"""
# Handle single-segment inputs.
# We must use an explicit length test on all_time because implicit conversion
# to bool fails if all_time is a numpy array and all_time.size is not defined
# if all_time is a list of numpy arrays.
if len(all_time) > 0 and not isinstance(all_time[0], collections.Iterable): # pylint:disable=g-explicit-length-test
all_time = [all_time]
all_flux = [all_flux]
single_segment = True
else:
single_segment = False
output_time = []
output_flux = []
for time, flux in itertools.izip(all_time, all_flux):
mask = np.ones_like(time, dtype=np.bool)
for event in events:
transit_dist = np.abs(phase_fold_time(time, event.period, event.t0))
mask = np.logical_and(mask,
transit_dist > 0.5 * width_factor * event.duration)
if single_segment:
output_time = time[mask]
output_flux = flux[mask]
else:
output_time.append(time[mask])
output_flux.append(flux[mask])
return output_time, output_flux
def interpolate_masked_spline(all_time, all_masked_time, all_masked_spline):
"""Linearly interpolates spline values across masked points.
Args:
all_time: List of numpy arrays; each is a sequence of time values.
all_masked_time: List of numpy arrays; each is a sequence of time values
with some values missing (masked).
all_masked_spline: List of numpy arrays; the masked spline values
corresponding to all_masked_time.
Returns:
interp_spline: List of numpy arrays; each is the masked spline with missing
points linearly interpolated.
"""
interp_spline = []
for time, masked_time, masked_spline in itertools.izip(
all_time, all_masked_time, all_masked_spline):
if len(masked_time) > 0: # pylint:disable=g-explicit-length-test
interp_spline.append(np.interp(time, masked_time, masked_spline))
else:
interp_spline.append(np.full_like(time, np.nan))
return interp_spline
def count_transit_points(time, event):
"""Computes the number of points in each transit of a given event.
Args:
time: Sorted numpy array of time values.
event: An Event object.
Returns:
A numpy array containing the number of time points "in transit" for each
transit occurring between the first and last time values.
Raises:
ValueError: If there are more than 10**6 transits.
"""
t_min = np.min(time)
t_max = np.max(time)
# Tiny periods or erroneous time values could make this loop take forever.
if (t_max - t_min) / event.period > 10**6:
raise ValueError(
"Too many transits! Time range is [%.2f, %.2f] and period is %.2e." %
(t_min, t_max, event.period))
# Make sure t0 is in [t_min, t_min + period).
t0 = np.mod(event.t0 - t_min, event.period) + t_min
# Prepare loop variables.
points_in_transit = []
i, j = 0, 0
for transit_midpoint in np.arange(t0, t_max, event.period):
transit_begin = transit_midpoint - event.duration / 2
transit_end = transit_midpoint + event.duration / 2
# Move time[i] to the first point >= transit_begin.
while time[i] < transit_begin:
# transit_begin is guaranteed to be < np.max(t) (provided duration >= 0).
# Therefore, i cannot go out of range.
i += 1
# Move time[j] to the first point > transit_end.
while time[j] <= transit_end:
j += 1
# j went out of range. We're finished.
if j >= len(time):
break
# The points in the current transit duration are precisely time[i:j].
# Since j is an exclusive index, there are exactly j-i points in transit.
points_in_transit.append(j - i)
return np.array(points_in_transit)
|
apache-2.0
| 6,828,795,574,697,506,000
| 34.193396
| 118
| 0.68275
| false
| 3.604348
| false
| false
| false
|
jean/sentry
|
tests/sentry/api/endpoints/test_organization_repositories.py
|
1
|
1493
|
from __future__ import absolute_import
import six
from django.core.urlresolvers import reverse
from sentry.models import Repository
from sentry.testutils import APITestCase
class OrganizationRepositoriesListTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name='baz')
repo = Repository.objects.create(
name='example',
organization_id=org.id,
)
url = reverse('sentry-api-0-organization-repositories', args=[org.slug])
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]['id'] == six.text_type(repo.id)
class OrganizationRepositoriesCreateTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name='baz')
url = reverse('sentry-api-0-organization-repositories', args=[org.slug])
response = self.client.post(
url, data={
'provider': 'dummy',
'name': 'getsentry/sentry',
}
)
assert response.status_code == 201, (response.status_code, response.content)
assert response.data['id']
repo = Repository.objects.get(id=response.data['id'])
assert repo.provider == 'dummy'
assert repo.name == 'getsentry/sentry'
|
bsd-3-clause
| -1,317,995,337,978,983,400
| 30.104167
| 84
| 0.636303
| false
| 4.002681
| true
| false
| false
|
davidbgk/udata
|
udata/core/discussions/api.py
|
1
|
6414
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from flask_security import current_user
from flask_restplus.inputs import boolean
from udata.auth import admin_permission
from udata.api import api, API, fields
from udata.core.user.api_fields import user_ref_fields
from .forms import DiscussionCreateForm, DiscussionCommentForm
from .models import Message, Discussion
from .permissions import CloseDiscussionPermission
from .signals import (
on_new_discussion, on_new_discussion_comment, on_discussion_closed,
on_discussion_deleted
)
ns = api.namespace('discussions', 'Discussion related operations')
message_fields = api.model('DiscussionMessage', {
'content': fields.String(description='The message body'),
'posted_by': fields.Nested(user_ref_fields,
description='The message author'),
'posted_on': fields.ISODateTime(description='The message posting date'),
})
discussion_fields = api.model('Discussion', {
'id': fields.String(description='The discussion identifier'),
'subject': fields.Nested(api.model_reference,
description='The discussion target object'),
'class': fields.ClassName(description='The object class',
discriminator=True),
'title': fields.String(description='The discussion title'),
'user': fields.Nested(
user_ref_fields, description='The discussion author'),
'created': fields.ISODateTime(description='The discussion creation date'),
'closed': fields.ISODateTime(description='The discussion closing date'),
'closed_by': fields.String(
attribute='closed_by.id',
description='The user who closed the discussion'),
'discussion': fields.Nested(message_fields),
'url': fields.UrlFor('api.discussion',
description='The discussion API URI'),
})
start_discussion_fields = api.model('DiscussionStart', {
'title': fields.String(description='The title of the discussion to open',
required=True),
'comment': fields.String(description='The content of the initial comment',
required=True),
'subject': fields.Nested(api.model_reference,
description='The discussion target object',
required=True),
})
comment_discussion_fields = api.model('DiscussionResponse', {
'comment': fields.String(
description='The comment to submit', required=True),
'close': fields.Boolean(
        description='Is this a closing response? Only the subject owner can close')
})
discussion_page_fields = api.model('DiscussionPage',
fields.pager(discussion_fields))
parser = api.parser()
parser.add_argument(
'sort', type=str, default='-created', location='args',
help='The sorting attribute')
parser.add_argument(
'closed', type=boolean, location='args',
help='Filters discussions on their closed status if specified')
parser.add_argument(
'for', type=str, location='args', action='append',
help='Filter discussions for a given subject')
parser.add_argument(
'page', type=int, default=1, location='args', help='The page to fetch')
parser.add_argument(
'page_size', type=int, default=20, location='args',
help='The page size to fetch')
@ns.route('/<id>/', endpoint='discussion')
class DiscussionAPI(API):
'''
Base class for a discussion thread.
'''
@api.doc('get_discussion')
@api.marshal_with(discussion_fields)
def get(self, id):
'''Get a discussion given its ID'''
discussion = Discussion.objects.get_or_404(id=id)
return discussion
@api.secure
@api.doc('comment_discussion')
@api.expect(comment_discussion_fields)
@api.response(403, 'Not allowed to close this discussion')
@api.marshal_with(discussion_fields)
def post(self, id):
        '''Add a comment and optionally close a discussion given its ID'''
discussion = Discussion.objects.get_or_404(id=id)
form = api.validate(DiscussionCommentForm)
message = Message(
content=form.comment.data,
posted_by=current_user.id
)
discussion.discussion.append(message)
close = form.close.data
if close:
CloseDiscussionPermission(discussion).test()
discussion.closed_by = current_user._get_current_object()
discussion.closed = datetime.now()
discussion.save()
if close:
on_discussion_closed.send(discussion, message=message)
else:
on_new_discussion_comment.send(discussion, message=message)
return discussion
@api.secure(admin_permission)
@api.doc('delete_discussion')
@api.response(403, 'Not allowed to delete this discussion')
def delete(self, id):
'''Delete a discussion given its ID'''
discussion = Discussion.objects.get_or_404(id=id)
discussion.delete()
on_discussion_deleted.send(discussion)
return '', 204
@ns.route('/', endpoint='discussions')
class DiscussionsAPI(API):
'''
Base class for a list of discussions.
'''
@api.doc('list_discussions', parser=parser)
@api.marshal_with(discussion_page_fields)
def get(self):
'''List all Discussions'''
args = parser.parse_args()
discussions = Discussion.objects
if args['for']:
discussions = discussions.generic_in(subject=args['for'])
if args['closed'] is False:
discussions = discussions(closed=None)
elif args['closed'] is True:
discussions = discussions(closed__ne=None)
discussions = discussions.order_by(args['sort'])
return discussions.paginate(args['page'], args['page_size'])
@api.secure
@api.doc('create_discussion')
@api.expect(start_discussion_fields)
@api.marshal_with(discussion_fields)
def post(self):
'''Create a new Discussion'''
form = api.validate(DiscussionCreateForm)
message = Message(
content=form.comment.data,
posted_by=current_user.id)
discussion = Discussion(user=current_user.id, discussion=[message])
form.populate_obj(discussion)
discussion.save()
on_new_discussion.send(discussion)
return discussion, 201
|
agpl-3.0
| -1,390,360,502,718,842,000
| 36.508772
| 79
| 0.651232
| false
| 4.108905
| false
| false
| false
|
log2timeline/dftimewolf
|
tests/lib/collectors/grr_base.py
|
1
|
5042
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests the GRR base collector."""
import unittest
import mock
from grr_api_client import errors as grr_errors
from dftimewolf.lib import state
from dftimewolf.lib import errors
from dftimewolf.lib.collectors import grr_base
from dftimewolf import config
ACCESS_FORBIDDEN_MAX = 3
class MockGRRObject(object):
"""Fake GRR object that will be used in the access forbidden wrapper test"""
_access_forbidden_counter = 0
CreateApproval = mock.MagicMock()
ClientApproval = mock.MagicMock()
ClientApproval.client_id = "abcd"
ClientApproval.approval_id = "dcba"
ClientApproval.username = "nobody"
CreateApproval.return_value = ClientApproval
hunt_id = "123"
client_id = "321"
# pylint: disable=unused-argument
def ForbiddenFunction(self, random1, random2, random3=None, random4=None):
"""Will raise a grr_errors.AccessForbiddenError three times, and return."""
while ACCESS_FORBIDDEN_MAX > self._access_forbidden_counter:
self._access_forbidden_counter += 1
raise grr_errors.AccessForbiddenError
return 4
class GRRBaseModuleTest(unittest.TestCase):
"""Tests for the GRR base collector."""
def testInitialization(self):
"""Tests that the collector can be initialized."""
test_state = state.DFTimewolfState(config.Config)
grr_base_module = grr_base.GRRBaseModule(test_state)
self.assertIsNotNone(grr_base_module)
@mock.patch('tempfile.mkdtemp')
@mock.patch('grr_api_client.api.InitHttp')
def testSetup(self, mock_grr_inithttp, mock_mkdtemp):
"""Tests that setup works"""
test_state = state.DFTimewolfState(config.Config)
grr_base_module = grr_base.GRRBaseModule(test_state)
mock_mkdtemp.return_value = '/fake'
grr_base_module.SetUp(
reason='random reason',
grr_server_url='http://fake/endpoint',
grr_username='admin1',
grr_password='admin2',
approvers='approver1@example.com,approver2@example.com',
verify=True
)
mock_grr_inithttp.assert_called_with(
api_endpoint='http://fake/endpoint',
auth=('admin1', 'admin2'),
verify=True)
self.assertEqual(grr_base_module.approvers,
['approver1@example.com', 'approver2@example.com'])
self.assertEqual(grr_base_module.output_path, '/fake')
@mock.patch('grr_api_client.api.InitHttp')
def testApprovalWrapper(self, _):
"""Tests that the approval wrapper works correctly."""
test_state = state.DFTimewolfState(config.Config)
grr_base_module = grr_base.GRRBaseModule(test_state)
grr_base_module.SetUp(
reason='random reason',
grr_server_url='http://fake/endpoint',
grr_username='admin1',
grr_password='admin2',
approvers='approver1@example.com,approver2@example.com',
verify=True
)
# pylint: disable=protected-access
grr_base_module._CHECK_APPROVAL_INTERVAL_SEC = 0
mock_grr_object = MockGRRObject()
mock_forbidden_function = mock.Mock(
wraps=mock_grr_object.ForbiddenFunction)
result = grr_base_module._WrapGRRRequestWithApproval(
mock_grr_object,
mock_forbidden_function,
'random1',
'random2',
random3=4,
random4=4)
# Final result.
self.assertEqual(result, 4)
mock_forbidden_function.assert_called_with(
'random1', 'random2', random3=4, random4=4)
    # Our forbidden function should be called 4 times, the last one succeeding.
self.assertEqual(mock_forbidden_function.call_count, 4)
mock_grr_object.CreateApproval.assert_called_with(
reason='random reason',
notified_users=['approver1@example.com', 'approver2@example.com'])
@mock.patch('grr_api_client.api.InitHttp')
def testNoApproversErrorsOut(self, _):
"""Tests that an error is generated if no approvers are specified.
This should only error on unauthorized objects, which is how our mock
behaves.
"""
test_state = state.DFTimewolfState(config.Config)
grr_base_module = grr_base.GRRBaseModule(test_state)
grr_base_module.SetUp(
reason='random',
grr_server_url='http://fake/url',
grr_username='admin1',
grr_password='admin2',
approvers='',
verify=True
)
# pylint: disable=protected-access
grr_base_module._CHECK_APPROVAL_INTERVAL_SEC = 0
mock_grr_object = MockGRRObject()
mock_forbidden_function = mock.Mock(
wraps=mock_grr_object.ForbiddenFunction)
with self.assertRaises(errors.DFTimewolfError) as error:
grr_base_module._WrapGRRRequestWithApproval(
mock_grr_object,
mock_forbidden_function,
'random1',
'random2',
random3=4,
random4=4)
self.assertEqual('GRR needs approval but no approvers specified '
'(hint: use --approvers)', error.exception.message)
self.assertTrue(error.exception.critical)
self.assertEqual(len(test_state.errors), 1)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -2,875,692,896,729,224,700
| 33.534247
| 80
| 0.678104
| false
| 3.560734
| true
| false
| false
|
tobias-lang/crawl
|
src/old/collection_parser.py
|
1
|
2434
|
import traceback
import codecs
import src.parse.article_parser
class CollectionParser():
def __init__(self, article_parser=src.parse.article_parser.ArticleParser()):
    self.article_parser = article_parser
def get_filelist(self, dir, maxNumber=None):
from os import listdir
from os.path import isfile, join
filenames = [join(dir, f) for f in listdir(dir) if isfile(join(dir, f))]
filenames = filter(lambda x: self._is_relevant_file(x), filenames)
filenames = filenames[:maxNumber]
return filenames
def _is_relevant_file(self, filename):
if filename.endswith("#comments"):
return False
return True
def parse_file(self, filename):
f = codecs.open(filename, "r", "utf-8")
lines = f.readlines()
big_data_string = "".join(lines)
self.article_parser.reset()
self.article_parser.feed(big_data_string)
c = self.article_parser.getContents()
return c
def parse_collection(self, filenames):
contents = set()
for idx, filename in enumerate(filenames):
# print "read", filename
c = self.parse_file(filename)
if len(c):
# print "->", len(c)
contents.add(c)
# if len(contents) > 10:
# break
contents = list(contents)
contents.sort()
return contents
def parse_and_write_collection(self, input_filenames, output_filename):
print "write contents to", output_filename
f = codecs.open(output_filename, "w", "utf-8")
n = 100
    # Round up so the final partial batch of files is not dropped.
    for i in range((len(input_filenames) + n - 1) / n):
print i*n,
batch_filenames = input_filenames[i*n:(i+1)*n]
      batch_contents = self.parse_collection(batch_filenames)
for c in batch_contents:
try:
# print h
# h_ascii = h.encode('ascii', 'ignore')
f.write(c + "\n")
except Exception as e:
print(traceback.format_exc())
f.flush()
f.close()
def run_full(self, input_dir, output_filename, maxNumber=None):
filenames = self.get_filelist(input_dir, maxNumber)
    self.parse_and_write_collection(filenames, output_filename)
import os
os.system("wc " + output_filename)
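# Usage sketch (hypothetical paths; assumes src.parse.article_parser.ArticleParser
# provides the reset()/feed()/getContents() methods used above):
#   parser = CollectionParser()
#   parser.run_full("data/raw_pages", "data/contents.txt", maxNumber=1000)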
|
gpl-3.0
| 3,377,325,573,410,735,000
| 29.810127
| 80
| 0.569433
| false
| 4.104553
| false
| false
| false
|
AravindK95/ee106b
|
project4/src/grasper_plan/src/transformations.py
|
2
|
66033
|
# -*- coding: utf-8 -*-
# transformations.py
# Copyright (c) 2006-2015, Christoph Gohlke
# Copyright (c) 2006-2015, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2015.07.18
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.9 <http://www.numpy.org>`_
* `Transformations.c 2015.07.18 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for speedup of some functions)
Notes
-----
The API is not stable yet and is expected to change between revisions.
This Python code is not optimized for speed. Refer to the transformations.c
module for a faster implementation of some functions.
Documentation in HTML format can be generated with epydoc.
Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using
numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using
numpy.dot(M, v) for shape (4, \*) column vectors, respectively
numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points").
This module follows the "column vectors on the right" and "row major storage"
(C contiguous) conventions. The translation components are in the right column
of the transformation matrix, i.e. M[:3, 3].
The transpose of the transformation matrices may have to be used to interface
with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16].
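A short illustration of these conventions, using functions defined below:
>>> T = translation_matrix([1, 2, 3])
>>> numpy.allclose(T[:3, 3], [1, 2, 3])
True
>>> numpy.allclose(numpy.dot(T, [0, 0, 0, 1]), [1, 2, 3, 1])
True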
Calculations are carried out with numpy.float64 precision.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions w+ix+jy+kz are represented as [w, x, y, z].
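For instance, a rotation by angle 0 about any axis gives the identity
quaternion [1, 0, 0, 0] (using quaternion_about_axis defined below):
>>> numpy.allclose(quaternion_about_axis(0.0, [1, 0, 0]), [1, 0, 0, 0])
True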
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
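For example, the string 'sxyz' and the tuple (0, 0, 0, 0) are expected to
select the same sequence (a sketch; the authoritative mapping is the
_AXES2TUPLE dictionary defined later in this module):
>>> R0 = euler_matrix(1, 2, 3, 'sxyz')
>>> R1 = euler_matrix(1, 2, 3, (0, 0, 0, 0))
>>> numpy.allclose(R0, R1)
True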
Other Python packages and modules for 3D transformations and quaternions:
* `Transforms3d <https://pypi.python.org/pypi/transforms3d>`_
includes most code of this module.
* `Blender.mathutils <http://www.blender.org/api/blender_python_api>`_
* `numpy-dtypes <https://github.com/numpy/numpy-dtypes>`_
References
----------
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
(13) Quaternion in molecular modeling. CFF Karney.
J Mol Graph Mod, 25(5):595-604
(14) New method for extracting the quaternion from a rotation matrix.
Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087.
(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann.
Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130.
(16) Column Vectors vs. Row Vectors.
http://steve.hollasch.net/cgindex/math/matrix/column-vec.html
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix([1, 2, 3])
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, [1, 2, 3])
True
>>> numpy.allclose(shear, [0, math.tan(beta), 0])
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
>>> v0, v1 = random_vector(3), random_vector(3)
>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1))
>>> v2 = numpy.dot(v0, M[:3,:3].T)
>>> numpy.allclose(unit_vector(v1), unit_vector(v2))
True
"""
from __future__ import division, print_function
import math
import numpy
__version__ = '2015.07.18'
__docformat__ = 'restructuredtext en'
__all__ = ()
def identity_matrix():
"""Return 4x4 identity/unit matrix.
>>> I = identity_matrix()
>>> numpy.allclose(I, numpy.dot(I, I))
True
>>> numpy.sum(I), numpy.trace(I)
(4.0, 4.0)
>>> numpy.allclose(I, numpy.identity(4))
True
"""
return numpy.identity(4)
def translation_matrix(direction):
"""Return matrix to translate by direction vector.
>>> v = numpy.random.random(3) - 0.5
>>> numpy.allclose(v, translation_matrix(v)[:3, 3])
True
"""
M = numpy.identity(4)
M[:3, 3] = direction[:3]
return M
def translation_from_matrix(matrix):
"""Return translation vector from translation matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = translation_from_matrix(translation_matrix(v0))
>>> numpy.allclose(v0, v1)
True
"""
return numpy.array(matrix, copy=False)[:3, 3].copy()
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2, numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def reflection_from_matrix(matrix):
"""Return mirror plane point and normal vector from reflection matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = numpy.random.random(3) - 0.5
>>> M0 = reflection_matrix(v0, v1)
>>> point, normal = reflection_from_matrix(M0)
>>> M1 = reflection_matrix(point, normal)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
# normal: unit eigenvector corresponding to eigenvalue -1
w, V = numpy.linalg.eig(M[:3, :3])
i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
normal = numpy.real(V[:, i[0]]).squeeze()
# point: any unit eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return point, normal
def rotation_matrix(angle, direction, point=None):
"""Return matrix to rotate about axis defined by point and direction.
>>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])
>>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])
True
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
>>> is_same_transform(R0, R1)
True
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(-angle, -direc, point)
>>> is_same_transform(R0, R1)
True
>>> I = numpy.identity(4, numpy.float64)
>>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
True
>>> numpy.allclose(2, numpy.trace(rotation_matrix(math.pi/2,
... direc, point)))
True
"""
sina = math.sin(angle)
cosa = math.cos(angle)
direction = unit_vector(direction[:3])
# rotation matrix around unit vector
R = numpy.diag([cosa, cosa, cosa])
R += numpy.outer(direction, direction) * (1.0 - cosa)
direction *= sina
R += numpy.array([[ 0.0, -direction[2], direction[1]],
[ direction[2], 0.0, -direction[0]],
[-direction[1], direction[0], 0.0]])
M = numpy.identity(4)
M[:3, :3] = R
if point is not None:
# rotation not around origin
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
M[:3, 3] = point - numpy.dot(R, point)
return M
def rotation_from_matrix(matrix):
"""Return rotation angle and axis from rotation matrix.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> angle, direc, point = rotation_from_matrix(R0)
>>> R1 = rotation_matrix(angle, direc, point)
>>> is_same_transform(R0, R1)
True
"""
R = numpy.array(matrix, dtype=numpy.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
w, W = numpy.linalg.eig(R33.T)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = numpy.real(W[:, i[-1]]).squeeze()
# point: unit eigenvector of R33 corresponding to eigenvalue of 1
w, Q = numpy.linalg.eig(R)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (numpy.trace(R33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v[3] = 1
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
"""
if direction is None:
# uniform scaling
M = numpy.diag([factor, factor, factor, 1.0])
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
# nonuniform scaling
direction = unit_vector(direction[:3])
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
return M
def scale_from_matrix(matrix):
"""Return scaling factor, origin and direction from scaling matrix.
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S0 = scale_matrix(factor, origin)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
>>> S0 = scale_matrix(factor, origin, direct)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
factor = numpy.trace(M33) - 2.0
try:
# direction: unit eigenvector corresponding to eigenvalue factor
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0]
direction = numpy.real(V[:, i]).squeeze()
direction /= vector_norm(direction)
except IndexError:
# uniform scaling
factor = (factor + 2.0) / 3.0
direction = None
# origin: any eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
origin = numpy.real(V[:, i[-1]]).squeeze()
origin /= origin[3]
return factor, origin, direction
def projection_matrix(point, normal, direction=None,
perspective=None, pseudo=False):
"""Return matrix to project onto plane defined by point and normal.
Using either perspective point, projection direction, or none of both.
If pseudo is True, perspective projections will preserve relative depth
such that Perspective = dot(Orthogonal, PseudoPerspective).
>>> P = projection_matrix([0, 0, 0], [1, 0, 0])
>>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
True
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> P1 = projection_matrix(point, normal, direction=direct)
>>> P2 = projection_matrix(point, normal, perspective=persp)
>>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> is_same_transform(P2, numpy.dot(P0, P3))
True
>>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0])
>>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(P, v0)
>>> numpy.allclose(v1[1], v0[1])
True
>>> numpy.allclose(v1[0], 3-v1[1])
True
"""
M = numpy.identity(4)
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
normal = unit_vector(normal[:3])
if perspective is not None:
# perspective projection
perspective = numpy.array(perspective[:3], dtype=numpy.float64,
copy=False)
M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
M[:3, :3] -= numpy.outer(perspective, normal)
if pseudo:
# preserve relative depth
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
else:
M[:3, 3] = numpy.dot(point, normal) * perspective
M[3, :3] = -normal
M[3, 3] = numpy.dot(perspective, normal)
elif direction is not None:
# parallel projection
direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
scale = numpy.dot(direction, normal)
M[:3, :3] -= numpy.outer(direction, normal) / scale
M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
else:
# orthogonal projection
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * normal
return M
def projection_from_matrix(matrix, pseudo=False):
"""Return projection plane and perspective point from projection matrix.
Return values are same as arguments for projection_matrix function:
point, normal, direction, perspective, and pseudo.
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, direct)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
>>> result = projection_from_matrix(P0, pseudo=False)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> result = projection_from_matrix(P0, pseudo=True)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not pseudo and len(i):
# point: any eigenvector corresponding to eigenvalue 1
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
# direction: unit eigenvector corresponding to eigenvalue 0
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 0")
direction = numpy.real(V[:, i[0]]).squeeze()
direction /= vector_norm(direction)
# normal: unit eigenvector of M33.T corresponding to eigenvalue 0
w, V = numpy.linalg.eig(M33.T)
i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
if len(i):
# parallel projection
normal = numpy.real(V[:, i[0]]).squeeze()
normal /= vector_norm(normal)
return point, normal, direction, None, False
else:
# orthogonal projection, where normal equals direction vector
return point, direction, None, None, False
else:
# perspective projection
i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]
if not len(i):
raise ValueError(
"no eigenvector not corresponding to eigenvalue 0")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
normal = - M[3, :3]
perspective = M[:3, 3] / numpy.dot(point[:3], normal)
if pseudo:
perspective -= normal
return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
"""Return matrix to obtain normalized device coordinates from frustum.
The frustum bounds are axis-aligned along x (left, right),
y (bottom, top) and z (near, far).
Normalized device coordinates are in range [-1, 1] if coordinates are
inside the frustum.
If perspective is True the frustum is a truncated pyramid with the
perspective point at origin and direction along z axis, otherwise an
orthographic canonical view volume (a box).
Homogeneous coordinates transformed by the perspective clip matrix
need to be dehomogenized (divided by w coordinate).
>>> frustum = numpy.random.rand(6)
>>> frustum[1] += frustum[0]
>>> frustum[3] += frustum[2]
>>> frustum[5] += frustum[4]
>>> M = clip_matrix(perspective=False, *frustum)
>>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
array([-1., -1., -1., 1.])
>>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1])
array([ 1., 1., 1., 1.])
>>> M = clip_matrix(perspective=True, *frustum)
>>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
>>> v / v[3]
array([-1., -1., -1., 1.])
>>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1])
>>> v / v[3]
array([ 1., 1., -1., 1.])
"""
if left >= right or bottom >= top or near >= far:
raise ValueError("invalid frustum")
if perspective:
if near <= _EPS:
raise ValueError("invalid frustum: near <= 0")
t = 2.0 * near
M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0],
[0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0],
[0.0, 0.0, (far+near)/(near-far), t*far/(far-near)],
[0.0, 0.0, -1.0, 0.0]]
else:
M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)],
[0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)],
[0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)],
[0.0, 0.0, 0.0, 1.0]]
return numpy.array(M)
def shear_matrix(angle, direction, point, normal):
"""Return matrix to shear by angle along direction vector on shear plane.
The shear plane is defined by a point and normal vector. The direction
vector must be orthogonal to the plane's normal vector.
A point P is transformed by the shear matrix into P" such that
the vector P-P" is parallel to the direction vector and its extent is
given by the angle of P-P'-P", where P' is the orthogonal projection
of P onto the shear plane.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S = shear_matrix(angle, direct, point, normal)
>>> numpy.allclose(1, numpy.linalg.det(S))
True
"""
normal = unit_vector(normal[:3])
direction = unit_vector(direction[:3])
if abs(numpy.dot(normal, direction)) > 1e-6:
raise ValueError("direction and normal vectors are not orthogonal")
angle = math.tan(angle)
M = numpy.identity(4)
M[:3, :3] += angle * numpy.outer(direction, normal)
M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction
return M
def shear_from_matrix(matrix):
"""Return shear angle, direction and plane from shear matrix.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S0 = shear_matrix(angle, direct, point, normal)
>>> angle, direct, point, normal = shear_from_matrix(S0)
>>> S1 = shear_matrix(angle, direct, point, normal)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
# normal: cross independent eigenvectors corresponding to the eigenvalue 1
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0]
if len(i) < 2:
raise ValueError("no two linear independent eigenvectors found %s" % w)
V = numpy.real(V[:, i]).squeeze().T
lenorm = -1.0
for i0, i1 in ((0, 1), (0, 2), (1, 2)):
n = numpy.cross(V[i0], V[i1])
w = vector_norm(n)
if w > lenorm:
lenorm = w
normal = n
normal /= lenorm
# direction and angle
direction = numpy.dot(M33 - numpy.identity(3), normal)
angle = vector_norm(direction)
direction /= angle
angle = math.atan(angle)
# point: eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return angle, direction, point, normal
def decompose_matrix(matrix):
"""Return sequence of transformations from transformation matrix.
matrix : array_like
Non-degenerative homogeneous transformation matrix
Return tuple of:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
Raise ValueError if matrix is of wrong type or degenerative.
>>> T0 = translation_matrix([1, 2, 3])
>>> scale, shear, angles, trans, persp = decompose_matrix(T0)
>>> T1 = translation_matrix(trans)
>>> numpy.allclose(T0, T1)
True
>>> S = scale_matrix(0.123)
>>> scale, shear, angles, trans, persp = decompose_matrix(S)
>>> scale[0]
0.123
>>> R0 = euler_matrix(1, 2, 3)
>>> scale, shear, angles, trans, persp = decompose_matrix(R0)
>>> R1 = euler_matrix(*angles)
>>> numpy.allclose(R0, R1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
if abs(M[3, 3]) < _EPS:
raise ValueError("M[3, 3] is zero")
M /= M[3, 3]
P = M.copy()
P[:, 3] = 0.0, 0.0, 0.0, 1.0
if not numpy.linalg.det(P):
raise ValueError("matrix is singular")
scale = numpy.zeros((3, ))
shear = [0.0, 0.0, 0.0]
angles = [0.0, 0.0, 0.0]
if any(abs(M[:3, 3]) > _EPS):
perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
M[:, 3] = 0.0, 0.0, 0.0, 1.0
else:
perspective = numpy.array([0.0, 0.0, 0.0, 1.0])
translate = M[3, :3].copy()
M[3, :3] = 0.0
row = M[:3, :3].copy()
scale[0] = vector_norm(row[0])
row[0] /= scale[0]
shear[0] = numpy.dot(row[0], row[1])
row[1] -= row[0] * shear[0]
scale[1] = vector_norm(row[1])
row[1] /= scale[1]
shear[0] /= scale[1]
shear[1] = numpy.dot(row[0], row[2])
row[2] -= row[0] * shear[1]
shear[2] = numpy.dot(row[1], row[2])
row[2] -= row[1] * shear[2]
scale[2] = vector_norm(row[2])
row[2] /= scale[2]
shear[1:] /= scale[2]
if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
numpy.negative(scale, scale)
numpy.negative(row, row)
angles[1] = math.asin(-row[0, 2])
if math.cos(angles[1]):
angles[0] = math.atan2(row[1, 2], row[2, 2])
angles[2] = math.atan2(row[0, 1], row[0, 0])
else:
#angles[0] = math.atan2(row[1, 0], row[1, 1])
angles[0] = math.atan2(-row[2, 1], row[1, 1])
angles[2] = 0.0
return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
perspective=None):
"""Return transformation matrix from sequence of transformations.
This is the inverse of the decompose_matrix function.
Sequence of transformations:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
>>> scale = numpy.random.random(3) - 0.5
>>> shear = numpy.random.random(3) - 0.5
>>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
>>> trans = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(4) - 0.5
>>> M0 = compose_matrix(scale, shear, angles, trans, persp)
>>> result = decompose_matrix(M0)
>>> M1 = compose_matrix(*result)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.identity(4)
if perspective is not None:
P = numpy.identity(4)
P[3, :] = perspective[:4]
M = numpy.dot(M, P)
if translate is not None:
T = numpy.identity(4)
T[:3, 3] = translate[:3]
M = numpy.dot(M, T)
if angles is not None:
R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
M = numpy.dot(M, R)
if shear is not None:
Z = numpy.identity(4)
Z[1, 2] = shear[2]
Z[0, 2] = shear[1]
Z[0, 1] = shear[0]
M = numpy.dot(M, Z)
if scale is not None:
S = numpy.identity(4)
S[0, 0] = scale[0]
S[1, 1] = scale[1]
S[2, 2] = scale[2]
M = numpy.dot(M, S)
M /= M[3, 3]
return M
def orthogonalization_matrix(lengths, angles):
"""Return orthogonalization matrix for crystallographic cell coordinates.
Angles are expected in degrees.
The de-orthogonalization matrix is the inverse.
>>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90])
>>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
True
>>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
>>> numpy.allclose(numpy.sum(O), 43.063229)
True
"""
a, b, c = lengths
angles = numpy.radians(angles)
sina, sinb, _ = numpy.sin(angles)
cosa, cosb, cosg = numpy.cos(angles)
co = (cosa * cosb - cosg) / (sina * sinb)
return numpy.array([
[ a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0],
[-a*sinb*co, b*sina, 0.0, 0.0],
[ a*cosb, b*cosa, c, 0.0],
[ 0.0, 0.0, 0.0, 1.0]])
def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):
"""Return affine transform matrix to register two point sets.
v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous
coordinates, where ndims is the dimensionality of the coordinate space.
If shear is False, a similarity transformation matrix is returned.
If also scale is False, a rigid/Euclidean transformation matrix
is returned.
By default the algorithm by Hartley and Zissermann [15] is used.
If usesvd is True, similarity and Euclidean transformation matrices
are calculated by minimizing the weighted sum of squared deviations
(RMSD) according to the algorithm by Kabsch [8].
Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]
is used, which is slower when using this Python implementation.
The returned matrix performs rotation, translation and uniform scaling
(if specified).
>>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
>>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
>>> affine_matrix_from_points(v0, v1)
array([[ 0.14549, 0.00062, 675.50008],
[ 0.00048, 0.14094, 53.24971],
[ 0. , 0. , 1. ]])
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> S = scale_matrix(random.random())
>>> M = concatenate_matrices(T, R, S)
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)
>>> M = affine_matrix_from_points(v0[:3], v1[:3])
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
More examples in superimposition_matrix()
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=True)
v1 = numpy.array(v1, dtype=numpy.float64, copy=True)
ndims = v0.shape[0]
if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:
raise ValueError("input arrays are of wrong shape or type")
# move centroids to origin
t0 = -numpy.mean(v0, axis=1)
M0 = numpy.identity(ndims+1)
M0[:ndims, ndims] = t0
v0 += t0.reshape(ndims, 1)
t1 = -numpy.mean(v1, axis=1)
M1 = numpy.identity(ndims+1)
M1[:ndims, ndims] = t1
v1 += t1.reshape(ndims, 1)
if shear:
# Affine transformation
A = numpy.concatenate((v0, v1), axis=0)
u, s, vh = numpy.linalg.svd(A.T)
vh = vh[:ndims].T
B = vh[:ndims]
C = vh[ndims:2*ndims]
t = numpy.dot(C, numpy.linalg.pinv(B))
t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1)
M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,)))
elif usesvd or ndims != 3:
# Rigid transformation via SVD of covariance matrix
u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
# rotation matrix from SVD orthonormal bases
R = numpy.dot(u, vh)
if numpy.linalg.det(R) < 0.0:
# R does not constitute right handed system
R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)
s[-1] *= -1.0
# homogeneous transformation matrix
M = numpy.identity(ndims+1)
M[:ndims, :ndims] = R
else:
# Rigid transformation matrix via quaternion
# compute symmetric matrix N
xx, yy, zz = numpy.sum(v0 * v1, axis=1)
xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
N = [[xx+yy+zz, 0.0, 0.0, 0.0],
[yz-zy, xx-yy-zz, 0.0, 0.0],
[zx-xz, xy+yx, yy-xx-zz, 0.0],
[xy-yx, zx+xz, yz+zy, zz-xx-yy]]
# quaternion: eigenvector corresponding to most positive eigenvalue
w, V = numpy.linalg.eigh(N)
q = V[:, numpy.argmax(w)]
q /= vector_norm(q) # unit quaternion
# homogeneous transformation matrix
M = quaternion_matrix(q)
if scale and not shear:
# Affine transformation; scale is ratio of RMS deviations from centroid
v0 *= v0
v1 *= v1
M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
# move centroids back
M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))
M /= M[ndims, ndims]
return M
def superimposition_matrix(v0, v1, scale=False, usesvd=True):
"""Return matrix to transform given 3D point set into second point set.
v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points.
The parameters scale and usesvd are explained in the more general
affine_matrix_from_points function.
The returned matrix is a similarity or Euclidean transformation matrix.
This function has a fast C implementation in transformations.c.
>>> v0 = numpy.random.rand(3, 10)
>>> M = superimposition_matrix(v0, v0)
>>> numpy.allclose(M, numpy.identity(4))
True
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]]
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> S = scale_matrix(random.random())
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> M = concatenate_matrices(T, R, S)
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1)
>>> M = superimposition_matrix(v0, v1, scale=True)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v = numpy.empty((4, 100, 3))
>>> v[:, :, 0] = v0
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
return affine_matrix_from_points(v0, v1, shear=False,
scale=scale, usesvd=usesvd)
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci*ck, ci*sk
sc, ss = si*ck, si*sk
M = numpy.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj*si
M[i, k] = sj*ci
M[j, i] = sj*sk
M[j, j] = -cj*ss+cc
M[j, k] = -cj*cs-sc
M[k, i] = -sj*ck
M[k, j] = cj*sc+cs
M[k, k] = cj*cc-ss
else:
M[i, i] = cj*ck
M[i, j] = sj*sc-cs
M[i, k] = sj*cc+ss
M[j, i] = cj*sk
M[j, j] = sj*ss+cc
M[j, k] = sj*cs-sc
M[k, i] = -sj
M[k, j] = cj*si
M[k, k] = cj*ci
return M
def euler_from_matrix(matrix, axes='sxyz'):
"""Return Euler angles from rotation matrix for specified axis sequence.
axes : One of 24 axis sequences as string or encoded tuple
Note that many Euler angle triplets can describe one matrix.
>>> R0 = euler_matrix(1, 2, 3, 'syxz')
>>> al, be, ga = euler_from_matrix(R0, 'syxz')
>>> R1 = euler_matrix(al, be, ga, 'syxz')
>>> numpy.allclose(R0, R1)
True
>>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R0 = euler_matrix(axes=axes, *angles)
... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
... if not numpy.allclose(R0, R1): print(axes, "failed")
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
if repetition:
sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
if sy > _EPS:
ax = math.atan2( M[i, j], M[i, k])
ay = math.atan2( sy, M[i, i])
az = math.atan2( M[j, i], -M[k, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2( sy, M[i, i])
az = 0.0
else:
cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
if cy > _EPS:
ax = math.atan2( M[k, j], M[k, k])
ay = math.atan2(-M[k, i], cy)
az = math.atan2( M[j, i], M[i, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2(-M[k, i], cy)
az = 0.0
if parity:
ax, ay, az = -ax, -ay, -az
if frame:
ax, az = az, ax
return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
"""Return Euler angles from quaternion for specified axis sequence.
>>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(angles, [0.123, 0, 0])
True
"""
return euler_from_matrix(quaternion_matrix(quaternion), axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis + 1
j = _NEXT_AXIS[i+parity-1] + 1
k = _NEXT_AXIS[i-parity] + 1
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci*ck
cs = ci*sk
sc = si*ck
ss = si*sk
q = numpy.empty((4, ))
if repetition:
q[0] = cj*(cc - ss)
q[i] = cj*(cs + sc)
q[j] = sj*(cc + ss)
q[k] = sj*(cs - sc)
else:
q[0] = cj*cc + sj*ss
q[i] = cj*sc - sj*cs
q[j] = cj*ss + sj*cc
q[k] = cj*cs - sj*sc
if parity:
q[j] *= -1.0
return q
def quaternion_about_axis(angle, axis):
"""Return quaternion for rotation about axis.
>>> q = quaternion_about_axis(0.123, [1, 0, 0])
>>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0])
True
"""
q = numpy.array([0.0, axis[0], axis[1], axis[2]])
qlen = vector_norm(q)
if qlen > _EPS:
q *= math.sin(angle/2.0) / qlen
q[0] = math.cos(angle/2.0)
return q
def quaternion_matrix(quaternion):
"""Return homogeneous rotation matrix from quaternion.
>>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
True
>>> M = quaternion_matrix([1, 0, 0, 0])
>>> numpy.allclose(M, numpy.identity(4))
True
>>> M = quaternion_matrix([0, 1, 0, 0])
>>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
n = numpy.dot(q, q)
if n < _EPS:
return numpy.identity(4)
q *= math.sqrt(2.0 / n)
q = numpy.outer(q, q)
return numpy.array([
[1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],
[ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],
[ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
[ 0.0, 0.0, 0.0, 1.0]])
def quaternion_from_matrix(matrix, isprecise=False):
"""Return quaternion from rotation matrix.
If isprecise is True, the input matrix is assumed to be a precise rotation
matrix and a faster algorithm is used.
>>> q = quaternion_from_matrix(numpy.identity(4), True)
>>> numpy.allclose(q, [1, 0, 0, 0])
True
>>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
>>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
True
>>> R = rotation_matrix(0.123, (1, 2, 3))
>>> q = quaternion_from_matrix(R, True)
>>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
True
>>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
True
>>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
True
>>> R = random_rotation_matrix()
>>> q = quaternion_from_matrix(R)
>>> is_same_transform(R, quaternion_matrix(q))
True
>>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)
>>> numpy.allclose(quaternion_from_matrix(R, isprecise=False),
... quaternion_from_matrix(R, isprecise=True))
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
if isprecise:
q = numpy.empty((4, ))
t = numpy.trace(M)
if t > M[3, 3]:
q[0] = t
q[3] = M[1, 0] - M[0, 1]
q[2] = M[0, 2] - M[2, 0]
q[1] = M[2, 1] - M[1, 2]
else:
i, j, k = 1, 2, 3
if M[1, 1] > M[0, 0]:
i, j, k = 2, 3, 1
if M[2, 2] > M[i, i]:
i, j, k = 3, 1, 2
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q *= 0.5 / math.sqrt(t * M[3, 3])
else:
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# symmetric matrix K
K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],
[m01+m10, m11-m00-m22, 0.0, 0.0],
[m02+m20, m12+m21, m22-m00-m11, 0.0],
[m21-m12, m02-m20, m10-m01, m00+m11+m22]])
K /= 3.0
# quaternion is eigenvector of K that corresponds to largest eigenvalue
w, V = numpy.linalg.eigh(K)
q = V[[3, 0, 1, 2], numpy.argmax(w)]
if q[0] < 0.0:
numpy.negative(q, q)
return q
def quaternion_multiply(quaternion1, quaternion0):
"""Return multiplication of two quaternions.
>>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])
>>> numpy.allclose(q, [28, -44, -14, 48])
True
"""
w0, x0, y0, z0 = quaternion0
w1, x1, y1, z1 = quaternion1
return numpy.array([-x1*x0 - y1*y0 - z1*z0 + w1*w0,
x1*w0 + y1*z0 - z1*y0 + w1*x0,
-x1*z0 + y1*w0 + z1*x0 + w1*y0,
x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64)
def quaternion_conjugate(quaternion):
"""Return conjugate of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_conjugate(q0)
>>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q
def quaternion_inverse(quaternion):
"""Return inverse of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_inverse(q0)
>>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q / numpy.dot(q, q)
def quaternion_real(quaternion):
"""Return real part of quaternion.
>>> quaternion_real([3, 0, 1, 2])
3.0
"""
return float(quaternion[0])
def quaternion_imag(quaternion):
"""Return imaginary part of quaternion.
>>> quaternion_imag([3, 0, 1, 2])
array([ 0., 1., 2.])
"""
return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True)
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
"""Return spherical linear interpolation between two quaternions.
>>> q0 = random_quaternion()
>>> q1 = random_quaternion()
>>> q = quaternion_slerp(q0, q1, 0)
>>> numpy.allclose(q, q0)
True
>>> q = quaternion_slerp(q0, q1, 1, 1)
>>> numpy.allclose(q, q1)
True
>>> q = quaternion_slerp(q0, q1, 0.5)
>>> angle = math.acos(numpy.dot(q0, q))
>>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \
numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle)
True
"""
q0 = unit_vector(quat0[:4])
q1 = unit_vector(quat1[:4])
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = numpy.dot(q0, q1)
if abs(abs(d) - 1.0) < _EPS:
return q0
if shortestpath and d < 0.0:
# invert rotation
d = -d
numpy.negative(q1, q1)
angle = math.acos(d) + spin * math.pi
if abs(angle) < _EPS:
return q0
isin = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * isin
q1 *= math.sin(fraction * angle) * isin
q0 += q1
return q0
def random_quaternion(rand=None):
"""Return uniform random unit quaternion.
rand: array like or None
Three independent random variables that are uniformly distributed
between 0 and 1.
>>> q = random_quaternion()
>>> numpy.allclose(1, vector_norm(q))
True
>>> q = random_quaternion(numpy.random.random(3))
>>> len(q.shape), q.shape[0]==4
(1, True)
"""
if rand is None:
rand = numpy.random.rand(3)
else:
assert len(rand) == 3
r1 = numpy.sqrt(1.0 - rand[0])
r2 = numpy.sqrt(rand[0])
pi2 = math.pi * 2.0
t1 = pi2 * rand[1]
t2 = pi2 * rand[2]
return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1,
numpy.cos(t1)*r1, numpy.sin(t2)*r2])
def random_rotation_matrix(rand=None):
"""Return uniform random rotation matrix.
rand: array like
Three independent random variables that are uniformly distributed
between 0 and 1 for each returned quaternion.
>>> R = random_rotation_matrix()
>>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
True
"""
return quaternion_matrix(random_quaternion(rand))
class Arcball(object):
"""Virtual Trackball Control.
>>> ball = Arcball()
>>> ball = Arcball(initial=numpy.identity(4))
>>> ball.place([320, 320], 320)
>>> ball.down([500, 250])
>>> ball.drag([475, 275])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 3.90583455)
True
>>> ball = Arcball(initial=[1, 0, 0, 0])
>>> ball.place([320, 320], 320)
>>> ball.setaxes([1, 1, 0], [-1, 1, 0])
>>> ball.constrain = True
>>> ball.down([400, 200])
>>> ball.drag([200, 400])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 0.2055924)
True
>>> ball.next()
"""
def __init__(self, initial=None):
"""Initialize virtual trackball control.
initial : quaternion or rotation matrix
"""
self._axis = None
self._axes = None
self._radius = 1.0
self._center = [0.0, 0.0]
self._vdown = numpy.array([0.0, 0.0, 1.0])
self._constrain = False
if initial is None:
self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0])
else:
initial = numpy.array(initial, dtype=numpy.float64)
if initial.shape == (4, 4):
self._qdown = quaternion_from_matrix(initial)
elif initial.shape == (4, ):
initial /= vector_norm(initial)
self._qdown = initial
else:
raise ValueError("initial not a quaternion or matrix")
self._qnow = self._qpre = self._qdown
def place(self, center, radius):
"""Place Arcball, e.g. when window size changes.
center : sequence[2]
Window coordinates of trackball center.
radius : float
Radius of trackball in window coordinates.
"""
self._radius = float(radius)
self._center[0] = center[0]
self._center[1] = center[1]
def setaxes(self, *axes):
"""Set axes to constrain rotations."""
if axes is None:
self._axes = None
else:
self._axes = [unit_vector(axis) for axis in axes]
@property
def constrain(self):
"""Return state of constrain to axis mode."""
return self._constrain
@constrain.setter
def constrain(self, value):
"""Set state of constrain to axis mode."""
self._constrain = bool(value)
def down(self, point):
"""Set initial cursor window coordinates and pick constrain-axis."""
self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
self._qdown = self._qpre = self._qnow
if self._constrain and self._axes is not None:
self._axis = arcball_nearest_axis(self._vdown, self._axes)
self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
else:
self._axis = None
def drag(self, point):
"""Update current cursor window coordinates."""
vnow = arcball_map_to_sphere(point, self._center, self._radius)
if self._axis is not None:
vnow = arcball_constrain_to_axis(vnow, self._axis)
self._qpre = self._qnow
t = numpy.cross(self._vdown, vnow)
if numpy.dot(t, t) < _EPS:
self._qnow = self._qdown
else:
q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]]
self._qnow = quaternion_multiply(q, self._qdown)
def next(self, acceleration=0.0):
"""Continue rotation in direction of last drag."""
q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
self._qpre, self._qnow = self._qnow, q
def matrix(self):
"""Return homogeneous rotation matrix."""
return quaternion_matrix(self._qnow)
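# Editor's illustrative sketch (not part of the original module): typical use
# of Arcball in a GUI mouse handler -- place the ball once per window resize,
# record the press position, update on drag events, and read back the
# homogeneous rotation matrix for rendering.
def _example_arcball_drag():
    ball = Arcball()
    ball.place([320, 240], 240)   # window center and trackball radius in pixels
    ball.down([400, 200])         # mouse button pressed
    ball.drag([380, 220])         # mouse moved while pressed
    return ball.matrix()          # 4x4 rotation matrix for the current drag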
def arcball_map_to_sphere(point, center, radius):
"""Return unit sphere coordinates from window coordinates."""
v0 = (point[0] - center[0]) / radius
v1 = (center[1] - point[1]) / radius
n = v0*v0 + v1*v1
if n > 1.0:
# position outside of sphere
n = math.sqrt(n)
return numpy.array([v0/n, v1/n, 0.0])
else:
return numpy.array([v0, v1, math.sqrt(1.0 - n)])
def arcball_constrain_to_axis(point, axis):
"""Return sphere point perpendicular to axis."""
v = numpy.array(point, dtype=numpy.float64, copy=True)
a = numpy.array(axis, dtype=numpy.float64, copy=True)
v -= a * numpy.dot(a, v) # on plane
n = vector_norm(v)
if n > _EPS:
if v[2] < 0.0:
numpy.negative(v, v)
v /= n
return v
if a[2] == 1.0:
return numpy.array([1.0, 0.0, 0.0])
return unit_vector([-a[1], a[0], 0.0])
def arcball_nearest_axis(point, axes):
"""Return axis, which arc is nearest to point."""
point = numpy.array(point, dtype=numpy.float64, copy=False)
nearest = None
mx = -1.0
for axis in axes:
t = numpy.dot(arcball_constrain_to_axis(point, axis), point)
if t > mx:
nearest = axis
mx = t
return nearest
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
def vector_norm(data, axis=None, out=None):
"""Return length, i.e. Euclidean norm, of ndarray along axis.
>>> v = numpy.random.random(3)
>>> n = vector_norm(v)
>>> numpy.allclose(n, numpy.linalg.norm(v))
True
>>> v = numpy.random.rand(6, 5, 3)
>>> n = vector_norm(v, axis=-1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
True
>>> n = vector_norm(v, axis=1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> v = numpy.random.rand(5, 4, 3)
>>> n = numpy.empty((5, 3))
>>> vector_norm(v, axis=1, out=n)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> vector_norm([])
0.0
>>> vector_norm([1])
1.0
"""
data = numpy.array(data, dtype=numpy.float64, copy=True)
if out is None:
if data.ndim == 1:
return math.sqrt(numpy.dot(data, data))
data *= data
out = numpy.atleast_1d(numpy.sum(data, axis=axis))
numpy.sqrt(out, out)
return out
else:
data *= data
numpy.sum(data, axis=axis, out=out)
numpy.sqrt(out, out)
def unit_vector(data, axis=None, out=None):
"""Return ndarray normalized by length, i.e. Euclidean norm, along axis.
>>> v0 = numpy.random.random(3)
>>> v1 = unit_vector(v0)
>>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
True
>>> v0 = numpy.random.rand(5, 4, 3)
>>> v1 = unit_vector(v0, axis=-1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
>>> numpy.allclose(v1, v2)
True
>>> v1 = unit_vector(v0, axis=1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
>>> numpy.allclose(v1, v2)
True
>>> v1 = numpy.empty((5, 4, 3))
>>> unit_vector(v0, axis=1, out=v1)
>>> numpy.allclose(v1, v2)
True
>>> list(unit_vector([]))
[]
>>> list(unit_vector([1]))
[1.0]
"""
if out is None:
data = numpy.array(data, dtype=numpy.float64, copy=True)
if data.ndim == 1:
data /= math.sqrt(numpy.dot(data, data))
return data
else:
if out is not data:
out[:] = numpy.array(data, copy=False)
data = out
length = numpy.atleast_1d(numpy.sum(data*data, axis))
numpy.sqrt(length, length)
if axis is not None:
length = numpy.expand_dims(length, axis)
data /= length
if out is None:
return data
def random_vector(size):
"""Return array of random doubles in the half-open interval [0.0, 1.0).
>>> v = random_vector(10000)
>>> numpy.all(v >= 0) and numpy.all(v < 1)
True
>>> v0 = random_vector(10)
>>> v1 = random_vector(10)
>>> numpy.any(v0 == v1)
False
"""
return numpy.random.random(size)
def vector_product(v0, v1, axis=0):
"""Return vector perpendicular to vectors.
>>> v = vector_product([2, 0, 0], [0, 3, 0])
>>> numpy.allclose(v, [0, 0, 6])
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> v = vector_product(v0, v1)
>>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> v = vector_product(v0, v1, axis=1)
>>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])
True
"""
return numpy.cross(v0, v1, axis=axis)
def angle_between_vectors(v0, v1, directed=True, axis=0):
"""Return angle between vectors.
If directed is False, the input vectors are interpreted as undirected axes,
i.e. the maximum angle is pi/2.
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
>>> numpy.allclose(a, math.pi)
True
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
>>> numpy.allclose(a, 0)
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> a = angle_between_vectors(v0, v1)
>>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> a = angle_between_vectors(v0, v1, axis=1)
>>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532])
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
dot = numpy.sum(v0 * v1, axis=axis)
dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)
return numpy.arccos(dot if directed else numpy.fabs(dot))
def inverse_matrix(matrix):
"""Return inverse of square transformation matrix.
>>> M0 = random_rotation_matrix()
>>> M1 = inverse_matrix(M0.T)
>>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
True
>>> for size in range(1, 7):
... M0 = numpy.random.rand(size, size)
... M1 = inverse_matrix(M0)
... if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)
"""
return numpy.linalg.inv(matrix)
def concatenate_matrices(*matrices):
"""Return concatenation of series of transformation matrices.
>>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
>>> numpy.allclose(M, concatenate_matrices(M))
True
>>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
True
"""
M = numpy.identity(4)
for i in matrices:
M = numpy.dot(M, i)
return M
def is_same_transform(matrix0, matrix1):
"""Return True if two matrices perform same transformation.
>>> is_same_transform(numpy.identity(4), numpy.identity(4))
True
>>> is_same_transform(numpy.identity(4), random_rotation_matrix())
False
"""
matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
matrix0 /= matrix0[3, 3]
matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
matrix1 /= matrix1[3, 3]
return numpy.allclose(matrix0, matrix1)
def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'):
"""Try import all public attributes from module into global namespace.
Existing attributes with name clashes are renamed with prefix.
Attributes starting with underscore are ignored by default.
Return True on successful import.
"""
import warnings
from importlib import import_module
try:
if not package:
module = import_module(name)
else:
module = import_module('.' + name, package=package)
except ImportError:
if warn:
warnings.warn("failed to import module %s" % name)
else:
for attr in dir(module):
if ignore and attr.startswith(ignore):
continue
if prefix:
if attr in globals():
globals()[prefix + attr] = globals()[attr]
elif warn:
warnings.warn("no Python implementation of " + attr)
globals()[attr] = getattr(module, attr)
return True
_import_module('transformations')
if __name__ == "__main__":
import doctest
import random # used in doctests
numpy.set_printoptions(suppress=True, precision=5)
doctest.testmod()
|
mit
| -8,569,947,633,209,522,000
| 33.410109
| 79
| 0.579559
| false
| 3.015619
| false
| false
| false
|
drivnal/drivnal
|
drivnal/handlers/task.py
|
1
|
1800
|
from drivnal.constants import *
from drivnal.client import Client
from drivnal.task import Task
from drivnal.event import Event
import drivnal.utils as utils
from drivnal import server
import os
import flask
import time
@server.app.route('/task/<volume_id>', methods=['GET'])
def task_get(volume_id):
client = Client()
volume = client.get_volume(volume_id)
if not volume:
return utils.jsonify({
'error': VOLUME_NOT_FOUND,
'error_msg': VOLUME_NOT_FOUND_MSG,
}, 404)
tasks = []
for task in reversed(volume.get_tasks()):
task_data = {
'id': task.id,
'volume': volume.id,
'volume_name': volume.name,
'type': task.type,
'state': task.state,
'time': task.time,
'has_log': False,
}
if task.log_path:
if os.path.isfile(task.log_path):
task_data['has_log'] = True
if task.snapshot_id:
task_data['snapshot_id'] = task.snapshot_id
tasks.append(task_data)
return utils.jsonify(tasks)
@server.app.route('/task/<volume_id>/<task_id>', methods=['PUT'])
def task_put(volume_id, task_id):
client = Client()
volume = client.get_volume(volume_id)
if not volume:
return utils.jsonify({
'error': VOLUME_NOT_FOUND,
'error_msg': VOLUME_NOT_FOUND_MSG,
}, 404)
task = Task(id=task_id.encode())
if 'abort' in flask.request.json and flask.request.json['abort']:
task.abort()
return utils.jsonify({})
@server.app.route('/task/<volume_id>/<task_id>', methods=['DELETE'])
def task_delete(volume_id, task_id):
client = Client()
task = Task(id=task_id.encode())
task.remove()
return utils.jsonify({})
|
agpl-3.0
| 4,385,070,478,506,404,400
| 24.714286
| 69
| 0.583889
| false
| 3.468208
| false
| false
| false
|
coll-gate/collgate
|
server/settings/staging.py
|
1
|
4562
|
# -*- coding: utf-8; -*-
#
# @file staging.py
# @brief Staging specific settings.
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2016-09-01
# @copyright Copyright (c) 2016 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details
import os
from .base import *
DEBUG = False
ADMINS = (
('admin_fscherma', 'frederic.scherma@inra.fr'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'HOST': '',
'PORT': '',
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'collgate',
'USER': 'collgate',
'PASSWORD': 'collgate',
'CONN_MAX_AGE': 86400
}
}
ALLOWED_HOSTS = ['staging.gdec.clermont.inra.fr', 'localhost', '127.0.0.1']
# session cookie path
SESSION_COOKIE_PATH = "/coll-gate/"
# CRSF cookie path
CSRF_COOKIE_PATH = "/coll-gate/"
MAX_UPLOAD_SIZE = 3145728 # 3Mio
CONTENT_TYPES = ['text/plain']
MEDIA_URL = 'media/'
STATIC_ROOT = 'static/'
STATIC_URL = '/coll-gate/static/'
TEMPLATES[0]['OPTIONS']['debug'] = False
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
# 'debug_panel.middleware.DebugPanelMiddleware',
'igdectk.rest.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'igdectk.rest.restmiddleware.RestMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
INSTALLED_APPS = (
'bootstrap3',
'django.contrib.postgres',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'guardian',
'igdectk.common',
'igdectk.jquery',
'igdectk.bootstrap',
'main',
'messenger',
'audit',
'permission',
'descriptor',
'medialibrary',
'geonames',
'geolocation',
'organisation',
'classification',
'accession',
'printer'
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'()': 'logging.Formatter',
'format': '[%(asctime)s] <%(levelname)s> %(name)s : %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
'colored': {
'()': 'igdectk.common.logging.ColoredFormatter',
'format': '[%(asctime)s] <%(levelname)s> %(name)s : %(message)s',
'datefmt': '%d/%b/%Y %H:%M:%S',
}
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'colored',
},
'file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, '..', '..', 'logs', 'collgate.log'),
'formatter': 'standard',
'maxBytes': 1024*1024*16, # 16MB
'backupCount': 10,
},
},
'loggers': {
'django': {
'handlers': ['console', 'file'],
'level': 'WARNING',
'propagate': True,
},
'django.request': {
'handlers': ['mail_admins', 'console'],
'level': 'WARNING',
'propagate': True,
},
'collgate': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': True,
},
'igdectk': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': True,
}
}
}
DEFAULT_FROM_EMAIL = "frederic.scherma@inra.fr"
EMAIL_HOST = "smtp.clermont.inra.fr"
#EMAIL_USE_TLS = True
EMAIL_PORT = 25 # 465
EMAIL_HOST_USER = "fscherma"
EMAIL_HOST_PASSWORD = ""
#EMAIL_USE_SSL = True
APPLICATIONS['geonames'] = {
'DB_DEFAULT_SETTINGS': {
'geonames_username': "demo",
}
}
APPLICATIONS['medialibrary'] = {
'DB_DEFAULT_SETTINGS': {
'storage_location': "/coll-gate/media",
'storage_path': "/var/lib/collgate/media"
}
}
|
mit
| 8,841,889,773,940,665,000
| 24.47486
| 83
| 0.555702
| false
| 3.418291
| false
| false
| false
|
katacarbix/pyhp
|
resources/filebrowser.py
|
1
|
1330
|
<?py
import os
from datetime import datetime
path = postvars['path']
dir = postvars['dir']
def fsize(num, suffix='B'):
for unit in ['','K','M','G','T','P','E','Z']:
if abs(num) < 1024.0:
return "%3.0f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
?>
<html>
<head>
<title>Index of <?py print dir ?></title>
<style>
td, th {
padding: 0px 10px;
text-align: left;
}
</style>
</head>
<body>
<h1>Index of <?py print dir ?></h1>
<table border=0>
<tr>
<th style="min-width:200px">Name</th>
<th>Type</th>
<th>Last Modified</th>
<th>Size</th>
</tr>
<tr><th colspan=4><hr></th></tr>
<?py
tree = os.listdir(path)
if dir != "/":
print "<tr>"
print "<td><a href=\"../\">Parent Directory</a></td>"
print "</tr>"
for branch in tree:
branch = str(branch)
print "<tr>"
print "<td><a href=\""+dir+branch+['','/'][os.path.isdir(path+branch)]+"\">"+ branch +"</a></td>"
if os.path.isdir(path+branch):
print "<td>dir</td>"
elif os.path.isfile(path+branch):
print "<td>file</td>"
else:
print "<td>-</td>"
print "<td>"+ datetime.fromtimestamp(os.path.getmtime(path+branch)).isoformat() +"</td>"
print "<td>"+ fsize(os.path.getsize(path+branch)) +"</td>"
print "</tr>"
?>
<tr><th colspan=4><hr></th></tr>
</table>
</body>
</html>
|
gpl-2.0
| 1,932,784,922,014,772,200
| 22.350877
| 98
| 0.558647
| false
| 2.449355
| false
| false
| false
|
MediaKraken/MediaKraken_Deployment
|
docker/alpine/ComposeMediaKrakenLDAP/root/app/nginx-ldap-auth-daemon.py
|
1
|
12042
|
#!/bin/sh
''''[ -z $LOG ] && export LOG=/dev/stdout # '''
''''which python2 >/dev/null && exec python2 -u "$0" "$@" >> $LOG 2>&1 # '''
''''which python >/dev/null && exec python -u "$0" "$@" >> $LOG 2>&1 # '''
# Copyright (C) 2014-2015 Nginx, Inc.
# Copyright (C) 2018 LinuxServer.io
import sys, os, signal, base64, ldap, Cookie, argparse
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from cryptography.fernet import Fernet
from cryptography.fernet import InvalidToken
#Listen = ('localhost', 8888)
#Listen = "/tmp/auth.sock" # Also uncomment lines in 'Requests are
# processed with UNIX sockets' section below
# -----------------------------------------------------------------------------
# Different request processing models: select one
# -----------------------------------------------------------------------------
# Requests are processed in separate thread
import threading
from SocketServer import ThreadingMixIn
class AuthHTTPServer(ThreadingMixIn, HTTPServer):
pass
# -----------------------------------------------------------------------------
# Requests are processed in separate process
#from SocketServer import ForkingMixIn
#class AuthHTTPServer(ForkingMixIn, HTTPServer):
# pass
# -----------------------------------------------------------------------------
# Requests are processed with UNIX sockets
#import threading
#from SocketServer import ThreadingUnixStreamServer
#class AuthHTTPServer(ThreadingUnixStreamServer, HTTPServer):
# pass
# -----------------------------------------------------------------------------
class AuthHandler(BaseHTTPRequestHandler):
# Return True if request is processed and response sent, otherwise False
# Set ctx['user'] and ctx['pass'] for authentication
def do_GET(self):
ctx = self.ctx
ctx['action'] = 'input parameters check'
for k, v in self.get_params().items():
ctx[k] = self.headers.get(v[0], v[1])
if ctx[k] == None:
self.auth_failed(ctx, 'required "%s" header was not passed' % k)
return True
ctx['action'] = 'performing authorization'
auth_header = self.headers.get('Authorization')
auth_cookie = self.get_cookie(ctx['cookiename'])
if auth_cookie != None and auth_cookie != '':
auth_header = "Basic " + auth_cookie
self.log_message("using username/password from cookie %s" %
ctx['cookiename'])
else:
self.log_message("using username/password from authorization header")
if auth_header is None or not auth_header.lower().startswith('basic '):
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm="' + ctx['realm'] + '"')
self.send_header('Cache-Control', 'no-cache')
self.end_headers()
return True
ctx['action'] = 'decoding credentials'
try:
cipher_suite = Fernet('REPLACEWITHFERNETKEY')
self.log_message('Trying to dechipher credentials...')
auth_decoded = cipher_suite.decrypt(auth_header[6:])
user, passwd = auth_decoded.split(':', 1)
except InvalidToken:
self.log_message('Incorrect token. Trying to decode credentials from BASE64...')
auth_decoded = base64.b64decode(auth_header[6:])
user, passwd = auth_decoded.split(':', 1)
except Exception as e:
self.auth_failed(ctx)
self.log_error(e)
return True
ctx['user'] = user
ctx['pass'] = passwd
# Continue request processing
return False
def get_cookie(self, name):
cookies = self.headers.get('Cookie')
if cookies:
authcookie = Cookie.BaseCookie(cookies).get(name)
if authcookie:
return authcookie.value
else:
return None
else:
return None
# Log the error and complete the request with appropriate status
def auth_failed(self, ctx, errmsg = None):
msg = 'Error while ' + ctx['action']
if errmsg:
msg += ': ' + errmsg
ex, value, trace = sys.exc_info()
if ex != None:
msg += ": " + str(value)
if ctx.get('url'):
msg += ', server="%s"' % ctx['url']
if ctx.get('user'):
msg += ', login="%s"' % ctx['user']
self.log_error(msg)
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm="' + ctx['realm'] + '"')
self.send_header('Cache-Control', 'no-cache')
self.end_headers()
def get_params(self):
return {}
def log_message(self, format, *args):
if len(self.client_address) > 0:
addr = BaseHTTPRequestHandler.address_string(self)
else:
addr = "-"
if not hasattr(self, 'ctx'):
user = '-'
else:
user = self.ctx['user']
sys.stdout.write("%s - %s [%s] %s\n" % (addr, user,
self.log_date_time_string(), format % args))
def log_error(self, format, *args):
self.log_message(format, *args)
# Verify username/password against LDAP server
class LDAPAuthHandler(AuthHandler):
# Parameters to put into self.ctx from the HTTP header of auth request
params = {
# parameter header default
'realm': ('X-Ldap-Realm', 'Restricted'),
'url': ('X-Ldap-URL', None),
'starttls': ('X-Ldap-Starttls', 'false'),
'basedn': ('X-Ldap-BaseDN', None),
'template': ('X-Ldap-Template', '(cn=%(username)s)'),
'binddn': ('X-Ldap-BindDN', ''),
'bindpasswd': ('X-Ldap-BindPass', ''),
'cookiename': ('X-CookieName', '')
}
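    # Illustrative note (editor's sketch, not part of the original file): a
    # reverse proxy doing an auth subrequest against this daemon passes the
    # settings above as HTTP headers, for example
    #   X-Ldap-URL:      ldap://ldap.example.com:389
    #   X-Ldap-BaseDN:   dc=example,dc=com
    #   X-Ldap-Template: (sAMAccountName=%(username)s)
    # Headers that are omitted fall back to the defaults in this table, or to
    # the command-line values installed through set_params() at startup.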
@classmethod
def set_params(cls, params):
cls.params = params
def get_params(self):
return self.params
# GET handler for the authentication request
def do_GET(self):
ctx = dict()
self.ctx = ctx
ctx['action'] = 'initializing basic auth handler'
ctx['user'] = '-'
if AuthHandler.do_GET(self):
# request already processed
return
ctx['action'] = 'empty password check'
if not ctx['pass']:
self.auth_failed(ctx, 'attempt to use empty password')
return
try:
# check that uri and baseDn are set
# either from cli or a request
if not ctx['url']:
self.log_message('LDAP URL is not set!')
return
if not ctx['basedn']:
self.log_message('LDAP baseDN is not set!')
return
ctx['action'] = 'initializing LDAP connection'
ldap_obj = ldap.initialize(ctx['url']);
# Python-ldap module documentation advises to always
            # explicitly set the LDAP version to use after running
# initialize() and recommends using LDAPv3. (LDAPv2 is
# deprecated since 2003 as per RFC3494)
#
# Also, the STARTTLS extension requires the
# use of LDAPv3 (RFC2830).
ldap_obj.protocol_version=ldap.VERSION3
# Establish a STARTTLS connection if required by the
# headers.
if ctx['starttls'] == 'true':
ldap_obj.start_tls_s()
# See http://www.python-ldap.org/faq.shtml
# uncomment, if required
# ldap_obj.set_option(ldap.OPT_REFERRALS, 0)
ctx['action'] = 'binding as search user'
ldap_obj.bind_s(ctx['binddn'], ctx['bindpasswd'], ldap.AUTH_SIMPLE)
ctx['action'] = 'preparing search filter'
searchfilter = ctx['template'] % { 'username': ctx['user'] }
self.log_message(('searching on server "%s" with base dn ' + \
'"%s" with filter "%s"') %
(ctx['url'], ctx['basedn'], searchfilter))
ctx['action'] = 'running search query'
results = ldap_obj.search_s(ctx['basedn'], ldap.SCOPE_SUBTREE,
searchfilter, ['objectclass'], 1)
ctx['action'] = 'verifying search query results'
if len(results) < 1:
self.auth_failed(ctx, 'no objects found')
return
ctx['action'] = 'binding as an existing user'
ldap_dn = results[0][0]
ctx['action'] += ' "%s"' % ldap_dn
ldap_obj.bind_s(ldap_dn, ctx['pass'], ldap.AUTH_SIMPLE)
self.log_message('Auth OK for user "%s"' % (ctx['user']))
# Successfully authenticated user
self.send_response(200)
self.end_headers()
except Exception as e:
self.auth_failed(ctx)
self.log_error(str(e))
raise
def exit_handler(signal, frame):
global Listen
if isinstance(Listen, basestring):
try:
os.unlink(Listen)
except:
ex, value, trace = sys.exc_info()
sys.stderr.write('Failed to remove socket "%s": %s\n' %
(Listen, str(value)))
sys.stderr.flush()
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Simple Nginx LDAP authentication helper.""")
# Group for listen options:
group = parser.add_argument_group("Listen options")
group.add_argument('--host', metavar="hostname",
default="localhost", help="host to bind (Default: localhost)")
group.add_argument('-p', '--port', metavar="port", type=int,
default=8888, help="port to bind (Default: 8888)")
# ldap options:
group = parser.add_argument_group(title="LDAP options")
group.add_argument('-u', '--url', metavar="URL",
default="ldap://localhost:389",
help=("LDAP URI to query (Default: ldap://localhost:389)"))
group.add_argument('-s', '--starttls', metavar="starttls",
default="false",
help=("Establish a STARTTLS protected session (Default: false)"))
group.add_argument('-b', metavar="baseDn", dest="basedn", default='',
help="LDAP base dn (Default: unset)")
group.add_argument('-D', metavar="bindDn", dest="binddn", default='',
help="LDAP bind DN (Default: anonymous)")
group.add_argument('-w', metavar="passwd", dest="bindpw", default='',
help="LDAP password for the bind DN (Default: unset)")
group.add_argument('-f', '--filter', metavar='filter',
default='(cn=%(username)s)',
help="LDAP filter (Default: cn=%%(username)s)")
# http options:
group = parser.add_argument_group(title="HTTP options")
group.add_argument('-R', '--realm', metavar='"Restricted Area"',
default="Restricted", help='HTTP auth realm (Default: "Restricted")')
group.add_argument('-c', '--cookie', metavar="cookiename",
default="", help="HTTP cookie name to set in (Default: unset)")
args = parser.parse_args()
global Listen
Listen = (args.host, args.port)
auth_params = {
'realm': ('X-Ldap-Realm', args.realm),
'url': ('X-Ldap-URL', args.url),
'starttls': ('X-Ldap-Starttls', args.starttls),
'basedn': ('X-Ldap-BaseDN', args.basedn),
'template': ('X-Ldap-Template', args.filter),
'binddn': ('X-Ldap-BindDN', args.binddn),
'bindpasswd': ('X-Ldap-BindPass', args.bindpw),
'cookiename': ('X-CookieName', args.cookie)
}
LDAPAuthHandler.set_params(auth_params)
server = AuthHTTPServer(Listen, LDAPAuthHandler)
signal.signal(signal.SIGINT, exit_handler)
signal.signal(signal.SIGTERM, exit_handler)
sys.stdout.write("Start listening on %s:%d...\n" % Listen)
sys.stdout.flush()
server.serve_forever()
|
gpl-3.0
| -7,031,179,566,730,927,000
| 35.93865
| 92
| 0.546836
| false
| 4.115516
| false
| false
| false
|
wisechengyi/pants
|
contrib/go/src/python/pants/contrib/go/register.py
|
1
|
2555
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.build_graph.build_file_aliases import BuildFileAliases, TargetMacro
from pants.goal.task_registrar import TaskRegistrar as task
from pants.contrib.go.targets.go_binary import GoBinary
from pants.contrib.go.targets.go_library import GoLibrary
from pants.contrib.go.targets.go_protobuf_library import GoProtobufLibrary
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.targets.go_thrift_library import GoThriftLibrary
from pants.contrib.go.tasks.go_binary_create import GoBinaryCreate
from pants.contrib.go.tasks.go_buildgen import GoBuildgen
from pants.contrib.go.tasks.go_checkstyle import GoCheckstyle
from pants.contrib.go.tasks.go_compile import GoCompile
from pants.contrib.go.tasks.go_fetch import GoFetch
from pants.contrib.go.tasks.go_fmt import GoFmt
from pants.contrib.go.tasks.go_go import GoEnv, GoGo
from pants.contrib.go.tasks.go_protobuf_gen import GoProtobufGen
from pants.contrib.go.tasks.go_run import GoRun
from pants.contrib.go.tasks.go_test import GoTest
from pants.contrib.go.tasks.go_thrift_gen import GoThriftGen
def build_file_aliases():
return BuildFileAliases(
targets={
GoBinary.alias(): TargetMacro.Factory.wrap(GoBinary.create, GoBinary),
GoLibrary.alias(): TargetMacro.Factory.wrap(GoLibrary.create, GoLibrary),
GoProtobufLibrary.alias(): GoProtobufLibrary,
GoThriftLibrary.alias(): GoThriftLibrary,
"go_remote_libraries": TargetMacro.Factory.wrap(
GoRemoteLibrary.from_packages, GoRemoteLibrary
),
"go_remote_library": TargetMacro.Factory.wrap(
GoRemoteLibrary.from_package, GoRemoteLibrary
),
}
)
def register_goals():
task(name="go-thrift", action=GoThriftGen).install("gen")
task(name="go-protobuf", action=GoProtobufGen).install("gen")
task(name="go", action=GoBuildgen).install("buildgen")
task(name="go", action=GoGo).install("go")
task(name="go-env", action=GoEnv).install()
task(name="go", action=GoFetch).install("resolve")
task(name="go", action=GoCompile).install("compile")
task(name="go", action=GoBinaryCreate).install("binary")
task(name="go", action=GoRun).install("run")
task(name="go", action=GoCheckstyle).install("lint")
task(name="go", action=GoTest).install("test")
task(name="go", action=GoFmt).install("fmt")
|
apache-2.0
| 1,653,328,518,813,307,600
| 46.314815
| 85
| 0.735029
| false
| 3.568436
| false
| false
| false
|
SKIRT/PTS
|
core/prep/dustgrids.py
|
1
|
23186
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.fitting.dustgrids Contains the DustGridGenerator class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import math
from collections import OrderedDict
# Import astronomical modules
from astropy.units import dimensionless_angles
# Import the relevant PTS classes and modules
from ..simulation.grids import BinaryTreeDustGrid, OctTreeDustGrid, CartesianDustGrid
from ...core.basics.log import log
from ...core.basics.range import zip_linear
from ..basics.configurable import Configurable
from ..basics.table import SmartTable
from ..basics.range import RealRange, QuantityRange, IntegerRange
from ..tools import types
# -----------------------------------------------------------------
class DustGridsTable(SmartTable):
"""
This class ...
"""
# Add column info
_column_info = OrderedDict()
_column_info["Type"] = (str, None, "grid type")
_column_info["Min x"] = (float, "pc", "minimum x")
_column_info["Max x"] = (float, "pc", "maximum x")
_column_info["Min y"] = (float, "pc", "minimum y")
_column_info["Max y"] = (float, "pc", "maximum y")
_column_info["Min z"] = (float, "pc", "minimum z")
_column_info["Max z"] = (float, "pc", "maximum z")
_column_info["Smallest scale"] = (float, "pc", "Smallest scale")
_column_info["Min level"] = (int, None, "Minimum level")
_column_info["Max mass fraction"] = (float, None, "Maximum mass fraction")
# -----------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param args:
:param kwargs:
"""
# Call the constructor of the base class
super(DustGridsTable, self).__init__(*args, **kwargs)
# Add column info
self.add_all_column_info(self._column_info)
# -----------------------------------------------------------------
def add_entry(self, grid_type, x_range, y_range, z_range, scale, min_level, max_mass_fraction):
"""
This function ...
:param grid_type:
:param x_range:
:param y_range:
:param z_range:
:param scale:
:param min_level:
:param max_mass_fraction:
:return:
"""
# Add a row to the table
self.add_row([grid_type, x_range.min, x_range.max, y_range.min, y_range.max, z_range.min, z_range.max, scale, min_level, max_mass_fraction])
# -----------------------------------------------------------------
class DustGridGenerator(Configurable):
"""
This class...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(DustGridGenerator, self).__init__(*args, **kwargs)
# -- Attributes --
# Settings
self.scale_range = None
self.level_range = None
self.mass_fraction_range = None
self.ngrids = None
self._grid_type = None
self.x_radius = None
self.y_radius = None
self.z_radius = None
# The dust grids
self.grids = []
# The dust grid property table
self.table = None
# -----------------------------------------------------------------
@property
def grid_type(self):
return self._grid_type
# -----------------------------------------------------------------
@grid_type.setter
def grid_type(self, grid_type):
"""
This function ...
:return:
"""
if not grid_type in ["cartesian", "bintree", "octtree"]: raise RuntimeError("Grid type '" + str(grid_type) + "' invalid. Must be either 'cartesian', 'bintree', or 'octtree'.")
self._grid_type = grid_type
# -----------------------------------------------------------------
@property
def x_min(self):
return - self.x_radius
# -----------------------------------------------------------------
@property
def x_max(self):
return self.x_radius
# -----------------------------------------------------------------
@property
def x_extent(self):
return self.x_max - self.x_min
# -----------------------------------------------------------------
@property
def y_min(self):
return - self.y_radius
# -----------------------------------------------------------------
@property
def y_max(self):
return self.y_radius
# -----------------------------------------------------------------
@property
def y_extent(self):
return self.y_max - self.y_min
# -----------------------------------------------------------------
@property
def z_min(self):
return - self.z_radius
# -----------------------------------------------------------------
@property
def z_max(self):
return self.z_radius
# -----------------------------------------------------------------
@property
def z_extent(self):
return self.z_max - self.z_min
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# 2. Generate the dust grids
self.generate()
# 3. Show
if self.config.show: self.show()
# 4. Write
if self.config.write: self.write()
# -----------------------------------------------------------------
@property
def single_grid(self):
"""
This function ...
:return:
"""
if len(self.grids) == 0: raise RuntimeError("No grid")
elif len(self.grids) == 1: return self.grids[0]
else: raise RuntimeError("More than one grid")
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(DustGridGenerator, self).setup(**kwargs)
# Get settings
self.ngrids = kwargs.pop("ngrids")
if self.ngrids == 1:
self.scale_range = QuantityRange.infinitesimal(kwargs.pop("scale"))
self.level_range = IntegerRange.infinitesimal(kwargs.pop("level"))
self.mass_fraction_range = RealRange.infinitesimal(kwargs.pop("mass_fraction"))
else:
self.scale_range = kwargs.pop("scale_range")
self.level_range = kwargs.pop("level_range")
self.mass_fraction_range = kwargs.pop("mass_fraction_range")
# Initialize the table
self.table = DustGridsTable()
# -----------------------------------------------------------------
def generate(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the grids ...")
# Loop over the different grid parameter values
for scale, min_level, mass_fraction in zip_linear(self.scale_range, self.level_range, self.mass_fraction_range, npoints=self.ngrids):
# Create the grid and add it to the list
if self.grid_type == "cartesian": self.create_cartesian_dust_grid(scale)
elif self.grid_type == "bintree": self.create_binary_tree_dust_grid(scale, min_level, mass_fraction)
elif self.grid_type == "octtree": self.create_octtree_dust_grid(scale, min_level, mass_fraction)
else: raise ValueError("Invalid grid type: " + self.grid_type)
# -----------------------------------------------------------------
def create_cartesian_dust_grid(self, scale):
"""
This function ...
:param scale:
:return:
"""
# Create the grid
grid = create_one_cartesian_dust_grid(scale, self.x_extent, self.y_extent, self.z_extent, self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max)
# Add the grid
self.grids.append(grid)
# Debugging
log.debug("Created a cartesian dust grid with:")
if log.is_debug:
print("")
print(grid)
print("")
# Add a row to the table
self.table.add_row([self.grid_type, self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max, scale, None, None])
# -----------------------------------------------------------------
def create_binary_tree_dust_grid(self, scale, min_level, max_mass_fraction):
"""
This function ...
:param scale:
:param min_level:
:param max_mass_fraction:
:return:
"""
# Create the grid
grid = create_one_bintree_dust_grid(scale, self.x_extent, self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max, min_level, max_mass_fraction)
# Add the grid
self.grids.append(grid)
# Debugging
log.debug("Created a binary tree dust grid with:")
if log.is_debug:
print("")
print(grid)
print("")
# Add a row to the table
self.table.add_row([self.grid_type, self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max, scale, min_level, max_mass_fraction])
# -----------------------------------------------------------------
def create_octtree_dust_grid(self, scale, min_level, max_mass_fraction):
"""
This function ...
:param scale:
:param min_level:
:param max_mass_fraction:
:return:
"""
# Create the grid
grid = create_one_octtree_dust_grid(scale, self.x_extent, self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max, min_level, max_mass_fraction)
# Add the grid
self.grids.append(grid)
# Debugging
log.debug("Created an octtree dust grid with:")
if log.is_debug:
print("")
print(grid)
print("")
# Add entry to the table
x_range = RealRange(self.x_min, self.x_max)
y_range = RealRange(self.y_min, self.y_max)
z_range = RealRange(self.z_min, self.z_max)
        self.table.add_entry(self.grid_type, x_range, y_range, z_range, scale, min_level, max_mass_fraction)
# -----------------------------------------------------------------
def show(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
def write(self):
"""
This function ..
:return:
"""
# Inform the user
log.info("Writing ...")
# Write the grids
self.write_grids()
# Write table
self.write_table()
# -----------------------------------------------------------------
def write_grids(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing grids ...")
# -----------------------------------------------------------------
def write_table(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing table ...")
# -----------------------------------------------------------------
def create_one_dust_grid_for_galaxy_from_deprojection(grid_type, deprojection, distance, sky_ellipse, min_level,
max_mass_fraction, max_ndivisions_per_pixel=2, nscaleheights=10.):
"""
This function ...
:param grid_type:
:param deprojection:
:param distance:
:param sky_ellipse:
:param min_level:
:param max_mass_fraction:
:param max_ndivisions_per_pixel:
:param nscaleheights:
:return:
"""
if sky_ellipse is not None:
# Calculate the major radius of the truncation ellipse in physical coordinates (pc)
semimajor_angular = sky_ellipse.semimajor # semimajor axis length of the sky ellipse
radius_physical = (semimajor_angular * distance).to("pc", equivalencies=dimensionless_angles())
else:
x_radius_physical = deprojection.x_range.radius
y_radius_physical = deprojection.y_range.radius
radius_physical = max(x_radius_physical, y_radius_physical)
# Get properties
average_pixelscale = deprojection.pixelscale
scaleheight = deprojection.scale_height
# Get the pixelscale in physical units
if types.is_angle(average_pixelscale):
pixelscale_angular = average_pixelscale.to("deg")
# pixelscale_angular = self.reference_wcs.average_pixelscale.to("deg") # in deg
pixelscale = (pixelscale_angular * distance).to("pc", equivalencies=dimensionless_angles())
elif types.is_length_quantity(average_pixelscale): pixelscale = average_pixelscale.to("pc") # normally it should be this case (deprojections should have their pixelscale defined in physical units)
else: raise ValueError("Pixelscale should be an angle or a length quantity")
# Determine the minimum physical scale
min_scale = pixelscale / float(max_ndivisions_per_pixel)
# Create the dust grid
return create_one_dust_grid_for_galaxy(grid_type, radius_physical, scaleheight, min_scale, min_level, max_mass_fraction, nscaleheights=nscaleheights)
# -----------------------------------------------------------------
def create_one_dust_grid_for_galaxy(grid_type, radius, scaleheight, min_scale, min_level, max_mass_fraction, nscaleheights=10.):
"""
This function ...
:param grid_type:
:param radius: IN PHYSICAL COORDINATES
:param scaleheight: IN PHYSICAL COORDINATES
:param min_scale:
:param min_level:
:param max_mass_fraction:
:param nscaleheights: REAL NUMBER
:return:
"""
# Determine x, y and z radius
x_radius = radius
y_radius = radius
z_radius = scaleheight * nscaleheights
# X
x_min = - x_radius
x_max = x_radius
x_extent = x_max - x_min
# Y
y_min = - y_radius
y_max = y_radius
y_extent = y_max - y_min
# Z
z_min = - z_radius
z_max = z_radius
z_extent = z_max - z_min
# Create the dust grid
return create_one_dust_grid(grid_type, min_scale, x_extent, y_extent, z_extent, x_min, x_max, y_min, y_max, z_min, z_max, min_level, max_mass_fraction)
# -----------------------------------------------------------------
def create_one_dust_grid(grid_type, scale, x_extent, y_extent, z_extent, x_min, x_max, y_min, y_max, z_min, z_max, min_level, max_mass_fraction):
"""
This function ...
:param grid_type:
:param scale:
:param x_extent:
:param y_extent:
:param z_extent:
:param x_min:
:param x_max:
:param y_min:
:param y_max:
:param z_min:
:param z_max:
:param min_level:
:param max_mass_fraction:
:return:
"""
# Create the specified type of grid
if grid_type == "cartesian": return create_one_cartesian_dust_grid(scale, x_extent, y_extent, z_extent, x_min, x_max, y_min, y_max, z_min, z_max)
elif grid_type == "bintree": return create_one_bintree_dust_grid(scale, x_extent, x_min, x_max, y_min, y_max, z_min, z_max, min_level, max_mass_fraction)
elif grid_type == "octtree": return create_one_octtree_dust_grid(scale, x_extent, x_min, x_max, y_min, y_max, z_min, z_max, min_level, max_mass_fraction)
else: raise ValueError("Unknown dust grid type: " + grid_type)
# -----------------------------------------------------------------
def create_one_cartesian_dust_grid(scale, x_extent, y_extent, z_extent, x_min, x_max, y_min, y_max, z_min, z_max):
"""
This function ...
:param scale:
:param x_extent:
:param y_extent:
:param z_extent:
:param x_min:
:param x_max:
:param y_min:
:param y_max:
:param z_min:
:param z_max:
:return:
"""
# Inform the user
log.info("Creating a cartesian dust grid with a physical scale of " + str(scale) + " ...")
# Calculate the number of bins in each direction
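    # (editor's note: e.g. a 1 kpc extent sampled at a 30 pc scale gives ceil(1000/30) = 34 bins per axis)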
x_bins = int(math.ceil(x_extent.to("pc").value / scale.to("pc").value))
y_bins = int(math.ceil(y_extent.to("pc").value / scale.to("pc").value))
z_bins = int(math.ceil(z_extent.to("pc").value / scale.to("pc").value))
# Create the grid
grid = CartesianDustGrid(x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max, z_min=z_min, z_max=z_max,
x_bins=x_bins, y_bins=y_bins, z_bins=z_bins)
# Return the grid
return grid
# -----------------------------------------------------------------
def create_one_bintree_dust_grid(scale, x_extent, x_min, x_max, y_min, y_max, z_min, z_max, min_level, max_mass_fraction):
"""
This function ...
:param scale:
:param x_extent:
:param x_min:
:param x_max:
:param y_min:
:param y_max:
:param z_min:
:param z_max:
:param min_level:
:param max_mass_fraction:
:return:
"""
# Inform the user
log.info("Creating a binary tree dust grid with a smallest physical scale of " + str(scale) + ", with a minimum division level of " + str(min_level) + " and a maximum mass fraction of " + str(max_mass_fraction) + " ...")
# Calculate the maximum division level that is necessary to resolve the smallest scale of the input maps
extent_x = x_extent.to("pc").value
smallest_scale = scale.to("pc").value
max_level = max_level_for_smallest_scale_bintree(extent_x, smallest_scale)
# Check arguments
if x_min is None: raise ValueError("'x_min' is undefined")
if x_max is None: raise ValueError("'x_max' is undefined")
if y_min is None: raise ValueError("'y_min' is undefined")
if y_max is None: raise ValueError("'y_max' is undefined")
if z_min is None: raise ValueError("'z_min' is undefined")
if z_max is None: raise ValueError("'z_max' is undefined")
if min_level is None: raise ValueError("'min_level' is undefined")
if max_mass_fraction is None: raise ValueError("'max_mass_fraction' is undefined")
# Create the dust grid
grid = BinaryTreeDustGrid(min_x=x_min, max_x=x_max, min_y=y_min, max_y=y_max, min_z=z_min, max_z=z_max,
min_level=min_level, max_level=max_level, max_mass_fraction=max_mass_fraction)
# Return the grid
return grid
# -----------------------------------------------------------------
def create_one_octtree_dust_grid(scale, x_extent, x_min, x_max, y_min, y_max, z_min, z_max, min_level, max_mass_fraction):
"""
This function ...
:param scale:
:param x_extent:
:param x_min:
:param x_max:
:param y_min:
:param y_max:
:param z_min:
:param z_max:
:param min_level:
:param max_mass_fraction:
:return:
"""
# Inform the user
log.info("Creating a octtree dust grid with a smallest physical scale of " + str(scale) + ", with a minimum division level of " + str(min_level) + " and a maximum mass fraction of " + str(max_mass_fraction) + " ...")
# Calculate the minimum division level that is necessary to resolve the smallest scale of the input maps
extent_x = x_extent.to("pc").value
smallest_scale = scale.to("pc").value
max_level = max_level_for_smallest_scale_octtree(extent_x, smallest_scale)
# Check arguments
if x_min is None: raise ValueError("'x_min' is undefined")
if x_max is None: raise ValueError("'x_max' is undefined")
if y_min is None: raise ValueError("'y_min' is undefined")
if y_max is None: raise ValueError("'y_max' is undefined")
if z_min is None: raise ValueError("'z_min' is undefined")
if z_max is None: raise ValueError("'z_max' is undefined")
if min_level is None: raise ValueError("'min_level' is undefined")
if max_mass_fraction is None: raise ValueError("'max_mass_fraction' is undefined")
# Create the dust grid
grid = OctTreeDustGrid(min_x=x_min, max_x=x_max, min_y=y_min, max_y=y_max, min_z=z_min, max_z=z_max,
min_level=min_level, max_level=max_level, max_mass_fraction=max_mass_fraction)
# Return the grid
return grid
# -----------------------------------------------------------------
def max_level_for_smallest_scale_bintree(extent, smallest_scale):
"""
This function ...
:param extent:
:param smallest_scale:
:return:
"""
ratio = extent / smallest_scale
octtree_level = int(math.ceil(math.log(ratio, 2)))
level = int(3 * octtree_level)
return level
# -----------------------------------------------------------------
def smallest_scale_for_max_level_bintree(extent, max_level):
"""
This function ...
:param extent:
:param max_level:
:return:
"""
octtree_level = max_level / 3
max_ratio = 2**octtree_level
min_scale = extent / max_ratio
return min_scale
# -----------------------------------------------------------------
def max_level_for_smallest_scale_octtree(extent, smallest_scale):
"""
This function ...
:param extent:
:param smallest_scale:
:return:
"""
ratio = extent / smallest_scale
octtree_level = int(math.ceil(math.log(ratio, 2)))
return octtree_level
# -----------------------------------------------------------------
def smallest_scale_for_max_level_octtree(extent, max_level):
"""
This function ...
:param extent:
:param max_level:
:return:
"""
max_ratio = 2**max_level
min_scale = extent / max_ratio
return min_scale
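# -----------------------------------------------------------------
def _example_levels_for_scale():
    """
    Editor's illustrative sketch (not part of the original module): for a
    10 kpc domain and a requested smallest cell of 10 pc, an octtree needs
    ceil(log2(1000)) = 10 subdivision levels, while the equivalent binary
    tree needs three times as many (one subdivision per axis), i.e. 30.
    :return:
    """
    octtree_level = max_level_for_smallest_scale_octtree(10000., 10.)
    bintree_level = max_level_for_smallest_scale_bintree(10000., 10.)
    assert octtree_level == 10 and bintree_level == 30
    return octtree_level, bintree_level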
# -----------------------------------------------------------------
def smallest_scale_for_dust_grid(grid):
"""
This function ...
:param grid:
:return:
"""
# Cartesian grid
if isinstance(grid, CartesianDustGrid):
min_x_scale = grid.x_extent / float(grid.x_bins)
min_y_scale = grid.y_extent / float(grid.y_bins)
min_z_scale = grid.z_extent / float(grid.z_bins)
# Return the minimum scale
return min(min_x_scale, min_y_scale, min_z_scale)
# Octtree
elif isinstance(grid, OctTreeDustGrid):
extent = grid.smallest_extent
max_level = grid.max_level
return smallest_scale_for_max_level_octtree(extent, max_level)
# Binary tree
elif isinstance(grid, BinaryTreeDustGrid):
extent = grid.smallest_extent
max_level = grid.max_level
return smallest_scale_for_max_level_bintree(extent, max_level)
# Other
else: raise NotImplementedError("Other dust grids not implemented")
# -----------------------------------------------------------------
|
agpl-3.0
| 7,276,239,575,000,865,000
| 29.913333
| 224
| 0.528575
| false
| 3.771759
| false
| false
| false
|
ticosax/django-fsm-log
|
setup.py
|
1
|
1452
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-fsm-log',
version='1.7.0dev',
description='Logging for django-fsm',
author='Gizmag',
author_email='tech@gizmag.com',
url='https://github.com/gizmag/django-fsm-log',
license='MIT',
packages=find_packages(),
install_requires=['django>=1.8', 'django_fsm>=2', 'django_appconf'],
extras_require={
'testing': [
'pytest',
'pytest-cov',
'pytest-django',
'pytest-mock',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
mit
| 2,992,084,338,474,701,300
| 32
| 72
| 0.554408
| false
| 4.148571
| false
| false
| false
|
Microsoft/PTVS-Samples
|
PollsDjango/PollsDjango/urls.py
|
1
|
1210
|
"""
Definition of urls for $safeprojectname$.
"""
from datetime import datetime
from django.conf.urls import url, include
from django.contrib import admin
import django.contrib.auth.views
import app.forms
import app.views
admin.autodiscover()
urlpatterns = [
url(r'^', include('app.urls', namespace="app")),
url(r'^contact$', app.views.contact, name='contact'),
url(r'^about', app.views.about, name='about'),
url(r'^seed', app.views.seed, name='seed'),
url(r'^login/$',
django.contrib.auth.views.login,
{
'template_name': 'app/login.html',
'authentication_form': app.forms.BootstrapAuthenticationForm,
'extra_context':
{
'title': 'Log in',
'year': datetime.now().year,
}
},
name='login'),
url(r'^logout$',
django.contrib.auth.views.logout,
{
'next_page': '/',
},
name='logout'),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
]
|
apache-2.0
| 3,477,789,548,203,435,000
| 26.139535
| 73
| 0.561983
| false
| 3.829114
| false
| false
| false
|
VirusTotal/msticpy
|
msticpy/nbtools/security_event.py
|
1
|
3507
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Module for SecurityEvent class."""
from typing import List, Dict, Any
import pandas as pd
from .entityschema import Entity, Host, Process, Account, IpAddress, HostLogonSession
from .security_base import SecurityBase
from ..common.utility import export
from .._version import VERSION
__version__ = VERSION
__author__ = "Ian Hellen"
@export
class SecurityEvent(SecurityBase):
"""SecurityEvent class."""
def __init__(self, src_row: pd.Series = None):
"""
Instantiate new instance of SecurityEvent.
:param src_row: Pandas series containing single security event
"""
self._source_data = src_row
super().__init__(src_row=src_row)
self._extract_entities(src_row)
self._find_os_family()
# Properties
@property
def entities(self) -> List[Entity]:
"""
Return the list of entities extracted from the event.
Returns
-------
List[Entity]
The list of entities extracted from the event.
"""
return list(self._entities)
@property
def query_params(self) -> Dict[str, Any]:
"""
Query parameters derived from alert.
Returns
-------
Dict[str, Any]
Dictionary of parameter names
"""
return super().query_params
# index operator override
def __getattr__(self, name):
"""Return the value of the named property 'name'."""
if name is not None and name in self._source_data:
return self._source_data[name]
return None
def _extract_entities(self, src_row):
if "EventID" in src_row:
host = Host(src_event=src_row)
self._entities.append(host)
event_id = str(src_row["EventID"])
if event_id == "4688":
event_proc = Process(src_event=src_row, role="new")
self._entities.append(event_proc)
event_proc["Host"] = host
if "ParentProcess" in event_proc:
self._entities.append(event_proc.ParentProcess)
if "ImageFile" in event_proc.ParentProcess:
self._entities.append(event_proc.ParentProcess.ImageFile)
logon_session = HostLogonSession(src_event=src_row)
logon_session.Host = host
if "Account" in event_proc:
logon_session.Account = event_proc.Account
event_proc.Account.Host = host
self._entities.append(event_proc.Account)
self._entities.append(logon_session)
if "ImageFile" in event_proc:
self._entities.append(event_proc.ImageFile)
if event_id in ("4624", "4625"):
subj_account = Account(src_event=src_row, role="subject")
subj_account.Host = host
self._entities.append(subj_account)
tgt_account = Account(src_event=src_row, role="target")
tgt_account.Host = host
self._entities.append(tgt_account)
self._entities.append(IpAddress(src_event=src_row))
|
mit
| -7,031,642,204,383,718,000
| 33.722772
| 85
| 0.551468
| false
| 4.496154
| false
| false
| false
|
frollo/EquationReader
|
equationReader.py
|
1
|
3520
|
#!/usr/bin/python
import sys
import re
import string
variables = {}
outs = {}
monomial = "([a-zA-z]+\d+)"
mn = re.compile(monomial)
def extractValues(strin):
xAddr1 = strin[2].strip()
xAddr2 = strin[4].strip()
if xAddr1 in variables:
x1 = variables[xAddr1]
else:
raise Exception("equationReader: variable " + xAddr1 + " not found")
if mn.match(xAddr2):
if xAddr2 in variables:
x2 = variables[xAddr2]
else:
raise Exception("equationReader: variable " + xAddr2 + " not found")
else:
x2 = bool(int(xAddr2))
return {'x1':x1, 'x2':x2}
if len(sys.argv) != 3:
raise Exception("Usage: equationReader <input file> <output file>")
fin = open(sys.argv[1], "r")
lines = fin.readlines()
inputs = re.compile("\d+ inputs")
outputs = re.compile("\d+ outputs")
for index, line in enumerate(lines):
if inputs.match(line):
#Creation of the x set
xLine = lines[index + 1]
xValueLine = lines[index + 2]
ins = string.split(xLine)
insValues = string.split(xValueLine)
if len(ins) != len(insValues):
print(line + xLine + xValueLine)
raise Exception("equationReader: you need to provide a starting value for each x inserted")
for i in range(len(ins)):
x = ins[i].strip()
variables[x] = bool(int(insValues[i]))
else:
if outputs.match(line):
#Creation of the y set
yLine = lines[index + 1]
ins = string.split(yLine, " ")
for y in ins:
                outs[y.strip()] = None  # strip() returns a new string; keep the cleaned name as the key
else:
            if line.strip() == "begin":  # readlines() keeps the trailing newline
#When the equations start we get to the next cicle which performs the calculations
break
#y = x + z
equation_XOR = re.compile(monomial + " = " + monomial + " \+ (" + monomial + "|(0|1))")
#y = x * z
equation_AND = re.compile(monomial + " = " + monomial + " \* (" + monomial + "|(0|1))")
#y = x
equation_ASSIGNEMENT = re.compile(monomial + " = (" + monomial + "|(0|1))")
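# Illustrative input file for the parser above (hypothetical example; variable
# names must match the `monomial` pattern, i.e. letters followed by digits):
#
#   2 inputs
#   x1 x2
#   1 0
#   1 outputs
#   y1
#   begin
#   y1 = x1 + x2
#
# With these starting values the script would write "y1 = 1" (x1 XOR x2) to the
# output file.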
for index, line in enumerate(lines):
tmp = string.split(line, " ")
print(line)
if equation_XOR.match(line):
xdict = extractValues(tmp)
yAddr = tmp[0]
y = xdict['x1'] ^ xdict['x2']
variables[yAddr] = y
if yAddr in outs:
outs[yAddr] = y
else:
if equation_AND.match(line):
xdict = extractValues(tmp)
yAddr = tmp[0]
y = xdict['x1'] & xdict['x2']
variables[yAddr] = y
if yAddr in outs:
outs[yAddr] = y
else:
if equation_ASSIGNEMENT.match(line):
yAddr = tmp[0].strip()
xAddr = tmp[2].strip()
if mn.match(xAddr):
if xAddr in variables:
x = variables[xAddr]
else:
raise Exception("equationReader: variable " + xAddr + " not found")
else:
                    x = bool(int(xAddr))  # parse the 0/1 literal, as in extractValues()
y = x
variables[yAddr] = y
if yAddr in outs:
outs[yAddr] = y
else:
print("Skipping malformed equation:" + line)
#Printing out the results
fin.close()
fout = open(sys.argv[2], "w")
for key, value in outs.items():
fout.write(key + " = {}\n".format(int(value)))
fout.close()
|
gpl-3.0
| 5,119,402,926,935,882,000
| 31
| 103
| 0.509091
| false
| 3.610256
| false
| false
| false
|
DavideCanton/Python3
|
concur/AABABB.py
|
1
|
1342
|
import contextlib
import threading
import io
from concur import KamiSemaphore
class A(threading.Thread):
def __init__(self, semA, semB, mutex):
threading.Thread.__init__(self)
self.setName("A")
self.semA = semA
self.semB = semB
self.mutex = mutex
def run(self):
self.semA.acquire(2)
with self.mutex:
print("A", end="")
self.semB.release(2)
class B(threading.Thread):
def __init__(self, semA, semB, mutex):
threading.Thread.__init__(self)
self.setName("B")
self.semA = semA
self.semB = semB
self.mutex = mutex
def run(self):
self.semB.acquire()
with self.mutex:
print("B", end="")
self.semA.release()
if __name__ == "__main__":
output = io.StringIO()
with contextlib.redirect_stdout(output):
semA = KamiSemaphore.KamiSemaphoreT(5)
semB = KamiSemaphore.KamiSemaphoreT(-3)
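        # Why these initial counts force "AABABB" (explanatory note, assuming
        # KamiSemaphoreT blocks until its counter covers the requested amount):
        # semA = 5 lets two A threads take 2 permits each ("AA"), the third A waits;
        # semB = -3 only reaches 1 after both of those releases, so exactly one B
        # runs ("B"); its release of semA unblocks the last A ("A"), whose release
        # of 2 permits lets the remaining two B threads finish ("BB").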
par = {"semA": semA, "semB": semB, "mutex": threading.Lock()}
threads = [A(**par) for i in range(3)] + [B(**par) for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
result = output.getvalue()
if result == "AABABB":
print("OK")
else:
print("NO: {}".format(result))
|
gpl-3.0
| 6,614,634,021,793,715,000
| 23.851852
| 77
| 0.544709
| false
| 3.476684
| false
| false
| false
|
Talvalin/server-client-python
|
tableauserverclient/models/project_item.py
|
1
|
2738
|
import xml.etree.ElementTree as ET
from .property_decorators import property_is_enum, property_not_empty
from .. import NAMESPACE
class ProjectItem(object):
class ContentPermissions:
LockedToProject = 'LockedToProject'
ManagedByOwner = 'ManagedByOwner'
def __init__(self, name, description=None, content_permissions=None):
self._content_permissions = None
self._id = None
self.description = description
self.name = name
self.content_permissions = content_permissions
@property
def content_permissions(self):
return self._content_permissions
@content_permissions.setter
@property_is_enum(ContentPermissions)
def content_permissions(self, value):
self._content_permissions = value
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@name.setter
@property_not_empty
def name(self, value):
self._name = value
def is_default(self):
return self.name.lower() == 'default'
def _parse_common_tags(self, project_xml):
if not isinstance(project_xml, ET.Element):
project_xml = ET.fromstring(project_xml).find('.//t:project', namespaces=NAMESPACE)
if project_xml is not None:
(_, name, description, content_permissions) = self._parse_element(project_xml)
self._set_values(None, name, description, content_permissions)
return self
def _set_values(self, project_id, name, description, content_permissions):
if project_id is not None:
self._id = project_id
if name:
self._name = name
if description:
self.description = description
if content_permissions:
self._content_permissions = content_permissions
@classmethod
def from_response(cls, resp):
all_project_items = list()
parsed_response = ET.fromstring(resp)
all_project_xml = parsed_response.findall('.//t:project', namespaces=NAMESPACE)
for project_xml in all_project_xml:
(id, name, description, content_permissions) = cls._parse_element(project_xml)
project_item = cls(name)
project_item._set_values(id, name, description, content_permissions)
all_project_items.append(project_item)
return all_project_items
@staticmethod
def _parse_element(project_xml):
id = project_xml.get('id', None)
name = project_xml.get('name', None)
description = project_xml.get('description', None)
content_permissions = project_xml.get('contentPermissions', None)
return id, name, description, content_permissions
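# Illustrative usage sketch (not part of the original module; `resp` is assumed
# to be the raw XML body of a Tableau REST "query projects" response):
#
#   projects = ProjectItem.from_response(resp)
#   for project in projects:
#       print(project.id, project.name, project.content_permissions)
#   default = [p for p in projects if p.is_default()]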
|
mit
| 9,043,964,154,326,998,000
| 32.390244
| 95
| 0.642805
| false
| 4.258165
| false
| false
| false
|
cmuphyscomp/hmv-s16
|
Grasshopper/MocapDemo/GestureLogic.py
|
1
|
19991
|
# GestureLogic - state machine for interface logic for the gesture-based editing.
#
# This encompasses all the logic for the editor which is easier to write in
# Python than Grasshopper objects. Only one GestureLogic instance is expected to
# exist since it holds and tracks user inputs.
#
# Objectives for this block:
#
# 1. Only hold transient user state. All persistent data is read from the
# RhinoDoc, manipulated in a transient way, and then written back to the
# RhinoDoc or discarded.
#
# 2. Emit signals to read and write the model rather than directly manipulate
# the RhinoDoc. This does increase the number of I/O variables, but
# is intended to make the operation easier to observe, debug, and extend.
#
# inputs:
# name - name token string for sticky
# reset - bool to reset to lowest value
# gesture - None or relative sample index (integer) of detected gesture event
# cursor - list of Planes with recent cursor object trajectory
# poses - list of Planes saved at gesture events
# mode
# update
# selection
# selguids
# setselect
# clear
# all_names
# layer_name - name of the editable layer in the RhinoDoc
# create_interval - integer number of cycles between creating new objects
# Note: the following must have 'List Access' set: cursor, poses, selection, selguids, all_names
#
# outputs:
# out - log string output for display; the log is persistent to reduce the amount of flickering
# add
# move
# names
# newloc
# status
# objects
# guids
# xform
################################################################
import scriptcontext as sc
import clr
import System.Guid
import math
import Rhino
import pythonlibpath; pythonlibpath.add_library_path()
import ghutil.ghrhinodoc as ghrhinodoc
################################################################
class EditorLogic(object):
"""Utility class to manage the state of the interactive editor."""
def __init__(self, _layer_name = 'Cards', _all_names = []):
self.layer_name = _layer_name
self._last_setselect = False # debounce variable
self.attached = False
self.interval_counter = 0
self.mocap_dt = 1.0 / 120 # sampling rate of the motion capture stream
# list of strings in which to accumulate messages for output
self.log = ["Editor initialized for layer %s." % self.layer_name]
# initialize the default sets of object names based on those found on the RhinoDoc layer
self._update_namesets(_all_names)
# freshly created objects: poses and names
self.new_object_poses = list()
self.new_object_names = list()
# the current selection
self.selection = None
self.docguids = None
self.selection_bb = None
self.selection_bb_size = None
# coordinate transformation for group edits
self.transform = None
self.motion = None
self.xforms = [] # list of transforms, one per selected object
return
def add_new_object_pose(self, plane):
if plane is not None:
name = self.choose_new_name()
if name is not None:
self.new_object_poses.append(plane)
self.new_object_names.append(name)
return
def clear_edits(self):
"""Reset all transient editor state, either at user request or after editing cycle is complete."""
self.new_object_poses = list()
self.new_object_names = list()
self.attached = False
self.selection = None
self.docguids = None
return
def logprint(self, msg):
self.log.append(msg)
def clear_log(self):
self.log = []
def set_namesets(self, all_names, used_names):
"""Update the name manager given a list of all possible object names and the list of object names currently in use."""
self.all_names = set(all_names)
self.used_names = set()
# check for duplicate names
for used in used_names:
if used in self.used_names:
self.logprint("Warning: object name %s appears more than once." % used)
else:
self.used_names.add(used)
# check for used names not listed in the all_names set
invalid_names = self.used_names - self.all_names
if invalid_names:
self.logprint("Warning: invalid names in use: %s" % invalid_names)
# compute the list of available names
self.unused_names = self.all_names - self.used_names
self.logprint("Found the following unused object names: %s" % self.unused_names)
return
def choose_new_name(self):
"""Pick an name arbitrarily from the set of unused names."""
if len(self.unused_names) == 0:
self.logprint("Warning: no more object names available.")
return None
# return new names in numerical order for clarity
new_name = sorted(self.unused_names)[0]
self.unused_names.remove(new_name)
return new_name
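    # Illustrative note (not part of the original component): with
    # all_names = {'card1', 'card2', 'card3'} and 'card2' already used on the
    # layer, set_namesets() leaves unused_names = {'card1', 'card3'}, and
    # choose_new_name() hands out 'card1' first (sorted order), then 'card3'.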
def _update_namesets(self, all_names):
        all_objects = ghrhinodoc.all_doc_objects(self.layer_name)
names = [obj.Attributes.Name for obj in all_objects]
self.set_namesets(all_names, names)
return
def _compute_set_bounding_box(self, selection):
if selection is None or len(selection) == 0:
return None
# compute bounding box for all objects in a set
boxes = [obj.GetBoundingBox(True) for obj in selection]
# compute union of all boxes
union = boxes[0]
# destructively merge this with the other boxes
for b in boxes[1:]:
union.Union(b)
return union, union.Diagonal.Length
def manage_user_selection(self, setselect, selection, selguids, all_names):
"""Process the user 'Set Selection' input, updating the editor state for any new
objects and names as needed."""
if setselect != self._last_setselect:
# debounce input to just trigger once
self._last_setselect = setselect
if setselect == True:
self.selection = selection
self.docguids = selguids
self.selection_bb, self.selection_bb_size = self._compute_set_bounding_box(selection)
self.logprint("Updated selection bounding box to %s, diagonal size %f" % (self.selection_bb, self.selection_bb_size))
# reset the pick and place state
self.attached = False
self.transform = None
self.xforms = []
self.logprint("Selection set with %d objects." % len(selection))
self._update_namesets(all_names)
#================================================================
def read_objects_from_layer(self, layer_name):
"""Read the user-visible names of all objects on a specific RhinoDoc layer.
Returns a tuple (geoms, guids, names) with lists of all geometry
objects, RhinoDoc GUID strings, and name attributes.
"""
layer_index = ghrhinodoc.fetch_or_create_layer_index(layer_name)
# Fetch all objects on the layer and report out individual properties.
all_objects = ghrhinodoc.all_doc_objects(layer_name)
geoms = [obj.Geometry for obj in all_objects]
guids = [str(obj.Id) for obj in all_objects]
names = [obj.Attributes.Name for obj in all_objects]
return geoms, guids, names
#================================================================
def update_tap_create_mode(self, gesture, cursor, update, clear):
"""Update state for the 'tap create' mode in which gestures create individual cards.
gesture - the integer gesture sample index or None
cursor - the list of recent cursor poses
returns newloc, names, add
"""
# default outputs
names, add = None, None
# tap create mode: each gesture creates a new object
if gesture is not None:
self.logprint("Gesture detected with sample offset %d." % gesture)
# gesture is an integer sample where zero is the most recent pose;
# index the current cursor poses from the end to select the correct
# pose
self.add_new_object_pose(cursor[gesture-1])
if clear == True:
self.logprint( "Abandoning editor changes (dropping new object poses).")
self.clear_edits()
# by default, always emit the new poses so they can be visualized
newloc = self.new_object_poses
if update == True:
self.logprint("Writing new objects to RhinoDoc: %s" % self.new_object_names)
names = self.new_object_names
add = True
self.clear_edits() # note: the current list has already been emitted, this just resets the buffer
return newloc, names, add
#================================================================
def update_path_create_mode(self, _gesture, _cursor, _update, _clear, _all_names, _create_rate):
"""Update state for the 'symbol sprayer' mode which places new objects along a
cursor path. Each gesture event toggles the creation events on or off.
returns newloc, names, add
"""
# default outputs
names, add = None, False
# detect the singular gesture events (for now, a flick of the wand)
if _gesture is not None:
if self.attached:
self.logprint("Creation path ended.")
else:
self.logprint("Creation path beginning.")
self.interval_counter = 0
self._update_namesets(_all_names)
# toggle the 'attached' state
self.attached = not self.attached
# while 'attached' to the sprayer, create new objects at regular intervals
if self.attached:
self.interval_counter += 1
if self.interval_counter > _create_rate:
self.interval_counter = 0
self.add_new_object_pose(_cursor[-1])
if _clear == True:
self.logprint( "Abandoning editor changes (dropping new object poses).")
self.clear_edits()
# by default, always emit the new poses so they can be visualized
newloc = self.new_object_poses
if _update == True:
self.logprint("Writing new objects to RhinoDoc: %s" % self.new_object_names)
names = self.new_object_names
add = True
self.clear_edits() # note: the current list has already been emitted, this just resets the buffer
return newloc, names, add
#================================================================
def update_block_move_mode(self, gesture, cursor, poses, update, clear):
"""Update state for the 'block move' mode in which each gesture alternately
attaches or detaches the selection from the cursor.
returns objects, guids, xform, move
"""
# set default outputs
objects = self.selection
guids = self.docguids
move = False
motion = Rhino.Geometry.Transform(1) # motion transform is identity value by default
if clear == True:
self.logprint("Abandoning editor changes (clearing movement).")
self.transform = None
# detect the singular gesture events (for now, a flick of the wand)
if gesture is not None:
# if we are ending a motion segment, save the most recent transformation as the new base transform
if self.attached:
self.transform = self.transform * self.motion
self.logprint("Motion ended, new transform saved.")
else:
self.logprint("Motion beginning.")
# toggle the 'attached' state
self.attached = not self.attached
if self.attached:
if len(poses) > 0 and len(cursor) > 0:
# compute a tranform the from most recent saved pose to the newest cursor position
motion = Rhino.Geometry.Transform.PlaneToPlane(poses[-1], cursor[-1])
# compute an output transformation from the accumulated transform plus any transient movement
if self.transform is None:
self.transform = Rhino.Geometry.Transform(1) # identity
xform = self.transform * motion
self.motion = motion
if update == True:
self.logprint("Updating RhinoDoc selection with new poses.")
move = True
self.clear_edits()
return objects, guids, xform, move
#================================================================
def update_path_move_mode(self, gesture, cursor, poses, update, clear):
"""Update state for the 'path move' mode in which each gesture toggles the
enable, and the cursor velocity affects object positions within a 'brush'
radius.
returns objects, guids, xform, move
"""
# set default outputs
objects = self.selection
guids = self.docguids
move = False
delta = Rhino.Geometry.Transform(1) # motion transform is identity value by default
# FIXME: this is probably moot
if self.transform is None:
self.transform = Rhino.Geometry.Transform(1) # identity
if self.selection is not None and (self.xforms is None or len(self.xforms) != len(self.selection)):
self.xforms = [Rhino.Geometry.Transform(1) for x in self.selection]
if clear == True:
self.logprint("Abandoning editor changes (clearing movement).")
self.transform = Rhino.Geometry.Transform(1)
# detect the singular gesture events (for now, a flick of the wand)
if gesture is not None:
# if we are ending a motion segment
if self.attached:
self.logprint("Motion deactivated.")
else:
self.logprint("Motion activated.")
# toggle the 'attached' state
self.attached = not self.attached
if self.attached:
if len(cursor) > 1 and cursor[-1] is not None and cursor[-2] is not None:
# Compute separate translation and rotation thresholds to
# determine whether the velocity is high enough to be a gesture.
# Find the rotation and translation between the last pair of samples:
rot = Rhino.Geometry.Quaternion.Rotation(cursor[-2], cursor[-1])
delta = cursor[-1].Origin - cursor[-2].Origin
displacement = delta.Length
# Convert the rotation to axis-angle form to find the magnitude. The function uses C# call by reference to return
# the parameters as 'out' values:
angle = clr.Reference[float]()
axis = clr.Reference[Rhino.Geometry.Vector3d]()
rot.GetRotation(angle, axis)
angle = angle.Value # get rid of the StrongBox around the number
axis = axis.Value # get rid of the StrongBox around the vector
# The angle is returned on (0,2*pi); manage the wraparound
if angle > math.pi:
angle -= 2*math.pi
# normalize to a velocity measure: m/sec, radians/sec
speed = displacement / self.mocap_dt
omega = angle / self.mocap_dt
# compute object to cursor distances
boxes = [obj.GetBoundingBox(False) for obj in self.selection]
center = cursor[-1].Origin
distances = [box.Center.DistanceTo(center) for box in boxes]
# Apply thresholds to determine whether the gesture represents intentional motion:
if speed > 1.0 and True:
self.transform = self.transform * Rhino.Geometry.Transform.Translation(delta)
if abs(omega) > 2.0 and True:
# self.logprint("detected motion on speed %f and angular rate %f" % (speed, omega))
# apply the movement to the output tranform
# FIXME: transform should be a list, one per object, selective via a spherical cursor
# choose a specific method from the set of overloaded signatures
Rotation_Factory = Rhino.Geometry.Transform.Rotation.Overloads[float, Rhino.Geometry.Vector3d, Rhino.Geometry.Point3d]
rot_xform = Rotation_Factory(angle, axis, center)
self.transform = self.transform * rot_xform
# Apply a weighted displacement to each object transform. The scaling matches the rolloff of the
# effect to be proportional to the size of the bounding box of the moving objects.
scale = 0.1 * self.selection_bb_size * self.selection_bb_size
weights = [min(1.0, scale/(dist*dist)) if dist > 0.0 else 1.0 for dist in distances]
# self.logprint("Weights: %s" % (weights,))
rotations = [Rotation_Factory(angle*weight, axis, center) for weight in weights]
self.xforms = [xform*rot for xform,rot in zip(self.xforms, rotations)]
if update == True:
self.logprint("Updating RhinoDoc selection with new poses.")
move = True
self.clear_edits()
return objects, guids, self.xforms, move
################################################################
# create or re-create the editor state as needed
editor = sc.sticky.get(name)
if editor is None or reset:
editor = EditorLogic('Cards', all_names)
sc.sticky[name] = editor
# set default output values
add = False
move = False
names = None
newloc = None
objects = None
guids = None
xform = None
if reset:
print "Interaction logic in reset state."
status = "Reset"
else:
# for all modes, record the set of selected objects when indicated
editor.manage_user_selection(setselect, selection, selguids, all_names)
# handle the state update for each individual mode
if mode == 1:
newloc, names, add = editor.update_tap_create_mode(gesture, cursor, update, clear)
elif mode == 2:
newloc, names, add = editor.update_path_create_mode(gesture, cursor, update, clear, all_names, create_interval)
elif mode == 3:
objects, guids, xform, move = editor.update_block_move_mode(gesture, cursor, poses, update, clear)
elif mode == 4:
objects, guids, xform, move = editor.update_path_move_mode(gesture, cursor, poses, update, clear)
# emit terse status for remote panel
status = "M:%s C:%d P:%d N:%d" % (editor.attached, len(cursor), len(poses), len(editor.new_object_poses))
# re-emit the log output
for msg in editor.log: print msg
|
bsd-3-clause
| -4,635,597,503,302,952,000
| 40.170886
| 144
| 0.573615
| false
| 4.347325
| false
| false
| false
|
marianotepper/nmu_rfit
|
rnmu/pme/sampling.py
|
1
|
2096
|
from __future__ import absolute_import
import numpy as np
import collections
try:
from itertools import imap
except ImportError:
imap = map
try:
from functools import reduce
except ImportError:
pass
class SampleSet(collections.MutableSet):
def __init__(self):
self._dict = {}
self._len = 0
def __contains__(self, sample):
try:
return reduce(lambda d, k: d[k], sample, self._dict)
except KeyError:
return False
def __len__(self):
return self._len
def add(self, sample):
d = self._dict
for i, s in enumerate(sample):
if i == len(sample) - 1:
d[s] = True
continue
if s not in d:
d[s] = {}
d = d[s]
self._len += 1
def discard(self, sample):
pass
def __iter__(self):
pass
class UniformSampler(object):
def __init__(self, n_samples=None, seed=None):
self.n_samples = n_samples
self.sample_set = SampleSet()
if seed is not None:
np.random.seed(seed)
def generate(self, x, min_sample_size):
n_elements = len(x)
all_elems = np.arange(n_elements)
for _ in range(self.n_samples):
sample = np.random.choice(all_elems, size=min_sample_size,
replace=False)
if sample not in self.sample_set:
self.sample_set.add(sample)
yield sample
class ModelGenerator(object):
def __init__(self, model_class, sampler):
self._sampler = sampler
self.model_class = model_class
self.elements = None
@property
def n_samples(self):
return self._sampler.n_samples
def __iter__(self):
def generate(s):
ms_set = np.take(self.elements, s, axis=0)
return self.model_class(ms_set)
samples = self._sampler.generate(self.elements,
self.model_class().min_sample_size)
return imap(generate, samples)
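# Illustrative usage sketch (LineModel is a hypothetical model class that can be
# built with no arguments, exposes `min_sample_size`, and accepts a subset of
# points in its constructor):
#
#   sampler = UniformSampler(n_samples=100, seed=0)
#   generator = ModelGenerator(LineModel, sampler)
#   generator.elements = points            # ndarray of shape (n_points, dim)
#   models = list(generator)               # one candidate model per distinct sample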
|
bsd-3-clause
| -3,478,362,334,029,783,600
| 25.2
| 76
| 0.537214
| false
| 4.038536
| false
| false
| false
|
wimberosa/samba
|
source4/scripting/python/samba/netcmd/common.py
|
1
|
2466
|
#!/usr/bin/env python
#
# common functions for samba-tool python commands
#
# Copyright Andrew Tridgell 2010
# Copyright Giampaolo Lauria 2011 <lauria2@yahoo.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import re
from samba.dcerpc import nbt
from samba.net import Net
def _get_user_realm_domain(user):
""" get the realm or the domain and the base user
from user like:
* username
* DOMAIN\username
* username@REALM
"""
baseuser = user
realm = ""
domain = ""
m = re.match(r"(\w+)\\(\w+$)", user)
if m:
domain = m.group(1)
baseuser = m.group(2)
return (baseuser.lower(), domain.upper(), realm)
m = re.match(r"(\w+)@(\w+)", user)
if m:
baseuser = m.group(1)
realm = m.group(2)
        return (baseuser.lower(), domain, realm.upper())
    # plain "username" case from the docstring: no domain or realm given
    return (baseuser.lower(), domain, realm)
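# Illustrative examples for _get_user_realm_domain (not part of the original
# module):
#   _get_user_realm_domain(r"EXAMPLE\Alice")  -> ("alice", "EXAMPLE", "")
#   _get_user_realm_domain("Alice@EXAMPLE")   -> ("alice", "", "EXAMPLE")
#   _get_user_realm_domain("alice")           -> ("alice", "", "")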
def netcmd_dnsname(lp):
'''return the full DNS name of our own host. Used as a default
for hostname when running status queries'''
return lp.get('netbios name').lower() + "." + lp.get('realm').lower()
def netcmd_finddc(lp, creds, realm=None):
'''Return domain-name of a writable/ldap-capable DC for the default
domain (parameter "realm" in smb.conf) unless another realm has been
specified as argument'''
net = Net(creds=creds, lp=lp)
if realm is None:
realm = lp.get('realm')
cldap_ret = net.finddc(domain=realm,
flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS | nbt.NBT_SERVER_WRITABLE)
return cldap_ret.pdc_dns_name
def netcmd_get_domain_infos_via_cldap(lp, creds, address=None):
'''Return domain informations (CLDAP record) of the ldap-capable
DC with the specified address'''
net = Net(creds=creds, lp=lp)
cldap_ret = net.finddc(address=address,
flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS)
return cldap_ret
|
gpl-3.0
| 3,366,601,980,079,461,000
| 32.780822
| 88
| 0.663017
| false
| 3.42025
| false
| false
| false
|
jptomo/rpython-lang-scheme
|
rpython/rlib/parsing/makepackrat.py
|
1
|
24419
|
from __future__ import with_statement
import py
import sys
from rpython.rlib.parsing.tree import Nonterminal, Symbol, RPythonVisitor
from rpython.rlib.parsing.codebuilder import Codebuilder
from rpython.rlib.objectmodel import we_are_translated
class BacktrackException(Exception):
def __init__(self, error=None):
self.error = error
if not we_are_translated():
Exception.__init__(self, error)
class TreeOptimizer(RPythonVisitor):
def visit_or(self, t):
if len(t.children) == 1:
return self.dispatch(t.children[0])
return self.general_nonterminal_visit(t)
visit_commands = visit_or
def visit_negation(self, t):
child = self.dispatch(t.children[0])
if child.symbol == "negation":
child.symbol = "lookahead"
return child
t.children[0] = child
return t
def general_nonterminal_visit(self, t):
for i in range(len(t.children)):
t.children[i] = self.dispatch(t.children[i])
return t
def general_visit(self, t):
return t
syntax = r"""
NAME:
`[a-zA-Z_][a-zA-Z0-9_]*`;
SPACE:
' ';
COMMENT:
`( *#[^\n]*\n)+`;
IGNORE:
`(#[^\n]*\n)|\n|\t| `;
newline:
COMMENT
| `( *\n *)*`;
REGEX:
r = `\`[^\\\`]*(\\.[^\\\`]*)*\``
return {Symbol('REGEX', r, None)};
QUOTE:
r = `'[^\']*'`
return {Symbol('QUOTE', r, None)};
PYTHONCODE:
r = `\{[^\n\}]*\}`
return {Symbol('PYTHONCODE', r, None)};
EOF:
!__any__;
file:
IGNORE*
list
[EOF];
list:
content = production+
return {Nonterminal('list', content)};
production:
name = NAME
SPACE*
args = productionargs
':'
IGNORE*
what = or_
IGNORE*
';'
IGNORE*
return {Nonterminal('production', [name, args, what])};
productionargs:
'('
IGNORE*
args = (
NAME
[
IGNORE*
','
IGNORE*
]
)*
arg = NAME
IGNORE*
')'
IGNORE*
return {Nonterminal('productionargs', args + [arg])}
| return {Nonterminal('productionargs', [])};
or_:
l = (commands ['|' IGNORE*])+
last = commands
return {Nonterminal('or', l + [last])}
| commands;
commands:
cmd = command
newline
cmds = (command [newline])+
return {Nonterminal('commands', [cmd] + cmds)}
| command;
command:
simplecommand;
simplecommand:
return_
| if_
| named_command
| repetition
| choose
| negation;
return_:
'return'
SPACE*
code = PYTHONCODE
IGNORE*
return {Nonterminal('return', [code])};
if_:
'do'
newline
cmd = command
SPACE*
'if'
SPACE*
condition = PYTHONCODE
IGNORE*
return {Nonterminal('if', [cmd, condition])}
| 'if'
SPACE*
condition = PYTHONCODE
IGNORE*
return {Nonterminal('if', [condition])};
choose:
'choose'
SPACE*
name = NAME
SPACE*
'in'
SPACE*
expr = PYTHONCODE
IGNORE*
cmds = commands
return {Nonterminal('choose', [name, expr, cmds])};
commandchain:
result = simplecommand+
return {Nonterminal('commands', result)};
named_command:
name = NAME
SPACE*
'='
SPACE*
cmd = command
return {Nonterminal('named_command', [name, cmd])};
repetition:
what = enclosed
SPACE* '?' IGNORE*
return {Nonterminal('maybe', [what])}
| what = enclosed
SPACE*
repetition = ('*' | '+')
IGNORE*
return {Nonterminal('repetition', [repetition, what])};
negation:
'!'
SPACE*
what = negation
IGNORE*
return {Nonterminal('negation', [what])}
| enclosed;
enclosed:
'<'
IGNORE*
what = primary
IGNORE*
'>'
IGNORE*
return {Nonterminal('exclusive', [what])}
| '['
IGNORE*
what = or_
IGNORE*
']'
IGNORE*
return {Nonterminal('ignore', [what])}
| ['(' IGNORE*] or_ [')' IGNORE*]
| primary;
primary:
call | REGEX [IGNORE*] | QUOTE [IGNORE*];
call:
x = NAME
args = arguments
IGNORE*
return {Nonterminal("call", [x, args])};
arguments:
'('
IGNORE*
args = (
PYTHONCODE
[IGNORE* ',' IGNORE*]
)*
last = PYTHONCODE
')'
IGNORE*
return {Nonterminal("args", args + [last])}
| return {Nonterminal("args", [])};
"""
class ErrorInformation(object):
def __init__(self, pos, expected=None):
if expected is None:
expected = []
self.expected = expected
self.pos = pos
def __str__(self):
return "ErrorInformation(%s, %s)" % (self.pos, self.expected)
def get_line_column(self, source):
pos = self.pos
assert pos >= 0
uptoerror = source[:pos]
lineno = uptoerror.count("\n")
columnno = pos - uptoerror.rfind("\n")
return lineno, columnno
def nice_error_message(self, filename='<filename>', source=""):
if source:
lineno, columnno = self.get_line_column(source)
result = [" File %s, line %s" % (filename, lineno + 1)]
result.append(source.split("\n")[lineno])
result.append(" " * columnno + "^")
else:
            result = ["<couldn't get source>"]
if self.expected:
failure_reasons = self.expected
if len(failure_reasons) > 1:
all_but_one = failure_reasons[:-1]
last = failure_reasons[-1]
expected = "%s or '%s'" % (
", ".join(["'%s'" % e for e in all_but_one]), last)
else:
expected = failure_reasons[0]
result.append("ParseError: expected %s" % (expected, ))
else:
result.append("ParseError")
return "\n".join(result)
class Status(object):
# status codes:
NORMAL = 0
ERROR = 1
INPROGRESS = 2
LEFTRECURSION = 3
SOMESOLUTIONS = 4
_annspecialcase_ = 'specialize:ctr_location' # polymorphic
def __repr__(self):
return "Status(%s, %s, %s, %s)" % (self.pos, self.result, self.error,
self.status)
def __init__(self):
self.pos = 0
self.error = None
self.status = self.INPROGRESS
self.result = None
class ParserBuilder(RPythonVisitor, Codebuilder):
def __init__(self):
Codebuilder.__init__(self)
self.initcode = []
self.names = {}
self.matchers = {}
def make_parser(self):
m = {'Status': Status,
'Nonterminal': Nonterminal,
'Symbol': Symbol,}
exec py.code.Source(self.get_code()).compile() in m
return m['Parser']
def memoize_header(self, name, args):
dictname = "_dict_%s" % (name, )
self.emit_initcode("self.%s = {}" % (dictname, ))
if args:
self.emit("_key = (self._pos, %s)" % (", ".join(args)))
else:
self.emit("_key = self._pos")
self.emit("_status = self.%s.get(_key, None)" % (dictname, ))
with self.block("if _status is None:"):
self.emit("_status = self.%s[_key] = Status()" % (
dictname, ))
with self.block("else:"):
self.emit("_statusstatus = _status.status")
with self.block("if _statusstatus == _status.NORMAL:"):
self.emit("self._pos = _status.pos")
self.emit("return _status")
with self.block("elif _statusstatus == _status.ERROR:"):
self.emit("raise BacktrackException(_status.error)")
if self.have_call:
with self.block(
"elif (_statusstatus == _status.INPROGRESS or\n"
" _statusstatus == _status.LEFTRECURSION):"):
self.emit("_status.status = _status.LEFTRECURSION")
with self.block("if _status.result is not None:"):
self.emit("self._pos = _status.pos")
self.emit("return _status")
with self.block("else:"):
self.emit("raise BacktrackException(None)")
with self.block(
"elif _statusstatus == _status.SOMESOLUTIONS:"):
self.emit("_status.status = _status.INPROGRESS")
self.emit("_startingpos = self._pos")
self.start_block("try:")
self.emit("_result = None")
self.emit("_error = None")
def memoize_footer(self, name, args):
dictname = "_dict_%s" % (name, )
if self.have_call:
with self.block(
"if _status.status == _status.LEFTRECURSION:"):
with self.block("if _status.result is not None:"):
with self.block("if _status.pos >= self._pos:"):
self.emit("_status.status = _status.NORMAL")
self.emit("self._pos = _status.pos")
self.emit("return _status")
self.emit("_status.pos = self._pos")
self.emit("_status.status = _status.SOMESOLUTIONS")
self.emit("_status.result = %s" % (self.resultname, ))
self.emit("_status.error = _error")
self.emit("self._pos = _startingpos")
self.emit("return self._%s(%s)" % (name, ', '.join(args)))
else:
self.emit("assert _status.status != _status.LEFTRECURSION")
self.emit("_status.status = _status.NORMAL")
self.emit("_status.pos = self._pos")
self.emit("_status.result = %s" % (self.resultname, ))
self.emit("_status.error = _error")
self.emit("return _status")
self.end_block("try")
with self.block("except BacktrackException, _exc:"):
self.emit("_status.pos = -1")
self.emit("_status.result = None")
self.combine_error('_exc.error')
self.emit("_status.error = _error")
self.emit("_status.status = _status.ERROR")
self.emit("raise BacktrackException(_error)")
def choice_point(self, name=None):
var = "_choice%s" % (self.namecount, )
self.namecount += 1
self.emit("%s = self._pos" % (var, ))
return var
def revert(self, var):
self.emit("self._pos = %s" % (var, ))
def visit_list(self, t):
self.start_block("class Parser(object):")
for elt in t.children:
self.dispatch(elt)
with self.block("def __init__(self, inputstream):"):
for line in self.initcode:
self.emit(line)
self.emit("self._pos = 0")
self.emit("self._inputstream = inputstream")
if self.matchers:
self.emit_regex_code()
self.end_block("class")
def emit_regex_code(self):
for regex, matcher in self.matchers.iteritems():
with self.block(
"def _regex%s(self):" % (abs(hash(regex)), )):
c = self.choice_point()
self.emit("_runner = self._Runner(self._inputstream, self._pos)")
self.emit("_i = _runner.recognize_%s(self._pos)" % (
abs(hash(regex)), ))
self.start_block("if _runner.last_matched_state == -1:")
self.revert(c)
self.emit("raise BacktrackException")
self.end_block("if")
self.emit("_upto = _runner.last_matched_index + 1")
self.emit("_pos = self._pos")
self.emit("assert _pos >= 0")
self.emit("assert _upto >= 0")
self.emit("_result = self._inputstream[_pos: _upto]")
self.emit("self._pos = _upto")
self.emit("return _result")
with self.block("class _Runner(object):"):
with self.block("def __init__(self, text, pos):"):
self.emit("self.text = text")
self.emit("self.pos = pos")
self.emit("self.last_matched_state = -1")
self.emit("self.last_matched_index = -1")
self.emit("self.state = -1")
for regex, matcher in self.matchers.iteritems():
matcher = str(matcher).replace(
"def recognize(runner, i)",
"def recognize_%s(runner, i)" % (abs(hash(regex)), ))
self.emit(str(matcher))
def visit_production(self, t):
name = t.children[0]
if name in self.names:
raise Exception("name %s appears twice" % (name, ))
self.names[name] = True
otherargs = t.children[1].children
argswithself = ", ".join(["self"] + otherargs)
argswithoutself = ", ".join(otherargs)
with self.block("def %s(%s):" % (name, argswithself)):
self.emit("return self._%s(%s).result" % (name, argswithoutself))
self.start_block("def _%s(%s):" % (name, argswithself, ))
self.namecount = 0
self.resultname = "_result"
self.have_call = False
self.created_error = False
allother = self.store_code_away()
self.dispatch(t.children[-1])
subsequent = self.restore_code(allother)
self.memoize_header(name, otherargs)
self.add_code(subsequent)
self.memoize_footer(name, otherargs)
self.end_block("def")
def visit_or(self, t, first=False):
possibilities = t.children
if len(possibilities) > 1:
self.start_block("while 1:")
for i, p in enumerate(possibilities):
c = self.choice_point()
with self.block("try:"):
self.dispatch(p)
self.emit("break")
with self.block("except BacktrackException, _exc:"):
self.combine_error('_exc.error')
self.revert(c)
if i == len(possibilities) - 1:
self.emit("raise BacktrackException(_error)")
self.dispatch(possibilities[-1])
if len(possibilities) > 1:
self.emit("break")
self.end_block("while")
def visit_commands(self, t):
for elt in t.children:
self.dispatch(elt)
def visit_maybe(self, t):
c = self.choice_point()
with self.block("try:"):
self.dispatch(t.children[0])
with self.block("except BacktrackException:"):
self.revert(c)
def visit_repetition(self, t):
name = "_all%s" % (self.namecount, )
self.namecount += 1
self.emit("%s = []" % (name, ))
if t.children[0] == '+':
self.dispatch(t.children[1])
self.emit("%s.append(_result)" % (name, ))
with self.block("while 1:"):
c = self.choice_point()
with self.block("try:"):
self.dispatch(t.children[1])
self.emit("%s.append(_result)" % (name, ))
with self.block("except BacktrackException, _exc:"):
self.combine_error('_exc.error')
self.revert(c)
self.emit("break")
self.emit("_result = %s" % (name, ))
def visit_exclusive(self, t):
self.resultname = "_enclosed"
self.dispatch(t.children[0])
self.emit("_enclosed = _result")
def visit_ignore(self, t):
resultname = "_before_discard%i" % (self.namecount, )
self.namecount += 1
self.emit("%s = _result" % (resultname, ))
self.dispatch(t.children[0])
self.emit("_result = %s" % (resultname, ))
def visit_negation(self, t):
c = self.choice_point()
resultname = "_stored_result%i" % (self.namecount, )
self.namecount += 1
child = t.children[0]
self.emit("%s = _result" % (resultname, ))
with self.block("try:"):
self.dispatch(child)
with self.block("except BacktrackException:"):
self.revert(c)
self.emit("_result = %s" % (resultname, ))
with self.block("else:"):
# heuristic to get nice error messages sometimes
if isinstance(child, Symbol) and child.symbol == "QUOTE":
error = "self._ErrorInformation(%s, ['NOT %s'])" % (
c, child.additional_info[1:-1], )
else:
error = "None"
self.emit("raise BacktrackException(%s)" % (error, ))
def visit_lookahead(self, t):
resultname = "_stored_result%i" % (self.namecount, )
self.emit("%s = _result" % (resultname, ))
c = self.choice_point()
self.dispatch(t.children[0])
self.revert(c)
self.emit("_result = %s" % (resultname, ))
def visit_named_command(self, t):
name = t.children[0]
self.dispatch(t.children[1])
self.emit("%s = _result" % (name, ))
def visit_return(self, t):
self.emit("_result = (%s)" % (t.children[0].additional_info[1:-1], ))
def visit_if(self, t):
if len(t.children) == 2:
self.dispatch(t.children[0])
with self.block("if not (%s):" % (
t.children[-1].additional_info[1:-1], )):
self.emit("raise BacktrackException(")
self.emit(" self._ErrorInformation(")
self.emit(" _startingpos, ['condition not met']))")
def visit_choose(self, t):
with self.block("for %s in (%s):" % (
t.children[0], t.children[1].additional_info[1:-1], )):
with self.block("try:"):
self.dispatch(t.children[2])
self.emit("break")
with self.block("except BacktrackException, _exc:"):
self.combine_error('_exc.error')
with self.block("else:"):
self.emit("raise BacktrackException(_error)")
def visit_call(self, t):
self.have_call = True
args = ", ".join(['(%s)' % (arg.additional_info[1:-1], )
for arg in t.children[1].children])
if t.children[0].startswith("_"):
callname = t.children[0]
self.emit("_result = self.%s(%s)" % (callname, args))
else:
callname = "_" + t.children[0]
self.emit("_call_status = self.%s(%s)" % (callname, args))
self.emit("_result = _call_status.result")
self.combine_error('_call_status.error')
def visit_REGEX(self, t):
r = t.additional_info[1:-1].replace('\\`', '`')
matcher = self.get_regex(r)
self.emit("_result = self._regex%s()" % (abs(hash(r)), ))
def visit_QUOTE(self, t):
self.emit("_result = self.__chars__(%r)" % (
str(t.additional_info[1:-1]), ))
def get_regex(self, r):
from rpython.rlib.parsing.regexparse import parse_regex
if r in self.matchers:
return self.matchers[r]
regex = parse_regex(r)
if regex is None:
            raise ValueError(
                "%s is not a valid regular expression" % (r, ))
automaton = regex.make_automaton().make_deterministic()
automaton.optimize()
matcher = automaton.make_lexing_code()
self.matchers[r] = py.code.Source(matcher)
return matcher
def combine_error(self, newerror):
if self.created_error:
self.emit(
"_error = self._combine_errors(_error, %s)" % (newerror, ))
else:
self.emit("_error = %s" % (newerror, ))
self.created_error = True
class MetaPackratParser(type):
def __new__(cls, name_, bases, dct):
if '__doc__' not in dct or dct['__doc__'] is None:
return type.__new__(cls, name_, bases, dct)
from pypackrat import PyPackratSyntaxParser
import sys, new, inspect
frame = sys._getframe(1)
source = dct['__doc__']
p = PyPackratSyntaxParser(source)
try:
t = p.file()
except BacktrackException, exc:
print exc.error.nice_error_message("<docstring>", source)
lineno, _ = exc.error.get_line_column(source)
errorline = source.split("\n")[lineno]
try:
code = frame.f_code
source = inspect.getsource(code)
lineno_in_orig = source.split("\n").index(errorline)
if lineno_in_orig >= 0:
print "probable error position:"
print "file:", code.co_filename
print "line:", lineno_in_orig + code.co_firstlineno + 1
except (IOError, ValueError):
pass
raise exc
t = t.visit(TreeOptimizer())
visitor = ParserBuilder()
t.visit(visitor)
pcls = visitor.make_parser()
forbidden = dict.fromkeys(("__weakref__ __doc__ "
"__dict__ __module__").split())
initthere = "__init__" in dct
#XXX XXX XXX
if 'BacktrackException' not in frame.f_globals:
raise Exception("must import BacktrackException")
if 'Status' not in frame.f_globals:
raise Exception("must import Status")
result = type.__new__(cls, name_, bases, dct)
for key, value in pcls.__dict__.iteritems():
if isinstance(value, type):
value.__module__ = result.__module__ #XXX help the annotator
if isinstance(value, type(lambda: None)):
value = new.function(value.func_code, frame.f_globals)
if not hasattr(result, key) and key not in forbidden:
setattr(result, key, value)
if result.__init__ == object.__init__:
result.__init__ = pcls.__dict__['__init__']
result.init_parser = pcls.__dict__['__init__']
result._code = visitor.get_code()
return result
class PackratParser(object):
__metaclass__ = MetaPackratParser
_ErrorInformation = ErrorInformation
_BacktrackException = BacktrackException
def __chars__(self, chars):
#print '__chars__(%s)' % (chars, ), self._pos
try:
for i in range(len(chars)):
if self._inputstream[self._pos + i] != chars[i]:
raise BacktrackException(
self._ErrorInformation(self._pos, [chars]))
self._pos += len(chars)
return chars
except IndexError:
raise BacktrackException(
self._ErrorInformation(self._pos, [chars]))
def __any__(self):
try:
result = self._inputstream[self._pos]
self._pos += 1
return result
except IndexError:
raise BacktrackException(
self._ErrorInformation(self._pos, ['anything']))
def _combine_errors(self, error1, error2):
if error1 is None:
return error2
if (error2 is None or error1.pos > error2.pos or
len(error2.expected) == 0):
return error1
elif error2.pos > error1.pos or len(error1.expected) == 0:
return error2
expected = []
already_there = {}
for ep in [error1.expected, error2.expected]:
for reason in ep:
if reason not in already_there:
already_there[reason] = True
expected.append(reason)
return ErrorInformation(error1.pos, expected)
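# Illustrative sketch of how a parser is declared (hypothetical grammar; the
# defining module must itself import BacktrackException and Status, since the
# metaclass checks for them in the caller's globals):
#
#   class IntListParser(PackratParser):
#       r"""
#       IGNORE:
#           ` |\n`;
#       NUMBER:
#           `[0-9]+`;
#       list:
#           ns = (NUMBER [IGNORE*])+
#           return {[int(n) for n in ns]};
#       """
#
#   IntListParser("1 2 3").list()   # -> [1, 2, 3]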
def test_generate():
f = py.path.local(__file__).dirpath().join("pypackrat.py")
from pypackrat import PyPackratSyntaxParser
p = PyPackratSyntaxParser(syntax)
t = p.file()
t = t.visit(TreeOptimizer())
visitor = ParserBuilder()
t.visit(visitor)
code = visitor.get_code()
content = """
from rpython.rlib.parsing.tree import Nonterminal, Symbol
from makepackrat import PackratParser, BacktrackException, Status
%s
class PyPackratSyntaxParser(PackratParser):
def __init__(self, stream):
self.init_parser(stream)
forbidden = dict.fromkeys(("__weakref__ __doc__ "
"__dict__ __module__").split())
initthere = "__init__" in PyPackratSyntaxParser.__dict__
for key, value in Parser.__dict__.iteritems():
if key not in PyPackratSyntaxParser.__dict__ and key not in forbidden:
setattr(PyPackratSyntaxParser, key, value)
PyPackratSyntaxParser.init_parser = Parser.__init__.im_func
""" % (code, )
print content
f.write(content)
|
mit
| -3,646,075,986,021,694,500
| 31.645722
| 81
| 0.528113
| false
| 3.803583
| false
| false
| false
|
lkmnds/dickord
|
console.py
|
1
|
1195
|
import logging
import traceback
import asyncio
import requests
import dickord
route = dickord.route
import config
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('userbot')
benis = dickord.Dicker(user_pass=('luna@localhost', 'fuck'))
@benis.sensor('READY')
async def ready_for_work(payload):
u = benis.user
logger.info(f"We are ready! name = {u.username}#{u.discriminator}, id = {u.id}")
logger.info("Requesting channel")
req = requests.get(route('channels/150501171201'), headers=benis.http.headers)
print(req)
print(req.json())
await asyncio.sleep(1)
logger.info('aaaa')
await benis.select_ass('portal 2 pinball')
await asyncio.sleep(0.5)
logger.info('sending typing')
req = requests.post(route('channels/150501171201/typing'), headers=benis.http.headers)
print(req)
print(req.text)
await asyncio.sleep(1)
logger.info('meme')
req = await benis.http.insert_benis('channels/150501171201/messages', \
{'content': 'meme'})
print(req)
print(req.text)
res, err_msg = benis.infinite_insert()
if not res:
print(f"Errored somewhere: {err_msg}")
else:
print("Exited with success")
|
mit
| -1,654,092,541,386,538,200
| 24.425532
| 90
| 0.68954
| false
| 3.111979
| false
| false
| false
|
samuelefiorini/cgm-tools
|
scripts/run_kf.py
|
1
|
2960
|
"""KF experiments development."""
from cgmtools import utils
from cgmtools import plotting
from cgmtools.forecast import kf
import datetime
import numpy as np
import pickle as pkl
###############################################################################
# Load full data set from pickle file (see data_wrangler.py)
dfs_full = pkl.load(open('../../data/dfs_py3.pkl', 'rb'))
# Keep only patients with more than `THRESHOLD` days of CGM acquisition
_threshold = datetime.timedelta(days=3.5) # default
dfs = utils.filter_patients(dfs_full, _threshold)
burn_in = 300 # burn-in samples used to learn the best order via cv
n_splits = 15
ph = 18 # prediction horizon
# State-space model:
# transition matrix (double integration model)
F = np.array([[2, -1], [1, 0]])
# measures matrix
H = np.array([1, 0])
# Get patients list
patients = list(dfs.keys())
for idx in patients:
df = utils.gluco_extract(dfs[idx], return_df=True)
# Learn the best order via cv
# lambda2_range = np.logspace(-12, -4, 10)
lambda2_range = np.logspace(-12, -4, 3)
sigma2_range = np.linspace(1, 40, 3)
# sigma2_range = np.linspace(1, 40, 10)
out = kf.grid_search(df, lambda2_range, sigma2_range, burn_in=burn_in,
n_splits=15, F=F, H=H,
return_mean_vld_error=True,
return_initial_state_mean=True,
return_initial_state_covariance=True,
verbose=False)
lambda2, sigma2, mse, X0, P0 = out
print("[{}]:\tBest lambda {:2.12f}, sigma {:2.0f}".format(idx, lambda2,
sigma2))
Q = np.array([[lambda2, 0], [0, 0]]) # transition_covariance
R = sigma2 # observation (co)variance
df = df.iloc[burn_in:] # don't mix-up training/test
_kf = kf.cgmkalmanfilter(F=F, Q=Q, R=R, X0=X0, P0=P0)
errs, forecast = kf.online_forecast(df, _kf, H, ph=18, lambda2=lambda2,
sigma2=sigma2, verbose=True)
# Save results reports
error_summary = utils.forecast_report(errs)
print(error_summary)
# import matplotlib.pyplot as plt
# plotting.cgm(df, forecast['ts'], title='Patient '+idx,
# savefig=False)
# plotting.residuals(df, forecast['ts'], skip_first=burn_in,
# skip_last=ph, title='Patient '+idx,
# savefig=False)
# plt.show()
# break
# # dump it into a pkl
pkl.dump(error_summary, open(idx+'.pkl', 'wb'))
try:
# Plot signal and its fit
plotting.cgm(df, forecast['ts'], title='Patient '+idx,
savefig=True)
# Plot residuals
plotting.residuals(df, forecast['ts'], skip_first=burn_in,
skip_last=ph, title='Patient '+idx,
savefig=True)
except:
print("Plotting failed for patient {}".format(idx))
|
gpl-3.0
| 4,648,840,799,209,392,000
| 35.54321
| 79
| 0.565203
| false
| 3.406214
| false
| false
| false
|
ftomassetti/plaid
|
app/views/decorators.py
|
1
|
1552
|
from functools import wraps
from flask import render_template, request, url_for
from app.models import PatchState
def filterable(f):
"""Filter a query"""
@wraps(f)
def wrapped(*args, **kwargs):
d = f(*args, **kwargs)
q = d['query']
state = request.args.get('state', None, type=str)
if state:
q = q.filter_by(state=PatchState.from_string(state))
# add more filters later
d['query'] = q
return d
return wrapped
def paginable(pagename, max_per_page=50):
"""Paginate a query"""
def decorator(f):
@wraps(f)
def wrapped(*args, **kwargs):
d = f(*args, **kwargs)
q = d['query']
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', max_per_page, type=int)
p = q.paginate(page, per_page, False)
if not p.items:
d['page'] = None
d[pagename] = q.paginate(1, per_page, False)
else:
d[pagename] = p
return d
return wrapped
return decorator
def render(template):
"""render a query"""
def decorator(f):
@wraps(f)
def wrapped(*args, **kwargs):
d = f(*args, **kwargs)
def endpoint(**up):
kwargs.update(up)
return url_for(request.endpoint, **kwargs)
d['endpoint'] = endpoint
return render_template(template, **d)
return wrapped
return decorator
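# Illustrative usage sketch (the Patch model and template name are hypothetical):
#
#   @app.route('/patches')
#   @render('patches.html')
#   @paginable('patches')
#   @filterable
#   def patch_list():
#       return {'query': Patch.query}
#
# The view returns a dict with a 'query' entry; `filterable` narrows it by the
# ?state= argument, `paginable` replaces it with a Pagination object under the
# given key, and `render` passes the dict to the template together with an
# `endpoint` URL builder.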
|
gpl-2.0
| -1,112,586,424,140,818,000
| 24.442623
| 75
| 0.52384
| false
| 3.860697
| false
| false
| false
|
citizenlabsgr/voter-engagement
|
api/core/management/commands/gendata.py
|
1
|
4551
|
import random
from contextlib import suppress
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand
from django.db.utils import IntegrityError
from faker import Faker
from api.elections.models import Election
from api.voters.models import Status, Voter
User = get_user_model()
fake = Faker()
def p(value):
return value > random.random()
class Command(BaseCommand):
help = "Generate data for automated testing and manual review"
def add_arguments(self, parser):
parser.add_argument(
'emails',
nargs='?',
type=lambda value: value.split(','),
default=[],
)
def handle(self, *, emails, **_options): # pylint: disable=arguments-differ
self.update_site()
admin = self.get_or_create_superuser()
users = [self.get_or_create_user(email) for email in emails]
self.generate_review_data(admin, *users)
def update_site(self):
site = Site.objects.get(id=1)
site.name = f"Voter Engagement {settings.BASE_NAME}"
site.domain = settings.BASE_DOMAIN
site.save()
self.stdout.write(f"Updated site: {site}")
def get_or_create_superuser(self, username="admin", password="password"):
try:
user = User.objects.create_superuser(
username=username,
email=f"{username}@{settings.BASE_DOMAIN}",
password=password,
)
self.stdout.write(f"Created new superuser: {user}")
except IntegrityError:
user = User.objects.get(username=username)
self.stdout.write(f"Found existing superuser: {user}")
return user
def get_or_create_user(self, base_email, password="password"):
username, email_domain = base_email.split('@')
user, created = User.objects.get_or_create(username=username)
user.email = f"{username}+{settings.BASE_NAME}@{email_domain}"
user.set_password(password)
user.save()
if created:
self.stdout.write(f"Created new user: {user}")
else:
self.stdout.write(f"Update user: {user}")
return user
def generate_review_data(self, *_users):
while User.objects.count() < 10:
with suppress(IntegrityError):
username = fake.name().replace(' ', '')
user = User.objects.create(
username=username.lower() if p(0.30) else username,
email=fake.email(),
first_name=fake.first_name(),
last_name=fake.last_name(),
)
self.stdout.write(f"Created user: {user}")
while Election.objects.count() < 5:
with suppress(IntegrityError):
name, date = self.fake_election()
election = Election.objects.create(
name=name,
date=date,
)
self.stdout.write(f"Created election: {election}")
while Voter.objects.count() < 50:
with suppress(IntegrityError):
voter = Voter.objects.create(
first_name=fake.first_name(),
last_name=fake.last_name(),
birth_date=fake.date(),
zip_code=fake.zipcode(),
email=fake.email(),
)
self.stdout.write(f"Created voter: {voter}")
while Status.objects.count() < 50:
with suppress(IntegrityError):
status = Status.objects.create(
voter=self.random_voter(),
election=self.random_election(),
registered=True if p(0.90) else None,
read_sample_ballot=True if p(0.80) else None,
located_polling_location=True if p(0.70) else None,
voted=True if p(0.60) else None,
)
self.stdout.write(f"Created status: {status}")
@staticmethod
def fake_election():
date = fake.future_date(end_date="+2y")
kind = random.choice(["General", "Midterm", "Special"])
return f"{date.year} {kind} Election", date
@staticmethod
def random_voter():
return random.choice(Voter.objects.all())
@staticmethod
def random_election():
return random.choice(Election.objects.all())
|
mit
| -1,028,694,120,705,746,800
| 32.711111
| 80
| 0.566249
| false
| 4.202216
| false
| false
| false
|
RCPRG-ros-pkg/control_subsystem
|
common/set_big_stiffness.py
|
1
|
5158
|
#!/usr/bin/env python
# Copyright (c) 2014, Robot Control and Pattern Recognition Group, Warsaw University of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Warsaw University of Technology nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYright HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import roslib
roslib.load_manifest('velma_controller')
import rospy
from geometry_msgs.msg import *
from cartesian_trajectory_msgs.msg import *
import actionlib
from actionlib_msgs.msg import *
import tf
import tf_conversions.posemath as pm
import PyKDL
def moveImpedance(k, t):
global action_impedance_client
action_impedance_goal = CartesianImpedanceGoal()
action_impedance_goal.trajectory.header.stamp = rospy.Time.now() + rospy.Duration(0.2)
action_impedance_goal.trajectory.points.append(CartesianImpedanceTrajectoryPoint(
rospy.Duration(t),
CartesianImpedance(k,Wrench(Vector3(0.7, 0.7, 0.7),Vector3(0.7, 0.7, 0.7)))))
action_impedance_client.send_goal(action_impedance_goal)
def moveWrist( wrist_frame, tool_frame, t, max_wrench):
global action_trajectory_client
# we are moving the tool, so: T_B_Wd*T_W_T
wrist_pose = pm.toMsg(wrist_frame*tool_frame)
action_trajectory_goal = CartesianTrajectoryGoal()
action_trajectory_goal.trajectory.header.stamp = rospy.Time.now() + rospy.Duration(0.01)
action_trajectory_goal.trajectory.points.append(CartesianTrajectoryPoint(
rospy.Duration(t),
wrist_pose,
Twist()))
action_trajectory_goal.wrench_constraint = max_wrench
action_trajectory_client.send_goal(action_trajectory_goal)
def moveTool(tool_frame, t):
global action_tool_client
tool_pose = pm.toMsg(tool_frame)
action_tool_goal = CartesianTrajectoryGoal()
action_tool_goal.trajectory.header.stamp = rospy.Time.now()
action_tool_goal.trajectory.points.append(CartesianTrajectoryPoint(
rospy.Duration(t),
tool_pose,
Twist()))
action_tool_client.send_goal(action_tool_goal)
if __name__ == '__main__':
a = []
for arg in sys.argv:
a.append(arg)
if (len(a) > 1) and ((a[1]=="left") or (a[1]=="right")):
prefix = a[1]
else:
print "Usage: %s prefix"%a[0]
exit(0)
rospy.init_node('impedance_riser')
listener = tf.TransformListener();
action_impedance_client = actionlib.SimpleActionClient("/" + prefix + "_arm/cartesian_impedance", CartesianImpedanceAction)
action_impedance_client.wait_for_server()
action_trajectory_client = actionlib.SimpleActionClient("/" + prefix + "_arm/cartesian_trajectory", CartesianTrajectoryAction)
action_trajectory_client.wait_for_server()
action_tool_client = actionlib.SimpleActionClient("/" + prefix + "_arm/tool_trajectory", CartesianTrajectoryAction)
action_tool_client.wait_for_server()
rospy.sleep(1.0)
# save current wrist position
time_now = rospy.Time.now() - rospy.Duration(1.0)
listener.waitForTransform('torso_base', prefix+'_arm_7_link', time_now, rospy.Duration(4.0))
pose = listener.lookupTransform('torso_base', prefix+'_arm_7_link', time_now)
T_B_W = pm.fromTf(pose)
T_W_T = PyKDL.Frame() # tool transformation
print "setting the tool to %s relative to wrist frame"%(T_W_T)
# move both tool position and wrist position - the gripper holds its position
print "moving wrist"
# we assume that during the initialization there are no contact forces, so we limit the wrench
moveWrist( T_B_W, T_W_T, 2.0, Wrench(Vector3(20, 20, 20), Vector3(4, 4, 4)) )
print "moving tool"
moveTool( T_W_T, 2.0 )
rospy.sleep(2.0)
# change the stiffness
print "changing stiffness for door approach"
moveImpedance(Wrench(Vector3(1200.0, 1200.0, 1200.0), Vector3(300.0, 300.0, 300.0)), 4.0)
rospy.sleep(4.0)
|
bsd-3-clause
| 521,373,026,289,866,900
| 40.596774
| 130
| 0.722373
| false
| 3.504076
| false
| false
| false
|
boada/planckClusters
|
MOSAICpipe/bpz-1.99.3/priors/prior_hdfn_gen.py
|
1
|
2464
|
from __future__ import division
from past.utils import old_div
from bpz_tools import *
def function(z, m, nt):
"""HDFN prior from Benitez 2000
for Ellipticals, Spirals, and Irregular/Starbursts
Returns an array pi[z[:],:nt]
The input magnitude is F814W AB
"""
global zt_at_a
nz = len(z)
momin_hdf = 20.
if m > 32.: m = 32.
if m < 20.: m = 20.
# nt Templates = nell Elliptical + nsp Spiral + nSB starburst
try: # nt is a list of 3 values
nell, nsp, nsb = nt
except: # nt is a single value
nell = 1 # 1 Elliptical in default template set
nsp = 2 # 2 Spirals in default template set
nsb = nt - nell - nsp # rest Irr/SB
nn = nell, nsp, nsb
nt = sum(nn)
# See Table 1 of Benitez00
a = 2.465, 1.806, 0.906
zo = 0.431, 0.390, 0.0626
km = 0.0913, 0.0636, 0.123
k_t = 0.450, 0.147
a = repeat(a, nn)
zo = repeat(zo, nn)
km = repeat(km, nn)
k_t = repeat(k_t, nn[:2])
# Fractions expected at m = 20:
# 35% E/S0
# 50% Spiral
# 15% Irr
fo_t = 0.35, 0.5
fo_t = old_div(fo_t, array(nn[:2]))
fo_t = repeat(fo_t, nn[:2])
#fo_t = [0.35, 0.5]
#fo_t.append(1 - sum(fo_t))
#fo_t = array(fo_t) / array(nn)
#fo_t = repeat(fo_t, nn)
#print 'a', a
#print 'zo', zo
#print 'km', km
#print 'fo_t', fo_t
#print 'k_t', k_t
dm = m - momin_hdf
zmt = clip(zo + km * dm, 0.01, 15.)
zmt_at_a = zmt**(a)
#We define z**a as global to keep it
#between function calls. That way it is
# estimated only once
try:
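# note: 'xxx' is not defined anywhere, so the next statement always raises
# NameError and zt_at_a is recomputed on every call; the caching described
# in the comment above is effectively disabled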
xxx[9] = 3
zt_at_a.shape
except NameError:
zt_at_a = power.outer(z, a)
#Morphological fractions
nellsp = nell + nsp
f_t = zeros((len(a), ), float)
f_t[:nellsp] = fo_t * exp(-k_t * dm)
f_t[nellsp:] = old_div((1. - add.reduce(f_t[:nellsp])), float(nsb))
#Formula:
#zm=zo+km*(m_m_min)
#p(z|T,m)=(z**a)*exp(-(z/zm)**a)
p_i = zt_at_a[:nz, :nt] * exp(-clip(
old_div(zt_at_a[:nz, :nt], zmt_at_a[:nt]), 0., 700.))
#This eliminates the very low level tails of the priors
norm = add.reduce(p_i[:nz, :nt], 0)
p_i[:nz, :nt] = where(
less(
old_div(p_i[:nz, :nt], norm[:nt]), old_div(1e-2, float(nz))), 0.,
old_div(p_i[:nz, :nt], norm[:nt]))
norm = add.reduce(p_i[:nz, :nt], 0)
p_i[:nz, :nt] = p_i[:nz, :nt] / norm[:nt] * f_t[:nt]
return p_i
|
mit
| -1,657,587,623,260,758,800
| 27
| 77
| 0.520292
| false
| 2.491405
| false
| false
| false
|
pjiangtw/HOPE
|
WishCplex/WISHCPLEX.py
|
1
|
2949
|
#----------------------------------------------------------------------------------------
# Copyright, 2013:
#
# Stefano Ermon - Cornell University , ermonste@cs.cornell.edu
# Ashish Sabharwal - IBM Watson Research Center , ashish.sabharwal@us.ibm.com
#----------------------------------------------------------------------------------------
import sys
import math
import random
import os
import argparse
from WISHLogProcess import process_logs
from WISHLogProcess import process_logs_cplex_LB
from WISHLogProcess import process_logs_cplex_UB
# version number
__version__ = '1.0'
#########################################
# Usage Information:
# run "python WISH.py -h" for help
#########################################
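#
# Illustrative invocation (file and folder names are placeholders):
#   python WISHCPLEX.py model.uai ./logs -alpha 1.0 -delta 0.1 -timeout 10
# writes one CPLEX log per (level, sample) pair into ./logs and then
# post-processes the logs into lower/upper bounds on the partition function.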
parser = argparse.ArgumentParser(description='Estimate the partition function using the WISH algorithm and CPLEX for the optimization.')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument("infile", help="Graphical model (in UAI format)")
parser.add_argument("outfolder", help="Folder where logs are stored")
parser.add_argument('-alpha', '--alpha', type=float, help="Accuracy alpha", default=1.0)
parser.add_argument('-delta', '--delta', type=float, help="Failure probability delta", default=0.1)
parser.add_argument('-timeout', '--timeout', type=int, help="Timeout for each optimization instance (seconds)", default=10)
args = parser.parse_args()
print "Reading factor graph from " + args.infile
inputfile = open(args.infile, "r")
fileName, fileExtension = os.path.splitext(args.infile)
ind = 0
origNbrFactor = 0
origNbrVar = 0
for l in inputfile:
if not l.strip()=='':
ind = ind +1
if ind==2:
origNbrVar=int(l)
elif ind==3:
l = l.rstrip("\n")
elif ind==4: ## add xor cpt table
origNbrFactor = int(l)
elif ind>5:
break
print "Model with " + str(origNbrVar) + "variables and "+str(origNbrFactor) +" factors"
depth = origNbrVar
T = 7 #int(math.ceil(math.log(origNbrVar)*math.log(1.0/args.delta)/args.alpha))
print "Using " + str(T) +" samples per level"
os.system("mkdir "+args.outfolder)
for i in range(0,depth+1): ## main for loop
if i==0:
sampnum=1
else:
sampnum=T
for t in range(1,sampnum+1): ## main for loop
outfilenamelog = "%s.xor%d.loglen%d.%d.ILOGLUE.uai.LOG" % (os.path.basename(fileName) , i , 0 , t)
cmdline = ("timeout %d ./WH_cplex -paritylevel 1 -number %d -seed 10 %s > %s") % (args.timeout , i , args.infile , args.outfolder +"/"+ outfilenamelog)
os.system(cmdline)
## Parallel execution:
##
## assign this job to a separate core (a system dependent script is needed here)
## we provide an example based on Torque/PBS:
##
## os.system("qsub -v basedir="+basedir+",file="+infile+",level="+str(i)+",len="+str(0)+",outdir="+outdir+",sample="+str(t)+",timeout=900s"+" LaunchIloglue.sh")
process_logs_cplex_LB(args.outfolder)
process_logs_cplex_UB(args.outfolder)
|
apache-2.0
| 4,981,883,023,722,491,000
| 32.511364
| 162
| 0.631061
| false
| 3.137234
| false
| false
| false
|
mahyarap/httpclient
|
httpclient/httpclient.py
|
1
|
1653
|
#!/usr/bin/env python3
import sys
import argparse
from httpclient.http import HttpRequest
__version__ = '0.1.0'
def parse_cmd_options(args):
"""Parse the command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('url', help='URL to send the request to')
parser.add_argument('-m', '--method',
default='GET',
help='HTTP request method')
parser.add_argument('-H', '--header',
action='append',
default=[],
help='HTTP headers')
parser.add_argument('-v', '--verbose',
action='store_true',
help='be verbose')
parser.add_argument('-V', '--version',
action='version',
version='%(prog)s {}'.format(__version__),
help='show version and exit')
return parser.parse_args(args)
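# Example (hypothetical values) of what parse_cmd_options() accepts:
#   parse_cmd_options(['-m', 'POST',
#                      '-H', 'Content-Type: application/json',
#                      'http://example.com/api'])
# Each -H value is later split on the first ':' into a header name/value pair.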
def main():
args = parse_cmd_options(sys.argv[1:])
if args.url:
headers = {}
for header in args.header:
key, val = header.split(':', maxsplit=1)
headers[key.strip()] = val.strip()
request = HttpRequest(args.url, method=args.method,
headers=headers)
response = request.send()
if args.verbose:
print(str(request))
if request.body is not None:
print(str(request.body))
print(str(response))
if response.body is not None:
print(response.body, end='')
return 0
if __name__ == '__main__':
sys.exit(main())
|
gpl-3.0
| 8,312,854,801,826,824,000
| 28
| 66
| 0.503932
| false
| 4.467568
| false
| false
| false
|
mackal/faction.py
|
faction3.py
|
1
|
35996
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 Michael Cook <mcook@mackal.net>
#
# GPLv3
"""
Processes an eqlog file and generates SQL to update factions
Should work with a full log, but cleaning up the log will be quicker
The file needs at least the zone enter messages, faction messages,
and slain messages in their full to work
IMPORTANT: faction messages from non-kills should be filtered out ...
File prep:
I just did a $ grep 'faction\\|slain\\|entered' on the log file
to clean up the log for processing
"""
import re, sys, os
import collections
# str to str so we don't have to worry about string cat
factiontable = {
"Agents of Dreadspire": "396",
"Agents of Mistmoore": "1",
"Agnarr": "2",
"Ak'Anon Strike Force V": "497",
"Akheva": "3",
"Allize Taeew": "4",
"Allize Volew": "5",
"Ancestors of the Crypt": "499",
"Ancestors of Valdeholm": "498",
"Anchorites of Brell Serilis": "6",
"Ancient Cyclops": "481",
"Ankhefenmut": "397",
"Anti-mage": "8",
"Antonius Bayle": "9",
"Arboreans of Faydark": "10",
"Arcane Scientists": "11",
"Army of Light": "494",
"Ashen Order": "12",
"Askr the Lost": "13",
"Aviak": "14",
"Banker": "15",
"Battalion of Marr": "16",
"Beetle": "457",
"Befallen Inhabitants": "17",
"Bertoxxulous": "382",
"Beta Neutral": "18",
"Betrayers of Di`Zok": "19",
"Bloodgills": "20",
"Bloodsabers": "21",
"Broken Skull Clan": "22",
"Brood of Di`Zok": "23",
"Brood of Kotiz": "24",
"Brood of Ssraeshza": "25",
"Brownie": "26",
"Burning Dead": "27",
"Burynai Legion": "28",
"Butcherblock Bandits": "29",
"Cabilis Residents": "30",
"Carson McCabe": "31",
"Cazic Thule": "368",
"Chetari": "32",
"Children of Dranik": "398",
"Circle Of Unseen Hands": "33",
"Citizens of Froststone": "399",
"Citizens of Gukta": "35",
"Citizens of Qeynos": "36",
"Citizens of Seru": "37",
"Citizens of Sharvahl": "483",
"Citizens of Takish-Hiz": "38",
"Clan Grikbar": "39",
"Clan Kolbok": "40",
"Clan Runnyeye": "41",
"Class 41": "377",
"Claws of Veeshan": "42",
"Cleaving Tooth Clan": "383",
"Clerics of Tunare": "43",
"Clerics of Underfoot": "44",
"Clockwork Gnome": "45",
"Clurg": "46",
"Coalition of Tradefolk": "47",
"Coalition of TradeFolk III": "369",
"Coalition of Tradefolk Underground": "48",
"Coldain": "49",
"Combine Empire": "50",
"Commons Residents": "51",
"Concillium Universus": "52",
"Corrupt Qeynos Guards": "53",
"Coterie Elite": "54",
"Coterie of the Eternal Night": "55",
"Craftkeepers": "56",
"Craknek Warriors": "57",
"Creatures of Darkhollow": "400",
"Creatures of Gloomingdeep": "401",
"Creatures of Justice": "58",
"Creatures of Taelosia": "59",
"Creep Reapers": "402",
"Crescent Guards": "493",
"Crimson Hands": "60",
"Critters of Jaggedpine": "61",
"Crusaders of Greenmist": "62",
"Crushbone Orcs": "63",
"Crystal Caverns Terrors/Spiders/Crawlers": "395",
"Cult of the Arisen": "64",
"Cult of the Great Saprophyte": "65",
"Cursed Drakes": "403",
"DaBashers": "66",
"Dain Frostreaver IV": "67",
"Dar Khura": "68",
"Dark Bargainers": "69",
"Dark Ones": "70",
"Dark Reflection": "71",
"Dark Reign": "404",
"Dark Sendings": "72",
"Darkpaws of Jaggedpine": "73",
"Dawnhoppers": "74",
"Death Fist Orcs": "405",
"Deathfist Orcs": "75",
"Deep Muses": "76",
"Deep Sporali": "406",
"Deeppockets": "77",
"Deepshade Collective": "78",
"Deepwater Knights": "79",
"Defective Clockwork": "80",
"Defenders of the Broodlands": "407",
"Defenders of the Haven": "81",
"Deklean Korgad": "408",
"Denizens of Discord": "409",
"Denizens of Fear": "82",
"Denizens of Mischief": "391",
"Dervish Cutthroats": "83",
"Disciples of Kerafyrm": "84",
"Disciples of Rhag`Zadune": "85",
"Dismal Rage": "86",
"Dranik Loyalists": "410",
"Dreadguard Inner": "87",
"Dreadguard Outer": "88",
"Drusella Sathir": "89",
"Dulaks Clan": "459",
"Ebon Mask": "90",
"Eldritch Collective": "91",
"Elementals": "374",
"Emerald Warriors": "92",
"Emperor Ssraeshza": "93",
"Erudite Citizen": "380",
"EvilEye": "94",
"Exiled Frogloks": "95",
"Expedition 328": "411",
"Eye of Seru": "96",
"Faerie": "97",
"Fallen Guard of Illsalin": "412",
"Fallen of Bloody Kithicor": "98",
"Faydarks Champions": "99",
"FelGuard": "100",
"Firiona Vie": "101",
"Fizzlethorp": "414",
"Fizzlethorpe": "102",
"Followers of Korucust": "103",
"Forgotten Guktan Spirits": "104",
"Free Traders of Malgrinnor": "415",
"The Freeport Militia": "105",
"Frogloks of Guk": "106",
"Frogloks of Krup": "107",
"Frogloks of Kunark": "108",
"Frogloks of Sebilis": "109",
"Frostfoot Goblins": "110",
"FungusMan": "111",
"Gate Callers": "112",
"Gate Keepers": "113",
"Gelistial": "114",
"Gem Choppers": "115",
"Geonid Collective": "116",
"Ghouls of Neriak": "117",
"Giant Spider": "386",
"Gladiators of Mata Muram": "416",
"Goblin": "118",
"Goblins of Cleaving Tooth": "119",
"Goblins of Fire Peak": "120",
"Goblins of Mountain Death": "121",
"Gor Taku": "122",
"Gralloks": "123",
"Greater Brann Giants": "124",
"Greater Jord Giants": "125",
"Greater Vann Giants": "126",
"Greater Vind Giants": "127",
"Green Blood Knights": "128",
"Greenfoot Goblins": "417",
"Grieg": "129",
"Grimlings of the Forest": "392",
"Grimlings of the Moor": "130",
"Grobb Merchants": "131",
"Guardians of Shar Vahl": "132",
"Guardians of the Vale": "133",
"Guardians of Veeshan": "134",
"Guards of Gloomingdeep": "475",
"Guards of Qeynos": "135",
"Guktan Elders": "136",
"Guktan Suppliers": "484",
"Gunthaks Clan": "458",
"Hall of the Ebon Mask": "137",
"Hand Legionnaries": "138",
"Hand of Seru": "139",
"Harbingers Clan": "373",
"Haven Defenders": "140",
"Haven Smugglers": "141",
"Heart of Seru": "142",
"Heretics": "143",
"Hexxt": "144",
"High Council of Erudin": "145",
"High Council of Gukta": "146",
"High Guard of Erudin": "147",
"HighHold Citizens": "148",
"Highpass Guards": "149",
"HoHMaiden": "471",
"Holgresh": "150",
"Horde of Xalgoz": "151",
"House of Fordel": "152",
"House of Midst": "153",
"House of Stout": "154",
"Iksar": "371",
"Indifferent": "463",
"Indigo Brotherhood": "155",
"Inhabitants of Air": "464",
"Inhabitants of Firiona Vie": "418",
"Inhabitants of Hate": "156",
"Inhabitants of Tanaan": "157",
"Innoruuk's Curse of the Cauldron": "158",
"Invaders of the Moor": "503",
"Jaggedpine Treefolk": "159",
"Jaled-Dar": "160",
"Johanius Barleou": "161",
"Kaladim Citizens": "162",
"Kaladim Merchants": "419",
"Kane Bayle": "164",
"Karana": "165",
"Karana Bandits": "166",
"Karana Residents": "167",
"Katta Castellum Citizens": "168",
"Kazon Stormhammer": "169",
"Kedge": "420",
"Keepers of the Art": "170",
"Keepers of the Claw": "171",
"Kejek Village": "172",
"Kejekan": "173",
"Kelethin Merchants": "174",
"Kerra": "421",
"Kerra Isle": "175",
"Kessdona": "422",
"Khati Sha": "423",
"King Ak'Anon": "176",
"King Aythox Thex": "379",
"King Naythox Thex": "177",
"King Tearis Thex": "178",
"King Tormax": "179",
"King Xorbb": "180",
"Kingdom of Above and Below": "181",
"Kithicor Residents": "182",
"Knights of Thunder": "183",
"Knights of Truth": "184",
"Kobold": "185",
"Kobolds of Fire Pit": "186",
"Kobolds of Gloomingdeep": "424",
"Koka'Vor Tribe": "501",
"KOS": "366",
"KOS Inhabitants of Air": "465",
"KOS Plane of Disease": "466",
"KOS Plane of Innovation": "468",
"KOS Plane of Nightmare": "467",
"KOS Plane of Storms": "489",
"KOS Plane of Time": "469",
"KOS_animal": "367",
"Krag": "187",
"Kromrif": "188",
"Kromzek": "189",
"Kunark Fire Giants": "190",
"Lake Recondite Bandits": "191",
"Lanys T`Vyl": "425",
"League of Antonican Bards": "192",
"Legion of Cabilis": "193",
"Legion of Mata Muram": "194",
"Lesser Brann Giants": "195",
"Lesser Jord Giants": "196",
"Lesser Vann Giants": "197",
"Lesser Vind Giants": "198",
"Lithiniath": "199",
"Lizard Man": "200",
"Lodikai": "201",
"Lorekeepers of Gukta": "202",
"Lost Kingdom of Lok": "203",
"Lost Minions of Miragul": "204",
"Loyals": "454",
"Luclin": "205",
"Madmen": "480",
"Magus Conlegium": "206",
"Mayong Mistmoore": "207",
"Mayor Gubbin": "208",
"Meldrath": "209",
"Merchants of Ak'Anon": "210",
"Merchants of Erudin": "211",
"Merchants of Felwithe": "212",
"Merchants of Halas": "213",
"Merchants of Highpass": "214",
"Merchants of Kaladim": "215",
"Merchants of Ogguk": "216",
"Merchants of Qeynos": "217",
"Merchants of Rivervale": "218",
"Mermaid": "426",
"Mermaids": "375",
"Miners Guild 249": "219",
"Miners Guild 628": "220",
"Minions of Scale": "221",
"Minions of the Sunlord": "222",
"Minions of Tirranun": "427",
"Minions of Underfoot": "223",
"Mountain Death Clan": "384",
"Mucktail Gnolls": "224",
"Murrissa Sandwhisper": "372",
"Nadox Clan": "472",
"Nadox Initiate": "225",
"Nagafen": "226",
"Najena": "227",
"Nathyn Illuminious": "228",
"Needlite": "460",
"Neriak Merchants": "486",
"Neriak Ogre": "378",
"Neriak Trolls": "229",
"Nest Guardians": "428",
"New Alliance of Stone": "230",
"Nihil": "231",
"Nitram": "474",
"Noobie Monsters KOS to Guards": "394",
"Norrath's Keepers": "429",
"Oggok Citizens": "233",
"Oggok Guards": "232",
"Ogguk Residents": "430",
"Ogre": "431",
"Ogre Warriors": "234",
"OmensBatRat": "485",
"OmensMurks": "487",
"Opal Dark Briar": "235",
"Oracle of Karnon": "236",
"Oracle of Marud": "237",
"Orc": "238",
"Order of Autarkic Umbrage": "239",
"Order of Three": "240",
"Orphans": "452",
"Othmir": "241",
"Outcasts and Mutants": "242",
"Overlord Mata Muram": "432",
"Owlbears of the Moor": "505",
"Pack of Tomar": "243",
"Paebala": "244",
"Paladins of Gukta": "245",
"Paladins of Underfoot": "246",
"Paludal_Mushrooms": "490",
"Paludal_Underbulk": "491",
"Peace Keepers": "247",
"Phingel Autropos": "433",
"Phinigel Autropos": "248",
"Pickclaw Goblins": "249",
"Pirates of Gunthak": "250",
"Pirates of Iceclad": "251",
"Pirates of the Pine": "252",
"Pixie": "253",
"Pixtt": "254",
"Planar Collective": "455",
"Planes_Neutral": "488",
"Prexuz": "255",
"Priests of Innoruuk": "256",
"Priests of Life": "257",
"Priests of Marr": "258",
"Priests of Mischief": "259",
"Primordial Malice": "260",
"Prisoners of Justice": "261",
"Progeny": "262",
"Protectors of Growth": "263",
"Protectors of Gukta": "264",
"Protectors of Pine": "265",
"Qeynos Citizens": "434",
"QRG Protected Animals": "267",
"Queen Cristanos Thex": "268",
"Rallos Zek": "269",
"Rav": "270",
"Residents of Gloomingdeep": "476",
"Residents of Jaggedpine": "271",
"Residents of Karanas": "272",
"Riftseekers": "435",
"Rikkukin": "436",
"Ring of Scale": "273",
"Riptide Goblins": "274",
"Rogues of the White Rose": "275",
"Root of Innuruuk": "276",
"Rujarkian Slavers": "277",
"Rygorr Clan Snow Orcs": "278",
"Sabertooths of Blackburrow": "279",
"Sandworkers": "280",
"Sarnak Collective": "281",
"Scaled Mystics": "282",
"Scions of Dreadspire": "437",
"Scorchclaw Goblins": "438",
"Seru": "284",
"Servants of Aero": "285",
"Servants of Hydro": "286",
"Servants of Inferno": "287",
"Servants of Saryrn": "288",
"Servants of Terra": "289",
"Servants of Tunare": "290",
"Shadowed Men": "291",
"Shadowknights of Night Keep": "292",
"Shak Dratha": "293",
"Shamen of Justice": "294",
"Shamen of War": "295",
"Shei Vinitras": "296",
"Shik Nar": "297",
"Shoulders of Seru": "298",
"Shralok Orcs": "299",
"Silent Fist Clan": "300",
"Silla Herald": "496",
"Sirens of the Grotto": "301",
"Sky Talons": "439",
"Skytalons": "302",
"Snowfang Gnolls": "303",
"Soldiers of Tunare": "304",
"Solusek Mining Co": "305",
"Song Weavers": "306",
"Spider": "500",
"Spire Spirits": "388",
"Spirits of Katta Castellum": "307",
"Spirocs of Timorous": "308",
"Splitpaw Clan": "309",
"Sporali": "310",
"Sporali Collective": "440",
"Steel Warriors": "311",
"Steelslaves": "312",
"Stillmoon Acolytes": "441",
"Stone Hive Bixies": "313",
"Storm Guard": "314",
"Storm Guardians": "315",
"Storm Reapers": "316",
"Sustainers": "453",
"Swamp Giants of Kunark": "370",
"Swift Tails": "317",
"Syrik Iceblood": "318",
"Tarmok Tribe": "390",
"Taruun": "319",
"Temple Of Sol Ro": "442",
"Temple of Solusek Ro": "320",
"The Bloodtribe": "389",
"The Cral Ligi Clan": "321",
"The Dark Alliance": "443",
"The Dead": "322",
"The Forsaken": "323",
"The Grol Baku Clan": "324",
"The Guardians": "444",
"The HotWingz": "325",
"The Kromdek": "326",
"The Kromdul": "327",
"The Rainkeeper": "328",
"The Recuso": "329",
"The Sambata Tribe": "330",
"The Spurned": "331",
"The Tro Jeg Clan": "332",
"The Truth": "333",
"The Vas Ren Clan": "334",
"The_Angry_Sambata": "492",
"Thought Leeches": "335",
"Thrall of Kly": "336",
"Thunder Guardians": "445",
"Tirranun": "446",
"TizmakClan": "337",
"Traders of the Haven": "338",
"Trakanon": "339",
"Treants of Jaggedpine": "340",
"Tribe Vrodak": "341",
"True Spirit": "342",
"Trusik Tribe": "447",
"Tserrina Syl'Tor": "343",
"Tunare's Scouts": "283",
"Tunarean Court": "344",
"Ulthork": "345",
"Undead Frogloks of Guk": "346",
"Undead Residents of Kithicor": "381",
"Underbulks": "461",
"Unkempt Druids": "347",
"Unrest Inhabitants": "376",
"VahShir Crusaders": "348",
"Valdanov Zevfeer": "349",
"Validus Custodus": "350",
"Veeshan": "351",
"Velketor": "352",
"Venril Sathir": "353",
"Verish Mal": "456",
"VillagerRoom": "482",
"Vishimtar": "448",
"Volkara": "449",
"Volkara's Brood": "450",
"Vornol Transon": "354",
"Vox": "355",
"Warlord Ngrub": "473",
"Wayfarers Brotherhood": "356",
"WehateThelin": "470",
"Werewolf": "357",
"Whisperling": "358",
"Whistling Fist Brotherhood": "359",
"Wisps": "462",
"Witnesses of Hate": "393",
"Wizards of Gukta": "360",
"Wolves of the Moor": "504",
"Wolves of the North": "361",
"Yar`lir": "451",
"Yelinak": "362",
"Yunjo Slave Resistance": "363",
"Zazamoukh": "364",
"Zlandicar": "365",
"Zordakalicus Ragefire": "385",
"Zun'Muram": "502",
"Human": "506",
"Donovon":"507",
}
# There are some duplicate keys here (the later entry silently overrides the earlier one), too lazy to clean up for now ..
zonetable = {
"The Abysmal Sea": 279,
"The Acrylia Caverns": 154,
"The Plane of Sky": 71,
"Ak'Anon": 55,
"The Akheva Ruins": 179,
"Anguish, the Fallen Palace": 317,
"Designer Apprentice": 999,
"Arcstone, Isle of Spirits": 369,
"The Arena": 77,
"The Arena Two": 180,
"Art Testing Domain": 996,
"Ashengate, Reliquary of the Scale": 406,
"Jewel of Atiiki": 418,
"Aviak Village": 53,
"Barindu, Hanging Gardens": 283,
"Barren Coast": 422,
"The Barter Hall": 346,
"The Bazaar": 151,
"Befallen": 36,
"Befallen": 411,
"The Gorge of King Xorbb": 16,
"Temple of Bertoxxulous": 469,
"Blackburrow": 17,
"Blacksail Folly": 428,
"The Bloodfields": 301,
"Bloodmoon Keep": 445,
"Bastion of Thunder": 209,
"The Broodlands": 337,
"The Buried Sea": 423,
"The Burning Wood": 87,
"Butcherblock Mountains": 68,
"Cabilis East": 106,
"Cabilis West": 82,
"Dagnor's Cauldron": 70,
"Nobles' Causeway": 303,
"Accursed Temple of CazicThule": 48,
"Muramite Proving Grounds": 304,
"Muramite Proving Grounds": 305,
"Muramite Proving Grounds": 306,
"Muramite Proving Grounds": 307,
"Muramite Proving Grounds": 308,
"Muramite Proving Grounds": 309,
"The Howling Stones": 105,
"Chardok": 103,
"Chardok: The Halls of Betrayal": 277,
"The City of Mist": 90,
"Loading": 190,
"Cobaltscar": 117,
"The Crypt of Decay": 200,
"The Commonlands": 408,
"West Commonlands": 21,
"Corathus Creep": 365,
"Sporali Caverns": 366,
"The Corathus Mines": 367,
"Crescent Reach": 394,
"Crushbone": 58,
"Crypt of Shade": 449,
"The Crystal Caverns": 121,
"Crystallos, Lair of the Awakened": 446,
"Sunset Home": 26,
"The Crypt of Dalnir": 104,
"The Dawnshroud Peaks": 174,
"Deadbone Reef": 427,
"Lavaspinner's Lair": 341,
"Tirranun's Delve": 342,
"The Seething Wall": 373,
"The Devastation": 372,
"Direwind Cliffs": 405,
"Korafax, Home of the Riders": 470,
"Citadel of the Worldslayer": 471,
"The Hive": 354,
"The Hatchery": 355,
"The Cocoons": 356,
"Queen Sendaii`s Lair": 357,
"Dragonscale Hills": 442,
"Deepscar's Den": 451,
"The Ruined City of Dranik": 336,
"Catacombs of Dranik": 328,
"Catacombs of Dranik": 329,
"Catacombs of Dranik": 330,
"Dranik's Hollows": 318,
"Dranik's Hollows": 319,
"Dranik's Hollows": 320,
"Sewers of Dranik": 331,
"Sewers of Dranik": 332,
"Sewers of Dranik": 333,
"Dranik's Scar": 302,
"The Dreadlands": 86,
"Dreadspire Keep": 351,
"The Temple of Droga": 81,
"Dulak's Harbor": 225,
"Eastern Plains of Karana": 15,
"The Undershore": 362,
"Snarlstone Dens": 363,
"Eastern Wastes": 116,
"The Echo Caverns": 153,
"East Commonlands": 22,
"The Elddar Forest": 378,
"Tunare's Shrine": 379,
"The Emerald Jungle": 94,
"Erudin": 24,
"The Erudin Palace": 23,
"Erud's Crossing": 98,
"Marauders Mire": 130,
"Everfrost Peaks": 30,
"The Plane of Fear": 72,
"The Feerrott": 47,
"Northern Felwithe": 61,
"Southern Felwithe": 62,
"Ferubi, Forgotten Temple of Taelosia": 284,
"The Forgotten Halls": 998,
"The Field of Bone": 78,
"Firiona Vie": 84,
"Academy of Arcane Sciences": 385,
"Arena": 388,
"City Hall": 389,
"East Freeport": 382,
"Hall of Truth: Bounty": 391,
"Freeport Militia House: My Precious": 387,
"Freeport Sewers": 384,
"Temple of Marr": 386,
"Theater of the Tranquil": 390,
"West Freeport": 383,
"East Freeport": 10,
"North Freeport": 8,
"West Freeport": 9,
"Frontier Mountains": 92,
"Frostcrypt, Throne of the Shade King": 402,
"The Tower of Frozen Shadow": 111,
"The Fungus Grove": 157,
"The Greater Faydark": 54,
"The Great Divide": 118,
"Grieg's End": 163,
"Grimling Forest": 167,
"Grobb": 52,
"The Plane of Growth": 127,
"The Mechamatic Guardian": 447,
"Guild Hall": 345,
"Guild Lobby": 344,
"Deepest Guk: Cauldron of Lost Souls": 229,
"The Drowning Crypt": 234,
"The Ruins of Old Guk": 66,
"Deepest Guk: Ancient Aqueducts": 239,
"The Mushroom Grove": 244,
"Deepest Guk: The Curse Reborn": 249,
"Deepest Guk: Chapel of the Witnesses": 254,
"The Root Garden": 259,
"Deepest Guk: Accursed Sanctuary": 264,
"The City of Guk": 65,
"The Gulf of Gunthak": 224,
"Gyrospire Beza": 440,
"Gyrospire Zeka": 441,
"Halas": 29,
"Harbinger's Spire": 335,
"Plane of Hate": 76,
"The Plane of Hate": 186,
"Hate's Fury": 228,
"High Keep": 6,
"Highpass Hold": 5,
"Highpass Hold": 407,
"HighKeep": 412,
"Hills of Shade": 444,
"The Halls of Honor": 211,
"The Temple of Marr": 220,
"The Hole": 39,
"Hollowshade Moor": 166,
"The Iceclad Ocean": 110,
"Icefall Glacier": 400,
"Ikkinz, Chambers of Transcendence": 294,
"Ruins of Illsalin": 347,
"Illsalin Marketplace": 348,
"Temple of Korlach": 349,
"The Nargil Pits": 350,
"Inktu'Ta, the Unmasked Chapel": 296,
"Innothule Swamp": 46,
"The Innothule Swamp": 413,
"The Jaggedpine Forest": 181,
"Jardel's Hook": 424,
"Kael Drakkel": 113,
"Kaesora": 88,
"South Kaladim": 60,
"North Kaladim": 67,
"Karnor's Castle": 102,
"Katta Castellum": 160,
"Katta Castrum": 416,
"Kedge Keep": 64,
"Kerra Isle": 74,
"Kithicor Forest": 410,
"Kithicor Forest": 20,
"Kod'Taz, Broken Trial Grounds": 293,
"Korascian Warrens": 476,
"Kurn's Tower": 97,
"Lake of Ill Omen": 85,
"Lake Rathetear": 51,
"The Lavastorm Mountains": 27,
"Mons Letalis": 169,
"The Lesser Faydark": 57,
"Loading Zone": 184,
"New Loading Zone": 185,
"Loping Plains": 443,
"The Maiden's Eye": 173,
"Maiden's Grave": 429,
"Meldrath's Majestic Mansion": 437,
"Fortress Mechanotus": 436,
"Goru`kar Mesa": 397,
"Miragul's Menagerie: Silent Gallery": 232,
"Miragul's Menagerie: Frozen Nightmare": 237,
"The Spider Den": 242,
"Miragul's Menagerie: Hushed Banquet": 247,
"The Frosted Halls": 252,
"The Forgotten Wastes": 257,
"Miragul's Menagerie: Heart of the Menagerie": 262,
"The Morbid Laboratory": 267,
"The Theater of Imprisoned Horror": 271,
"Miragul's Menagerie: Grand Library": 275,
"The Plane of Mischief": 126,
"The Castle of Mistmoore": 59,
"Misty Thicket": 33,
"The Misty Thicket": 415,
"Mistmoore's Catacombs: Forlorn Caverns": 233,
"Mistmoore's Catacombs: Dreary Grotto": 238,
"Mistmoore's Catacombs: Struggles within the Progeny": 243,
"Mistmoore's Catacombs: Chambers of Eternal Affliction": 248,
"Mistmoore's Catacombs: Sepulcher of the Damned": 253,
"Mistmoore's Catacombs: Scion Lair of Fury": 258,
"Mistmoore's Catacombs: Cesspits of Putrescence": 263,
"Mistmoore's Catacombs: Aisles of Blood": 268,
"Mistmoore's Catacombs: Halls of Sanguinary Rites": 272,
"Mistmoore's Catacombs: Infernal Sanctuary": 276,
"Monkey Rock": 425,
"Blightfire Moors": 395,
"Marus Seru": 168,
"The Crypt of Nadox": 227,
"Najena": 44,
"Natimbi, the Broken Shores": 280,
"Dragon Necropolis": 123,
"Nedaria's Landing": 182,
"Nektropos": 28,
"The Nektulos Forest": 25,
"Shadowed Grove": 368,
"Neriak - Foreign Quarter": 40,
"Neriak - Commons": 41,
"Neriak - 3rd Gate": 42,
"Neriak Palace": 43,
"Netherbian Lair": 161,
"Nexus": 152,
"The Lair of Terris Thule": 221,
"The Northern Plains of Karana": 13,
"North Desert of Ro": 392,
"Northern Desert of Ro": 34,
"The Mines of Nurga": 107,
"Oasis of Marr": 37,
"Oceangreen Hills": 466,
"Oceangreen Village": 467,
"The Ocean of Tears": 409,
"Oggok": 49,
"BlackBurrow": 468,
"Old Bloodfields": 472,
"Old Commonlands": 457,
"City of Dranik": 474,
"Field of Scale": 452,
"Highpass Hold": 458,
"Kaesora Library": 453,
"Kaesora Hatchery": 454,
"Bloody Kithicor": 456,
"Kurn's Tower": 455,
"Ocean of Tears": 69,
"The Overthere": 93,
"Paineel": 75,
"The Paludal Caverns": 156,
"The Lair of the Splitpaw": 18,
"The Permafrost Caverns": 73,
"The Plane of Air": 215,
"The Plane of Disease": 205,
"The Plane of Earth": 218,
"The Plane of Earth": 222,
"The Plane of Fire": 217,
"The Plane of Innovation": 206,
"The Plane of Justice": 201,
"The Plane of Knowledge": 202,
"The Plane of Nightmares": 204,
"The Plane of Storms": 210,
"Drunder, the Fortress of Zek": 214,
"The Plane of Time": 219,
"The Plane of Time": 223,
"Torment, the Plane of Pain": 207,
"The Plane of Tranquility": 203,
"The Plane of Valor": 208,
"Plane of War": 213,
"The Plane of Water": 216,
"The Precipice of War": 473,
"Muramite Provinggrounds": 316,
"The Qeynos Aqueduct System": 45,
"The Western Plains of Karana": 12,
"South Qeynos": 1,
"North Qeynos": 2,
"The Qeynos Hills": 4,
"Qinimi, Court of Nihilia": 281,
"The Surefall Glade": 3,
"Qvic, Prayer Grounds of Calling": 295,
"Qvic, the Hidden Vault": 299,
"Sverag, Stronghold of Rage": 374,
"Razorthorn, Tower of Sullon Zek": 375,
"Rathe Council Chamber": 477,
"The Rathe Mountains": 50,
"Redfeather Isle": 430,
"Relic, the Artifact City": 370,
"Riftseekers' Sanctum": 334,
"Rivervale": 19,
"Riwwi, Coliseum of Games": 282,
"Blackfeather Roost": 398,
"The Rujarkian Hills: Bloodied Quarries": 230,
"The Rujarkian Hills: Halls of War": 235,
"The Rujarkian Hills: Wind Bridges": 240,
"The Rujarkian Hills: Prison Break": 245,
"The Rujarkian Hills: Drudge Hollows": 250,
"The Rujarkian Hills: Fortified Lair of the Taskmasters": 255,
"The Rujarkian Hills: Hidden Vale of Deceit": 260,
"The Rujarkian Hills: Blazing Forge ": 265,
"The Rujarkian Hills: Arena of Chance": 269,
"The Rujarkian Hills: Barracks of War": 273,
"The Liberated Citadel of Runnyeye": 11,
"The Scarlet Desert": 175,
"The Ruins of Sebilis": 89,
"Shadeweaver's Thicket": 165,
"Shadow Haven": 150,
"Shadowrest": 187,
"Shadow Spine": 364,
"The City of Shar Vahl": 155,
"The Open Sea": 435,
"The Open Sea": 431,
"The Open Sea": 432,
"The Open Sea": 433,
"The Open Sea": 434,
"S.H.I.P. Workshop": 439,
"Silyssar, New Chelsith": 420,
"Siren's Grotto": 125,
"The Skyfire Mountains": 91,
"Skylance": 371,
"Skyshrine": 114,
"The Sleeper's Tomb": 128,
"Sewers of Nihilia, Emanating Cre": 288,
"Sewers of Nihilia, Lair of Trapp": 286,
"Sewers of Nihilia, Purifying Pla": 287,
"Sewers of Nihilia, Pool of Sludg": 285,
"Solusek's Eye": 31,
"Nagafen's Lair": 32,
"The Caverns of Exile": 278,
"The Tower of Solusek Ro": 212,
"The Temple of Solusek Ro": 80,
"Solteris, the Throne of Ro": 421,
"The Southern Plains of Karana": 14,
"South Desert of Ro": 393,
"Southern Desert of Ro": 35,
"Sanctus Seru": 159,
"Ssraeshza Temple": 162,
"The Steam Factory": 438,
"Steamfont Mountains": 56,
"The Steamfont Mountains": 448,
"The Steppes": 399,
"Stillmoon Temple": 338,
"The Ascent": 339,
"The Stonebrunt Mountains": 100,
"Stone Hive": 396,
"Suncrest Isle": 426,
"Sunderock Springs": 403,
"The Swamp of No Hope": 83,
"Tacvi, The Broken Temple": 298,
"Takish-Hiz: Sunken Library": 231,
"Takish-Hiz: Shifting Tower": 236,
"Takish-Hiz: Fading Temple": 241,
"Takish-Hiz: Royal Observatory": 246,
"Takish-Hiz: River of Recollection": 251,
"Takish-Hiz: Sandfall Corridors": 256,
"Takish-Hiz: Balancing Chamber": 261,
"Takish-Hiz: Sweeping Tides": 266,
"Takish-Hiz: Antiquated Palace": 270,
"Ruins of Takish-Hiz": 376,
"The Root of Ro": 377,
"Takish-Hiz: Prismatic Corridors": 274,
"The Temple of Veeshan": 124,
"The Tenebrous Mountains": 172,
"Thalassius, the Coral Keep": 417,
"Theater of Blood": 380,
"Deathknell, Tower of Dissonance": 381,
"The Deep": 164,
"The Grey": 171,
"The Nest": 343,
"The Void": 459,
"The Void": 460,
"The Void": 461,
"The Void": 462,
"The Void": 463,
"The Void": 464,
"The Void": 465,
"Thundercrest Isles": 340,
"The City of Thurgadin": 115,
"Icewell Keep": 129,
"Timorous Deep": 96,
"Tipt, Treacherous Crags": 289,
"The Torgiran Mines": 226,
"Toskirakk": 475,
"Toxxulia Forest": 38,
"Toxxulia Forest": 414,
"Trakanon's Teeth": 95,
"EverQuest Tutorial": 183,
"The Mines of Gloomingdeep": 188,
"The Mines of Gloomingdeep": 189,
"The Twilight Sea": 170,
"Txevu, Lair of the Elite": 297,
"The Umbral Plains": 176,
"The Estate of Unrest": 63,
"Uqua, the Ocean God Chantry": 292,
"Valdeholm": 401,
"Veeshan's Peak": 108,
"Veksar": 109,
"Velketor's Labyrinth": 112,
"Vergalid Mines": 404,
"Vex Thal": 158,
"Vxed, the Crumbling Caverns": 290,
"The Wakening Land": 119,
"Wall of Slaughter": 300,
"The Warrens": 101,
"The Warsliks Woods": 79,
"Stoneroot Falls": 358,
"Prince's Manor": 359,
"Caverns of the Lost": 360,
"Lair of the Korlach": 361,
"The Western Wastes": 120,
"Yxtta, Pulpit of Exiles ": 291,
"Zhisza, the Shissar Sanctuary": 419,
"The Nektulos Forest": 25,
"Brell's Rest": 480,
"The Cooling Chamber": 483,
"Pellucid Grotto": 488,
"Arthicrex": 485,
"The Foundation": 486,
"The Underquarry": 482,
"Brell's Arena": 492,
"Volska's Husk": 489,
"The Convorteum": 491,
"The Library": 704,
"Morell's Castle": 707,
"Al'Kabor's Nightmare": 709,
"Erudin Burning": 706,
"The Feerrott": 700,
"The Grounds": 703,
"Miragul's Nightmare": 710,
"Sanctum Somnium": 708,
"Fear Itself": 711,
"House of Thule": 701,
"House of Thule, Upper Floors": 702,
"The Well": 705,
"Sunrise Hills": 712,
"Argath, Bastion of Illdaera": 724,
"Valley of Lunanyn": 725,
"Sarith, City of Tides": 726,
"Rubak Oseka, Temple of the Sea": 727,
"Beasts' Domain": 728,
"The Resplendent Temple": 729,
"Pillars of Alra": 730,
"Windsong Sanctuary": 731,
"Erillion, City of Bronze": 732,
"Sepulcher of Order": 733,
"Sepulcher East": 734,
"Sepulcher West": 735,
"Wedding Chapel": 493,
"Wedding Chapel": 494,
"Lair of the Risen": 495,
"The Bazaar": 151,
"Brell's Temple": 490,
"Fungal Forest": 481,
"Lichen Creep": 487,
"Kernagir, the Shining City": 484,
"The Breeding Grounds": 757,
"Chapterhouse of the Fallen": 760,
"The Crystal Caverns: Fragment of Fear": 756,
"East Wastes: Zeixshi-Kar's Awakening": 755,
"Evantil, the Vile Oak": 758,
"Grelleth's Palace, the Chateau of Filth": 759,
"Kael Drakkel: The King's Madness": 754,
"Shard's Landing": 752,
"Valley of King Xorbb": 753,
}
def factionsetname(item):
"Generates faction set name"
return re.sub(' ', '', item[0]) + re.sub('-', '', item[1])
def cleanmobname(name):
"Cleans mob name for DB look up"
return re.sub(' ', '_', name)
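# For example (values illustrative): factionsetname(('Crushbone Orcs', '-5'))
# yields 'CrushboneOrcs5', and cleanmobname('a cave bear') yields 'a_cave_bear',
# suitable for the RLIKE lookup against npc_types.name used below.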
class FactionSet(object):
"""
FactionSet class
name: name of the faction set
primary: primary faction ID
hits: faction hits assumes a dict like object
faction ID: hit value
"""
def __init__(self, name, primid, hits):
self.name = name
self.primary = primid
self.hits = hits.copy()
def __repr__(self):
return str((self.name, self.primary, self.hits))
# factionsets[name].hits[key] == factionsets[name][key]
def __getitem__(self, key):
return self.hits[key]
# names need to be unique to the set to work
def __eq__(self, other):
return self.name == other.name
def __contains__(self, key):
"Wrapper to key in hits"
return key in self.hits
def generate_sql(self):
"Generates SQL statements"
statement = ('INSERT INTO npc_faction (name, primaryfaction) VALUES '
'(\'{}\', \'{}\');\n'.format(self.name, self.primary) +
'SELECT id INTO @setid FROM npc_faction WHERE name = '
'\'{}\' LIMIT 1;\n'.format(self.name))
for hit in self.hits:
statement += ('INSERT INTO npc_faction_entries '
'(npc_faction_id, faction_id, value, npc_value) '
'VALUES (@setid, \'{}\', \'{}\', \'{}\');\n'
.format(hit, self.hits[hit],
1 if int(self.hits[hit]) < 0 else 0))
return statement
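# Hedged example of what one set emits (faction id 63 is 'Crushbone Orcs' in
# factiontable above; the hit value is illustrative):
#   fs = FactionSet('CrushboneOrcs5', '63', {'63': '-5'})
#   print(fs.generate_sql())
# produces an INSERT into npc_faction, a SELECT of its id into @setid, and one
# INSERT into npc_faction_entries per faction hit.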
class Mob(object):
"""
Mob class
name: name of mob
zone: zone ID for mob
faction: faction set name
"""
def __init__(self, name, zone, faction):
self.name = name
self.zone = zone
self.faction = faction
def __repr__(self):
return str((self.name, self.zone, self.faction))
def __eq__(self, other):
return self.name == other.name and self.zone == other.zone
def generate_sql(self):
"Generates SQL statements"
return ('UPDATE npc_types SET npc_faction_id = @{} WHERE '
'name RLIKE \'{}\' AND id >= {} AND id <= {};'
.format(self.faction, cleanmobname(self.name), self.zone * 1000,
self.zone * 1000 + 999))
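# Hedged example (zone 58 is Crushbone in zonetable above; the mob name is
# illustrative):
#   Mob('an orc pawn', 58, 'CrushboneOrcs5').generate_sql()
# updates npc_types rows whose name matches 'an_orc_pawn' and whose id falls in
# 58000-58999, pointing them at the @CrushboneOrcs5 faction set.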
def main(filename):
"Processes eqlog and generates SQL to update mob factions"
if not os.path.exists(filename):
print(filename + ' not found')
exit(-1)
pfaction = re.compile(r'\[.*\] Your faction standing with (.*) has been '
r'adjusted by (.*)\.')
pslain1 = re.compile(r'\[.*\] You have slain (.*)!')
pslain2 = re.compile(r'\[.*\] (.*) has been slain by .*!')
penter = re.compile(r'\[.*\] You have entered (.*)\.')
factions = {} # mob: mob object
factionsets = {} # set name: set object
hits = collections.OrderedDict() # faction ID: value
nohits = [] # mobs with no faction hits
setname = None
primary = None
zone = None
eqlog = open(filename, 'r')
for line in eqlog:
m = penter.match(line)
if m:
if not re.search('PvP|levitation', line):
zone = zonetable[m.group(1)] if \
m.group(1) in zonetable else m.group(1)
continue
m = pfaction.match(line)
if m:
if not setname and not hits.items():
setname = factionsetname(m.groups())
primary = factiontable[m.group(1)]
hits[factiontable[m.group(1)]] = m.group(2)
continue
m = pslain1.match(line)
if not m:
m = pslain2.match(line)
if m:
# hits will be empty if no faction hits, so we skip it
if m.group(1) not in factions and hits.items():
factions[m.group(1)] = Mob(m.group(1), zone, setname)
if setname not in factionsets:
factionsets[setname] = FactionSet(setname, primary, hits)
elif not hits.items():
nohits.append(m.group(1))
hits.clear()
setname = None
primary = None
continue
eqlog.close()
print('-- Faction set entries')
for fset in factionsets.values():
print(fset.generate_sql())
print('-- Mob entries')
for setname in factionsets:
print('SELECT id INTO @{0} FROM npc_faction WHERE name = \'{0}\' '
'LIMIT 1;'.format(setname))
print()
# The zone limiting assumes the mob ids follows PEQ's scheme
for mob in factions.values():
print(mob.generate_sql())
# This might output some pets
if len(nohits):
print('-- some of these might be pets')
for mob in nohits:
print('-- no faction hit {}'.format(mob))
return 0
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Incorrect arguments. python ' + sys.argv[0] + ' filename')
exit(-1)
main(sys.argv[1])
|
gpl-3.0
| -6,577,107,049,170,106,000
| 29.844045
| 80
| 0.576969
| false
| 2.635066
| false
| false
| false
|
Tilapiatsu/modo-tila_batchexporter
|
lxserv/Tila_BatchTransform.py
|
1
|
2520
|
#!/usr/bin/env python
import modo
import lx
import lxu.command
import lxu.select
import traceback
import Tila_BatchExportModule as t
from Tila_BatchExportModule import user_value
from Tila_BatchExportModule import batch_export
class CmdBatchExport(lxu.command.BasicCommand):
def __init__(self):
lxu.command.BasicCommand.__init__(self)
reload(user_value)
reload(t)
user_value.add_User_Values(self, t.userValues)
def cmd_Flags(self):
return lx.symbol.fCMD_MODEL | lx.symbol.fCMD_UNDO
def basic_Enable(self, msg):
return True
def cmd_Interact(self):
pass
def basic_Execute(self, msg, flags):
reload(t)
reload(batch_export)
try:
scn = modo.Scene()
currScn = modo.scene.current()
userSelection = scn.selected
userSelectionCount = len(userSelection)
olderSelection = []
currPath = currScn.filename
if currPath is None:
currPath = ""
scnIndex = lx.eval('query sceneservice scene.index ? current')
userValues = user_value.query_User_Values(self, t.kit_prefix)
tbe = batch_export.TilaBacthExport
userValues[1] = False
userValues[2] = False
if bool(userValues[0]):
olderSelection = userSelection
userSelection = tbe.select_visible_items(tbe(userSelection,
userSelectionCount,
scn,
currScn,
currPath,
scnIndex,
userValues))
userSelectionCount = len(userSelection)
tbe.batch_transform(tbe(userSelection,
userSelectionCount,
scn,
currScn,
currPath,
scnIndex,
userValues))
if bool(userValues[0]):
scn.select(olderSelection)
except:
lx.out(traceback.format_exc())
def cmd_Query(self, index, vaQuery):
lx.notimpl()
lx.bless(CmdBatchExport, t.TILA_BATCH_TRANSFORM)
|
mit
| -7,176,125,561,679,100,000
| 28.658824
| 76
| 0.483333
| false
| 4.701493
| false
| false
| false
|
no13bus/btcproject
|
btc/tasks.py
|
1
|
13215
|
#encoding=utf-8
from __future__ import absolute_import
from celery import shared_task
# from celery.task import task
from btcproject import celery_app
from btc.lib.okcoin import *
from btc.lib.btceapi import *
from btc.lib.bitfinex import *
from btc.lib.huobi import *
from btc.lib.btcchina import *
from celery import Celery,platforms,group
import time
import pprint
import datetime
from btc.models import *
from datetime import timedelta
from django.utils.timezone import utc
from django.conf import settings
import logging
import logging.handlers
from mailer import Mailer
from mailer import Message
LOG_FILE = 'btc_celery.log'
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes = 1024*1024*20, backupCount = 10)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
logger = logging.getLogger('btc_celery')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
@celery_app.task
def send_btc_mail(subjectstring, messagestring):
message = Message(From=settings.MAILSENDER,To=settings.TOLIST, charset="utf-8")
message.Subject = subjectstring
message.Html = messagestring
mysender = Mailer(host=settings.MAILHOST, pwd=settings.MAILPWD, usr=settings.MAILSENDER)
mysender.send(message)
@celery_app.task
def set_bool_task():
if settings.SENDBOOL == False:
logger.debug('set_bool is seted to true')
settings.SENDBOOL = True
@celery_app.task
def bitfinex_task_btc(bfx):
payload = {}
book = bfx.book(payload)
bitfinex_seller_price = float(book['asks'][1]['price'])
bitfinex_buyer_price = float(book['bids'][1]['price'])
bitfinex_buyer_price_done = float(book['asks'][5]['price'])
bitfinex_seller_price_done = float(book['bids'][5]['price'])
return [bitfinex_seller_price, bitfinex_buyer_price, bitfinex_seller_price_done, bitfinex_buyer_price_done]
@celery_app.task
def bitfinex_task_ltc(bfx):
payload = {}
ltc_book = bfx.book(payload,'ltcusd')
bitfinex_seller_price_ltc = float(ltc_book['asks'][1]['price'])
bitfinex_buyer_price_ltc = float(ltc_book['bids'][1]['price'])
bitfinex_buyer_price_ltc_done = float(ltc_book['bids'][5]['price'])
bitfinex_seller_price_ltc_done = float(ltc_book['asks'][5]['price'])
return [bitfinex_seller_price_ltc, bitfinex_buyer_price_ltc, bitfinex_seller_price_ltc_done, bitfinex_buyer_price_ltc_done]
@celery_app.task
def bitfinex_task_info(bfx):
user_info = bfx.balances()
if [i['amount'] for i in user_info if i['type']=='exchange' and i['currency']=='usd']:
usd = float([i['amount'] for i in user_info if i['type']=='exchange' and i['currency']=='usd'][0])
else:
usd = 0.0
if [i['amount'] for i in user_info if i['type']=='exchange' and i['currency']=='ltc']:
ltc = float([i['amount'] for i in user_info if i['type']=='exchange' and i['currency']=='ltc'][0])
else:
ltc = 0.0
if [i['amount'] for i in user_info if i['type']=='exchange' and i['currency']=='btc']:
btc = float([i['amount'] for i in user_info if i['type']=='exchange' and i['currency']=='btc'][0])
else:
btc = 0.0
return [usd,btc,ltc]
@celery_app.task
def btce_task_btc(btce):
de = btce.get_Depth()
btce_seller_price = de['bids'][1][0]
btce_buyer_price = de['asks'][1][0]
btce_seller_price_done = de['bids'][5][0]
btce_buyer_price_done = de['asks'][5][0]
return [btce_seller_price, btce_buyer_price, btce_seller_price_done, btce_buyer_price_done]
@celery_app.task
def btce_task_ltc(btce):
ltc_de = btce.get_Depth('ltc_usd')
btce_seller_price_ltc = ltc_de['bids'][1][0]
btce_buyer_price_ltc = ltc_de['asks'][1][0]
btce_seller_price_ltc_done = ltc_de['bids'][5][0]
btce_buyer_price_ltc_done = ltc_de['asks'][5][0]
return [btce_seller_price_ltc, btce_buyer_price_ltc, btce_seller_price_ltc_done, btce_buyer_price_ltc_done]
@celery_app.task
def btce_task_info(btce):
user_info = btce.getInfo()
usd = user_info['return']['funds']['usd']
btc = user_info['return']['funds']['btc']
ltc = user_info['return']['funds']['ltc']
return [usd,btc,ltc]
@celery_app.task
def okcoin_task_btc(okcoin,rate):
de = okcoin.get_depth()
okcoin_seller_price = de['asks'][-1][0]
okcoin_buyer_price = de['bids'][1][0]
okcoin_seller_price_done = de['asks'][-5][0]
okcoin_buyer_price_done = de['bids'][5][0]
return [okcoin_seller_price, okcoin_buyer_price, okcoin_seller_price_done, okcoin_buyer_price_done]
@celery_app.task
def okcoin_task_ltc(okcoin,rate):
ltc_de = okcoin.get_depth_ltc()
okcoin_seller_price_ltc = ltc_de['asks'][-1][0]
okcoin_buyer_price_ltc = ltc_de['bids'][1][0]
okcoin_seller_price_ltc_done = ltc_de['asks'][-5][0]
okcoin_buyer_price_ltc_done = ltc_de['bids'][5][0]
return [okcoin_seller_price_ltc, okcoin_buyer_price_ltc, okcoin_seller_price_ltc_done, okcoin_buyer_price_ltc_done]
@celery_app.task
def okcoin_task_info(okcoin,rate):
user_info = okcoin.get_account()
cny = float(user_info['info']['funds']['free']['cny'])
ltc = float(user_info['info']['funds']['free']['ltc'])
btc = float(user_info['info']['funds']['free']['btc'])
return [cny,btc,ltc]
@celery_app.task
def huobi_task_btc(huobi,rate):
de = huobi.get_depth('btc')
huobi_seller_price = float(de['asks'][-1][0])
huobi_buyer_price = float(de['bids'][1][0])
huobi_buyer_price_done = float(de['bids'][5][0])
huobi_seller_price_done = float(de['asks'][-5][0])
return [huobi_seller_price, huobi_buyer_price, huobi_seller_price_done, huobi_buyer_price_done]
@celery_app.task
def huobi_task_ltc(huobi,rate):
ltc_de = huobi.get_depth('ltc')
huobi_seller_price_ltc = float(ltc_de['asks'][-1][0])
huobi_buyer_price_ltc = float(ltc_de['bids'][1][0])
huobi_buyer_price_ltc_done = float(ltc_de['bids'][5][0])
huobi_seller_price_ltc_done = float(ltc_de['asks'][-5][0])
return [huobi_seller_price_ltc, huobi_buyer_price_ltc, huobi_seller_price_ltc_done, huobi_buyer_price_ltc_done]
@celery_app.task
def huobi_task_info(huobi,rate):
user_info = huobi.get_account_info()
cny = float(user_info['available_cny_display']) if 'available_cny_display' in user_info else 0.0
ltc = float(user_info['available_ltc_display']) if 'available_ltc_display' in user_info else 0.0
btc = float(user_info['available_btc_display']) if 'available_btc_display' in user_info else 0.0
return [cny,btc,ltc]
### http.cannot requests
@celery_app.task
def btcchina_task_btc(btcchina,rate):
de = btcchina.get_depth()
btcchina_seller_price = de['asks'][-1][0]
btcchina_buyer_price = de['bids'][1][0]
btcchina_buyer_price_done = de['bids'][3][0]
btcchina_seller_price_done = de['asks'][-3][0]
return [btcchina_seller_price, btcchina_buyer_price, btcchina_seller_price_done, btcchina_buyer_price_done]
@celery_app.task
def btcchina_task_ltc(btcchina,rate):
ltc_de = btcchina.get_depth('ltccny')
btcchina_seller_price_ltc = ltc_de['asks'][-1][0]
btcchina_buyer_price_ltc = ltc_de['bids'][1][0]
btcchina_buyer_price_ltc_done = ltc_de['bids'][3][0]
btcchina_seller_price_ltc_done = ltc_de['asks'][-3][0]
return [btcchina_seller_price_ltc, btcchina_buyer_price_ltc, btcchina_seller_price_ltc_done, btcchina_buyer_price_ltc_done]
@celery_app.task
def btcchina_task_info(bc,rate):
user_info = bc.get_account_info()
cny = user_info['balance']['cny']['amount']
ltc = user_info['balance']['ltc']['amount']
btc = user_info['balance']['btc']['amount']
cny = float(cny)
ltc = float(ltc)
btc = float(btc)
return [cny,btc,ltc]
@celery_app.task
def insert_buy_info(okcoin_buyprice,huobi_buyprice,btcchina_buyprice,bitfinex_buyprice,okcoin_buyprice_ltc,huobi_buyprice_ltc,btcchina_buyprice_ltc,bitfinex_buyprice_ltc,created):
now = datetime.datetime.utcnow().replace(tzinfo=utc)
p = Pricebuysell(okcoin_buyprice=okcoin_buyprice,huobi_buyprice=huobi_buyprice,
btcchina_buyprice=btcchina_buyprice, bitfinex_buyprice=bitfinex_buyprice,
okcoin_buyprice_ltc=okcoin_buyprice_ltc,huobi_buyprice_ltc=huobi_buyprice_ltc,
btcchina_buyprice_ltc=btcchina_buyprice_ltc, bitfinex_buyprice_ltc=bitfinex_buyprice_ltc,created=now)
p.save()
#### Fetch current trade prices in real time and save them to MySQL so they can be shown on the front end
@celery_app.task
def user_trade():
#### admin's settings
user = Userprofile.objects.filter(id=1)
user = user[0]
rate = user.rate
amount = user.amount
ltcamount = user.ltcamount
auto_trade = user.auto_trade
user_id = user.user.id
okcoin2bitfinex = user.okCoin2bitfinex
bitfinex2okcoin = user.bitfinex2okCoin
okcoin2huobi = user.okCoin2huobi
huobi2okcoin = user.huobi2okCoin
okcoin2btcchina = user.okCoin2btcchina
btcchina2okcoin = user.btcchina2okCoin
huobi2btcchina = user.huobi2btcchina
btcchina2huobi = user.btcchina2huobi
huobi2bitfinex = user.huobi2bitfinex
bitfinex2huobi = user.bitfinex2huobi
bitfinex2btcchina = user.bitfinex2btcchina
btcchina2bitfinex = user.btcchina2bitfinex
okcoin2bitfinex_ltc = user.okCoin2bitfinex_ltc
bitfinex2okcoin_ltc = user.bitfinex2okCoin_ltc
okcoin2huobi_ltc = user.okCoin2huobi_ltc
huobi2okcoin_ltc = user.huobi2okCoin_ltc
okcoin2btcchina_ltc = user.okCoin2btcchina_ltc
btcchina2okcoin_ltc = user.btcchina2okCoin_ltc
huobi2btcchina_ltc = user.huobi2btcchina_ltc
btcchina2huobi_ltc = user.btcchina2huobi_ltc
huobi2bitfinex_ltc = user.huobi2bitfinex_ltc
bitfinex2huobi_ltc = user.bitfinex2huobi_ltc
bitfinex2btcchina_ltc = user.bitfinex2btcchina_ltc
btcchina2bitfinex_ltc = user.btcchina2bitfinex_ltc
##
okcoin = OkCoin(user.okcoin_key.__str__(),user.okcoin_secret.__str__())
bfx = Bitfinex()
bfx.key = user.bitfinex_key.__str__()
bfx.secret = user.bitfinex_secret.__str__()
huobi = HuoBi(user.huobi_key.__str__(), user.huobi_secret.__str__())
btcchina = BTCChina(user.btcchina_key.__str__(), user.btcchina_secret.__str__())
g=group(bitfinex_task_btc.s(bfx), huobi_task_btc.s(huobi, rate),
btcchina_task_btc.s(btcchina, rate), okcoin_task_btc.s(okcoin, rate),
bitfinex_task_ltc.s(bfx), huobi_task_ltc.s(huobi, rate),
btcchina_task_ltc.s(btcchina, rate), okcoin_task_ltc.s(okcoin, rate),
bitfinex_task_info.s(bfx),huobi_task_info.s(huobi, rate),btcchina_task_info.s(btcchina, rate),okcoin_task_info.s(okcoin, rate))
result = g().get()
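# result follows the order of the group() above (indices are a reading aid):
#   result[0..3]  -> btc order-book prices [seller, buyer, seller_done, buyer_done]
#                    for bitfinex, huobi, btcchina and okcoin respectively
#   result[4..7]  -> the same four exchanges for ltc
#   result[8..11] -> account balance info (not used further in this task)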
okcoin_buyprice_btc = result[3][1]
huobi_buyprice_btc = result[1][1]
btcchina_buyprice_btc = result[2][1]
bitfinex_buyprice_btc = result[0][1]
okcoin_sellprice_btc = result[3][0]
huobi_sellprice_btc = result[1][0]
btcchina_sellprice_btc = result[2][0]
bitfinex_sellprice_btc = result[0][0]
okcoin_buyprice_ltc = result[7][1]
huobi_buyprice_ltc = result[5][1]
btcchina_buyprice_ltc = result[6][1]
bitfinex_buyprice_ltc = result[4][1]
okcoin_sellprice_ltc = result[7][0]
huobi_sellprice_ltc = result[5][0]
btcchina_sellprice_ltc = result[6][0]
bitfinex_sellprice_ltc = result[4][0]
created = datetime.datetime.utcnow().replace(tzinfo=utc)
insert_buy_info.delay(okcoin_buyprice_btc,huobi_buyprice_btc,btcchina_buyprice_btc,bitfinex_buyprice_btc,
okcoin_buyprice_ltc,huobi_buyprice_ltc,btcchina_buyprice_ltc,bitfinex_buyprice_ltc,created)
@celery_app.task
def tradedate():
user = Userprofile.objects.filter(id=1)
user = user[0]
rate = user.rate
amount = user.amount
ltcamount = user.ltcamount
auto_trade = user.auto_trade
user_id = user.user.id
huobi = HuoBi(user.huobi_key.__str__(), user.huobi_secret.__str__())
huobi_j = huobi.get_trades_history('btc')
trade_dates = huobi_j['trades']
for data in trade_dates:
price = data['price']
amount = data['amount']
mtype = data['type']
created_day = datetime.datetime.now().strftime("%Y-%m-%d")
mtime = '%s %s' % (created_day, data['time'])
now = datetime.datetime.utcnow().replace(tzinfo=utc)
td = Tradedate.objects.filter(mtime=mtime,price=price,amount=amount,mtype=mtype)
if not td:
trade_item = Tradedate(mtime=mtime,price=price,amount=amount,mtype=mtype,created=now)
trade_item.save()
@celery_app.task
def tradedate_analysis():
t_delta = datetime.timedelta(seconds=60)
nowdate = datetime.datetime.now()
start_time = nowdate.strftime("%Y-%m-%d %H:%M:%S")
end_time = (nowdate - t_delta).strftime("%Y-%m-%d %H:%M:%S")
td = Tradedate.objects.filter(mtime__gte=end_time, mtime__lte=start_time).order_by('-mtime')
if not td:
return
avg_price = sum([item.price for item in td]) / len(td)
avg_price = round(avg_price,4)
buy_data = td.filter(mtype=u'买入')  # u'买入' is the exchange's label for "buy"
buy_amount = sum([item.amount for item in buy_data])
buy_amount = round(buy_amount,4)
sell_data = td.filter(mtype=u'卖出')  # u'卖出' is the exchange's label for "sell"
sell_amount = sum([item.amount for item in sell_data])
sell_amount = round(sell_amount,4)
if buy_amount > sell_amount:
buyorsell = 'buy'
else:
buyorsell = 'sell'
if not Tradedate_analysis.objects.filter(start_time=start_time,end_time=end_time):
now = datetime.datetime.utcnow().replace(tzinfo=utc)
ta = Tradedate_analysis(buyorsell=buyorsell,avg_price=avg_price,start_time=start_time,end_time=end_time,
buy_amount=buy_amount,sell_amount=sell_amount,created=now)
ta.save()
|
mit
| -85,900,734,797,334,750
| 36.043353
| 179
| 0.697713
| false
| 2.358961
| false
| false
| false
|
valuesandvalue/valuesandvalue
|
vavs_project/fbdata/fields.py
|
1
|
2071
|
# fbdata.fields
# DJANGO
from django.db import models
from django.utils import six
# SOUTH
from south.modelsinspector import add_introspection_rules
class IntegerListField(models.Field):
description = "Integer List"
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 120
super(IntegerListField, self).__init__(*args, **kwargs)
def db_type(self, connection):
return 'char(%s)' % self.max_length
def get_internal_type(self):
return 'CharField'
def to_python(self, value):
if isinstance(value, basestring):
return [int(s) for s in value.split(',') if s.isdigit()]
elif isinstance(value, list):
return value
def get_prep_value(self, value):
return ','.join([str(v) for v in value])
def pre_save(self, model_instance, add):
value = getattr(model_instance, self.attname)
if not value and self.default:
value = list(self.default)
setattr(model_instance, self.attname, value)
return value
def get_prep_lookup(self, lookup_type, value):
# We only handle 'exact' and 'in'. All others are errors.
if lookup_type == 'exact':
return self.get_prep_value(value)
elif lookup_type == 'in':
return [self.get_prep_value(v) for v in value]
else:
raise TypeError('Lookup type %r not supported.' % lookup_type)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_db_prep_value(value)
def formfield(self, **kwargs):
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(IntegerListField, self).formfield(**defaults)
add_introspection_rules([
(
[IntegerListField], # Class(es) these apply to
[], # Positional arguments (not used)
{}, # Keyword argument
),
], ["^fbdata\.fields\.IntegerListField"])
|
mit
| 5,486,886,093,068,114,000
| 30.378788
| 74
| 0.593916
| false
| 3.944762
| false
| false
| false
|
jcurry/ZenPacks.ZenSystems.Juniper
|
ZenPacks/ZenSystems/Juniper/modeler/plugins/JuniperFPCMap.py
|
1
|
6265
|
##########################################################################
# Author: Jane Curry, jane.curry@skills-1st.co.uk
# Date: February 28th, 2011
# Revised: Extra debugging added Aug 23, 2011
#
# JuniperFPC modeler plugin
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
##########################################################################
__doc__ = """JuniperFPCMap
Gather table information from Juniper Contents tables
"""
import re
from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetMap, GetTableMap
class JuniperFPCMap(SnmpPlugin):
"""Map Juniper FPC table to model."""
maptype = "JuniperFPCMap"
modname = "ZenPacks.ZenSystems.Juniper.JuniperFPC"
relname = "JuniperFP"
compname = ""
snmpGetTableMaps = (
GetTableMap('jnxContentsTable',
'.1.3.6.1.4.1.2636.3.1.8.1',
{
'.1': 'containerIndex',
'.5': 'FPCType',
'.6': 'FPCDescr',
'.7': 'FPCSerialNo',
'.8': 'FPCRevision',
'.10': 'FPCPartNo',
'.11': 'FPCChassisId',
'.12': 'FPCChassisDescr',
'.13': 'FPCChassisCLEI',
}
),
GetTableMap('jnxOperatingTable',
'.1.3.6.1.4.1.2636.3.1.13.1',
{
'.6': 'FPCState',
'.7': 'FPCTemp',
'.8': 'FPCCPU',
'.13': 'FPCUpTime',
'.15': 'FPCMemory',
}
),
GetTableMap('jnxContainersTable',
'.1.3.6.1.4.1.2636.3.1.6.1',
{
'.1': 'containerIndex',
'.3': 'containerLevel',
'.4': 'containerNextLevel',
'.5': 'containerType',
'.6': 'containerDescr',
}
),
)
def process(self, device, results, log):
"""collect snmp information from this device"""
log.info('processing %s for device %s', self.name(), device.id)
getdata, tabledata = results
rm = self.relMap()
contentsTable = tabledata.get('jnxContentsTable')
operatingTable = tabledata.get('jnxOperatingTable')
containersTable = tabledata.get('jnxContainersTable')
# If no data supplied then simply return
if not contentsTable:
log.warn( 'No SNMP response from %s for the %s plugin for contents', device.id, self.name() )
log.warn( "Data= %s", tabledata )
return
if not operatingTable:
log.warn( 'No SNMP response from %s for the %s plugin for operating system', device.id, self.name() )
log.warn( "Data= %s", tabledata )
return
if not containersTable:
log.warn( 'No SNMP response from %s for the %s plugin for containers', device.id, self.name() )
log.warn( "Data= %s", tabledata )
return
for oid, data in contentsTable.items():
try:
om = self.objectMap(data)
FPCDescr = om.FPCDescr
# log.info(' FPCDescr is %s ' % (om.FPCDescr))
isaFPC = re.match(r'(.*FPC.*)', FPCDescr.upper())
if not isaFPC:
continue
else:
for oid1, data1 in operatingTable.items():
if oid1 == oid:
om.FPCState = data1['FPCState']
om.FPCTemp = data1['FPCTemp']
om.FPCCPU = data1['FPCCPU']
om.FPCUpTime = data1['FPCUpTime']
om.FPCMemory = data1['FPCMemory']
for oid2, data2 in containersTable.items():
# log.info( ' oid is %s - oid2 is %s - data is %s' % (oid, oid2 , data2))
if oid.startswith(oid2):
om.containerDescr = data2['containerDescr']
if data2['containerLevel'] == 1:
om.containerDescr = '....' + om.containerDescr
elif data2['containerLevel'] == 2:
om.containerDescr = '........' + om.containerDescr
om.containerParentIndex = data2['containerNextLevel']
if om.containerParentIndex != 0:
for oid3, data3 in containersTable.items():
if oid3.endswith(str(om.containerParentIndex)):
om.containerParentDescr = data3['containerDescr']
om.snmpindex = oid1.strip('.')
                    # Convert FPCUpTime from milliseconds to days
om.FPCUpTime = om.FPCUpTime / 1000 / 60 / 60 /24
# Transform numeric FPCState into a status string via operatingStateLookup
if (om.FPCState < 1 or om.FPCState > 7):
om.FPCState = 1
om.FPCState = self.operatingStateLookup[om.FPCState]
om.id = self.prepId( om.FPCDescr.replace(' ','_') + '_' + str( om.snmpindex.replace('.','_') ) )
except (KeyError, IndexError, AttributeError, TypeError), errorInfo:
log.warn( ' Error in %s modeler plugin %s' % ( self.name(), errorInfo))
continue
rm.append(om)
# log.info('rm %s' % (rm) )
return rm
operatingStateLookup = { 1: 'Unknown',
2: 'Running',
3: 'Ready',
4: 'Reset',
5: 'RunningAtFullSpeed (Fan)',
6: 'Down',
7: 'Standby'
}
|
gpl-2.0
| 707,825,517,397,618,400
| 43.75
| 116
| 0.444054
| false
| 4.341649
| false
| false
| false
|
kussj/mesosbeat
|
scripts/generate_field_docs.py
|
1
|
2634
|
#!/usr/bin/env python
"""
This script generates asciidoc documentation from the fields yml file.
Usage: python generate_field_docs.py file.yml file.asciidoc
"""
import sys
import yaml
SECTIONS = [
("env", "Common"),
("cluster_health", "Contains elasticsearch cluster health statistics"),
("cluster_stats", "Contains elasticsearch cluster stats statistics"),
("cluster_node", "Contains elasticsearch node stats statistics")]
def document_fields(output, section):
if "anchor" in section:
output.write("[[exported-fields-{}]]\n".format(section["anchor"]))
output.write("=== {} Fields\n\n".format(section["name"]))
if "description" in section:
output.write("{}\n\n".format(section["description"]))
output.write("\n")
for field in section["fields"]:
if "type" in field and field["type"] == "group":
for sec, name in SECTIONS:
if sec == field["name"]:
field["anchor"] = field["name"]
field["name"] = name
break
document_fields(output, field)
else:
document_field(output, field)
def document_field(output, field):
if "path" not in field:
field["path"] = field["name"]
output.write("==== {}\n\n".format(field["path"]))
if "type" in field:
output.write("type: {}\n\n".format(field["type"]))
if "example" in field:
output.write("example: {}\n\n".format(field["example"]))
if "format" in field:
output.write("format: {}\n\n".format(field["format"]))
if "required" in field:
output.write("required: {}\n\n".format(field["required"]))
if "description" in field:
output.write("{}\n\n".format(field["description"]))
def fields_to_asciidoc(input, output):
output.write("""
////
This file is generated! See etc/fields.yml and scripts/generate_field_docs.py
////
[[exported-fields]]
== Exported Fields
This document describes the fields that are exported by
ApacheBeat. They are grouped in the
following categories:
""")
for doc, _ in SECTIONS:
output.write("* <<exported-fields-{}>>\n".format(doc))
output.write("\n")
docs = yaml.load(input)
for doc, name in SECTIONS:
if doc in docs:
section = docs[doc]
if "type" in section:
if section["type"] == "group":
section["name"] = name
section["anchor"] = doc
document_fields(output, section)
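# Worked example (hypothetical fields.yml entry): given
#
#   env:
#     type: group
#     fields:
#       - name: hostname
#         type: string
#         description: Host name.
#
# the loop above would emit roughly:
#
#   [[exported-fields-env]]
#   === Common Fields
#
#   ==== hostname
#
#   type: string
#
#   Host name.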
if __name__ == "__main__":
if len(sys.argv) != 3:
print ("Usage: %s file.yml file.asciidoc" % (sys.argv[0]))
sys.exit(1)
input = open(sys.argv[1], 'r')
output = open(sys.argv[2], 'w')
try:
fields_to_asciidoc(input, output)
finally:
input.close()
output.close()
|
apache-2.0
| -3,599,554,905,244,448,000
| 24.572816
| 77
| 0.629461
| false
| 3.452163
| false
| false
| false
|
fragaria/BorIS
|
post_migration_restart.py
|
1
|
2733
|
"""
This script should bring existing installations in line with the state
in repository. It is supposed to be run after:
1. The migration_restart branch has been merged to master and deployed.
2. south_migrationhistory has been truncated.
3. The initial migrations for clients and services have been faked.
"""
from django.contrib.auth.management import create_permissions
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.management import call_command
from django.db.models import get_models, get_app
from boris.services.management import proxy_permissions_fix
# First, create the missing permissions.
create_permissions(get_app('services'), get_models(), 2)
# Then remove the obsolete permissions.
# Obsolete models
contenttypes = (
('services', 'crisisintervention'),
('clients', 'riskybehavior'),
)
for app_label, model in contenttypes:
try:
ct = ContentType.objects.get(app_label=app_label, model=model)
except ContentType.DoesNotExist:
print 'ContentType for %s not found!' % model
else:
qset = Permission.objects.filter(content_type=ct)
print "Deleting %i permissions for %s" % (qset.count(), model)
qset.delete()
# Remove services proxy permissions.
services_ct = ContentType.objects.get(app_label='services', model='service')
codenames = [
'add_utilitywork',
'change_utilitywork',
'delete_utilitywork',
'add_incomeexamination',
'change_incomeexamination',
'delete_incomeexamination',
'add_individualcounselling',
'change_individualcounselling',
'delete_individualcounselling',
'add_phoneusage',
'change_phoneusage',
'delete_phoneusage',
]
print "Deleting the proxy permissions: %s" % ', '.join(codenames)
for codename in codenames:
qset = Permission.objects.filter(codename=codename, content_type=services_ct)
if qset.count() != 1:
print "Something's wrong with the %s permission." % codename
else:
qset.delete()
# Run the proxy permissions fix hook.
services = get_app('services')
proxy_permissions_fix.delete_proxy_permissions(services, get_models(services), 2)
# Delete the obsolete contenttypes.
contenttypes = (
('clients', 'riskybehavior'),
('services', 'practitionerencounter'),
)
for app_label, model in contenttypes:
try:
ct = ContentType.objects.get(app_label=app_label, model=model)
except ContentType.DoesNotExist:
print 'ContentType for %s not found!' % model
else:
print "Deleting contenttype: %s, %s" % (app_label, model)
ct.delete()
# Finally, reload the group permissions fixture.
call_command('loaddata', 'groups.json')
|
mit
| -3,734,604,905,836,807,700
| 32.329268
| 81
| 0.716429
| false
| 3.8331
| false
| false
| false
|
Zlash65/erpnext
|
erpnext/controllers/item_variant.py
|
1
|
11842
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cstr, flt
import json, copy
from six import string_types
class ItemVariantExistsError(frappe.ValidationError): pass
class InvalidItemAttributeValueError(frappe.ValidationError): pass
class ItemTemplateCannotHaveStock(frappe.ValidationError): pass
@frappe.whitelist()
def get_variant(template, args=None, variant=None, manufacturer=None,
manufacturer_part_no=None):
"""Validates Attributes and their Values, then looks for an exactly
matching Item Variant
	:param template: Template Item
:param args: A dictionary with "Attribute" as key and "Attribute Value" as value
"""
item_template = frappe.get_doc('Item', template)
if item_template.variant_based_on=='Manufacturer' and manufacturer:
return make_variant_based_on_manufacturer(item_template, manufacturer,
manufacturer_part_no)
else:
if isinstance(args, string_types):
args = json.loads(args)
if not args:
frappe.throw(_("Please specify at least one attribute in the Attributes table"))
return find_variant(template, args, variant)
def make_variant_based_on_manufacturer(template, manufacturer, manufacturer_part_no):
'''Make and return a new variant based on manufacturer and
manufacturer part no'''
from frappe.model.naming import append_number_if_name_exists
variant = frappe.new_doc('Item')
copy_attributes_to_variant(template, variant)
variant.manufacturer = manufacturer
variant.manufacturer_part_no = manufacturer_part_no
variant.item_code = append_number_if_name_exists('Item', template.name)
return variant
def validate_item_variant_attributes(item, args=None):
if isinstance(item, string_types):
item = frappe.get_doc('Item', item)
if not args:
args = {d.attribute.lower():d.attribute_value for d in item.attributes}
attribute_values, numeric_values = get_attribute_values(item)
for attribute, value in args.items():
if not value:
continue
if attribute.lower() in numeric_values:
numeric_attribute = numeric_values[attribute.lower()]
validate_is_incremental(numeric_attribute, attribute, value, item.name)
else:
attributes_list = attribute_values.get(attribute.lower(), [])
validate_item_attribute_value(attributes_list, attribute, value, item.name)
def validate_is_incremental(numeric_attribute, attribute, value, item):
from_range = numeric_attribute.from_range
to_range = numeric_attribute.to_range
increment = numeric_attribute.increment
if increment == 0:
# defensive validation to prevent ZeroDivisionError
frappe.throw(_("Increment for Attribute {0} cannot be 0").format(attribute))
is_in_range = from_range <= flt(value) <= to_range
precision = max(len(cstr(v).split(".")[-1].rstrip("0")) for v in (value, increment))
#avoid precision error by rounding the remainder
remainder = flt((flt(value) - from_range) % increment, precision)
is_incremental = remainder==0 or remainder==increment
if not (is_in_range and is_incremental):
frappe.throw(_("Value for Attribute {0} must be within the range of {1} to {2} in the increments of {3} for Item {4}")\
.format(attribute, from_range, to_range, increment, item),
InvalidItemAttributeValueError, title=_('Invalid Attribute'))
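# Worked example with hypothetical values: for from_range=1, to_range=10,
# increment=0.5 and value="7.5" the checks above give
#   is_in_range    -> 1 <= 7.5 <= 10          (True)
#   precision      -> max(len("5"), len("5")) == 1
#   remainder      -> flt((7.5 - 1) % 0.5, 1) == 0.0
# so the value is accepted; value="7.3" would leave a remainder of 0.3 and
# trigger the InvalidItemAttributeValueError above.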
def validate_item_attribute_value(attributes_list, attribute, attribute_value, item):
allow_rename_attribute_value = frappe.db.get_single_value('Item Variant Settings', 'allow_rename_attribute_value')
if allow_rename_attribute_value:
pass
elif attribute_value not in attributes_list:
frappe.throw(_("The value {0} is already assigned to an exisiting Item {2}.").format(
attribute_value, attribute, item), InvalidItemAttributeValueError, title=_('Rename Not Allowed'))
def get_attribute_values(item):
if not frappe.flags.attribute_values:
attribute_values = {}
numeric_values = {}
for t in frappe.get_all("Item Attribute Value", fields=["parent", "attribute_value"]):
attribute_values.setdefault(t.parent.lower(), []).append(t.attribute_value)
for t in frappe.get_all('Item Variant Attribute',
fields=["attribute", "from_range", "to_range", "increment"],
filters={'numeric_values': 1, 'parent': item.variant_of}):
numeric_values[t.attribute.lower()] = t
frappe.flags.attribute_values = attribute_values
frappe.flags.numeric_values = numeric_values
return frappe.flags.attribute_values, frappe.flags.numeric_values
def find_variant(template, args, variant_item_code=None):
conditions = ["""(iv_attribute.attribute={0} and iv_attribute.attribute_value={1})"""\
.format(frappe.db.escape(key), frappe.db.escape(cstr(value))) for key, value in args.items()]
conditions = " or ".join(conditions)
from erpnext.portal.product_configurator.utils import get_item_codes_by_attributes
possible_variants = [i for i in get_item_codes_by_attributes(args, template) if i != variant_item_code]
for variant in possible_variants:
variant = frappe.get_doc("Item", variant)
if len(args.keys()) == len(variant.get("attributes")):
# has the same number of attributes and values
# assuming no duplication as per the validation in Item
match_count = 0
for attribute, value in args.items():
for row in variant.attributes:
if row.attribute==attribute and row.attribute_value== cstr(value):
# this row matches
match_count += 1
break
if match_count == len(args.keys()):
return variant.name
@frappe.whitelist()
def create_variant(item, args):
if isinstance(args, string_types):
args = json.loads(args)
template = frappe.get_doc("Item", item)
variant = frappe.new_doc("Item")
variant.variant_based_on = 'Item Attribute'
variant_attributes = []
for d in template.attributes:
variant_attributes.append({
"attribute": d.attribute,
"attribute_value": args.get(d.attribute)
})
variant.set("attributes", variant_attributes)
copy_attributes_to_variant(template, variant)
make_variant_item_code(template.item_code, template.item_name, variant)
return variant
@frappe.whitelist()
def enqueue_multiple_variant_creation(item, args):
# There can be innumerable attribute combinations, enqueue
if isinstance(args, string_types):
variants = json.loads(args)
total_variants = 1
for key in variants:
total_variants *= len(variants[key])
if total_variants >= 600:
frappe.throw(_("Please do not create more than 500 items at a time"))
return
if total_variants < 10:
return create_multiple_variants(item, args)
else:
frappe.enqueue("erpnext.controllers.item_variant.create_multiple_variants",
item=item, args=args, now=frappe.flags.in_test);
return 'queued'
def create_multiple_variants(item, args):
count = 0
if isinstance(args, string_types):
args = json.loads(args)
args_set = generate_keyed_value_combinations(args)
for attribute_values in args_set:
if not get_variant(item, args=attribute_values):
variant = create_variant(item, attribute_values)
variant.save()
count +=1
return count
def generate_keyed_value_combinations(args):
"""
From this:
args = {"attr1": ["a", "b", "c"], "attr2": ["1", "2"], "attr3": ["A"]}
To this:
[
{u'attr1': u'a', u'attr2': u'1', u'attr3': u'A'},
{u'attr1': u'b', u'attr2': u'1', u'attr3': u'A'},
{u'attr1': u'c', u'attr2': u'1', u'attr3': u'A'},
{u'attr1': u'a', u'attr2': u'2', u'attr3': u'A'},
{u'attr1': u'b', u'attr2': u'2', u'attr3': u'A'},
{u'attr1': u'c', u'attr2': u'2', u'attr3': u'A'}
]
"""
# Return empty list if empty
if not args:
return []
# Turn `args` into a list of lists of key-value tuples:
# [
# [(u'attr2', u'1'), (u'attr2', u'2')],
# [(u'attr3', u'A')],
# [(u'attr1', u'a'), (u'attr1', u'b'), (u'attr1', u'c')]
# ]
key_value_lists = [[(key, val) for val in args[key]] for key in args.keys()]
# Store the first, but as objects
# [{u'attr2': u'1'}, {u'attr2': u'2'}]
results = key_value_lists.pop(0)
results = [{d[0]: d[1]} for d in results]
# Iterate the remaining
# Take the next list to fuse with existing results
for l in key_value_lists:
new_results = []
for res in results:
for key_val in l:
# create a new clone of object in result
obj = copy.deepcopy(res)
# to be used with every incoming new value
obj[key_val[0]] = key_val[1]
# and pushed into new_results
new_results.append(obj)
results = new_results
return results
def copy_attributes_to_variant(item, variant):
# copy non no-copy fields
exclude_fields = ["naming_series", "item_code", "item_name", "show_in_website",
"show_variant_in_website", "opening_stock", "variant_of", "valuation_rate"]
if item.variant_based_on=='Manufacturer':
# don't copy manufacturer values if based on part no
exclude_fields += ['manufacturer', 'manufacturer_part_no']
allow_fields = [d.field_name for d in frappe.get_all("Variant Field", fields = ['field_name'])]
if "variant_based_on" not in allow_fields:
allow_fields.append("variant_based_on")
for field in item.meta.fields:
# "Table" is part of `no_value_field` but we shouldn't ignore tables
if (field.reqd or field.fieldname in allow_fields) and field.fieldname not in exclude_fields:
if variant.get(field.fieldname) != item.get(field.fieldname):
if field.fieldtype == "Table":
variant.set(field.fieldname, [])
for d in item.get(field.fieldname):
row = copy.deepcopy(d)
if row.get("name"):
row.name = None
variant.append(field.fieldname, row)
else:
variant.set(field.fieldname, item.get(field.fieldname))
variant.variant_of = item.name
if 'description' not in allow_fields:
if not variant.description:
variant.description = ""
if item.variant_based_on=='Item Attribute':
if variant.attributes:
attributes_description = item.description + " "
for d in variant.attributes:
attributes_description += "<div>" + d.attribute + ": " + cstr(d.attribute_value) + "</div>"
if attributes_description not in variant.description:
variant.description += attributes_description
def make_variant_item_code(template_item_code, template_item_name, variant):
"""Uses template's item code and abbreviations to make variant's item code"""
if variant.item_code:
return
abbreviations = []
for attr in variant.attributes:
item_attribute = frappe.db.sql("""select i.numeric_values, v.abbr
from `tabItem Attribute` i left join `tabItem Attribute Value` v
on (i.name=v.parent)
where i.name=%(attribute)s and (v.attribute_value=%(attribute_value)s or i.numeric_values = 1)""", {
"attribute": attr.attribute,
"attribute_value": attr.attribute_value
}, as_dict=True)
if not item_attribute:
continue
# frappe.throw(_('Invalid attribute {0} {1}').format(frappe.bold(attr.attribute),
# frappe.bold(attr.attribute_value)), title=_('Invalid Attribute'),
# exc=InvalidItemAttributeValueError)
abbr_or_value = cstr(attr.attribute_value) if item_attribute[0].numeric_values else item_attribute[0].abbr
abbreviations.append(abbr_or_value)
if abbreviations:
variant.item_code = "{0}-{1}".format(template_item_code, "-".join(abbreviations))
variant.item_name = "{0}-{1}".format(template_item_name, "-".join(abbreviations))
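# Worked example with hypothetical data: for a template item code "TSHIRT" with
# variant attributes Colour=Red (abbr "RED") and Size=Large (abbr "L"), the loop
# above collects ["RED", "L"] and the variant item code becomes "TSHIRT-RED-L".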
@frappe.whitelist()
def create_variant_doc_for_quick_entry(template, args):
variant_based_on = frappe.db.get_value("Item", template, "variant_based_on")
args = json.loads(args)
if variant_based_on == "Manufacturer":
variant = get_variant(template, **args)
else:
existing_variant = get_variant(template, args)
if existing_variant:
return existing_variant
else:
variant = create_variant(template, args=args)
variant.name = variant.item_code
validate_item_variant_attributes(variant, args)
return variant.as_dict()
|
gpl-3.0
| 1,849,677,033,109,478,400
| 33.932153
| 121
| 0.708326
| false
| 3.235519
| false
| false
| false
|
FlorianLudwig/scope
|
setup.py
|
1
|
2287
|
# -*- coding: utf-8 -*-
import os
import sys
from distutils.command.sdist import sdist
from setuptools import setup, find_packages
import setuptools.command.test
class TestCommand(setuptools.command.test.test):
def finalize_options(self):
setuptools.command.test.test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
fails = []
from tox._config import parseconfig
from tox._cmdline import Session
config = parseconfig(self.test_args, 'tox')
retcode = Session(config).runcommand()
if retcode != 0:
fails.append('tox returned errors')
import pep8
style_guide = pep8.StyleGuide(config_file=BASE_PATH + '/.pep8')
style_guide.input_dir(BASE_PATH + '/rw')
if style_guide.options.report.get_count() != 0:
            fails.append('pep8 returned errors for rw/')
style_guide = pep8.StyleGuide(config_file=BASE_PATH + '/.pep8')
style_guide.input_dir(BASE_PATH + '/test')
if style_guide.options.report.get_count() != 0:
            fails.append('pep8 returned errors for test/')
if fails:
print('\n'.join(fails))
sys.exit(1)
setup(
name="scope",
version="0.0.1",
url='https://github.com/FlorianLudwig/scoe',
    description='call-stack based, nested dependency injection',
author='Florian Ludwig',
install_requires=['tornado>=4.0.0,<5.0'],
    extras_require={
'test': ['tox', 'pytest', 'pep8'],
'docs': ['sphinx_rtd_theme']
},
packages=['scope'],
include_package_data=True,
package_data={
'rw': ['*.html', '*.css', 'templates/html5', 'templates/form', 'templates/nginx']
},
cmdclass={
'test': TestCommand
},
license="http://www.apache.org/licenses/LICENSE-2.0",
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
)
|
apache-2.0
| -363,262,786,118,607,400
| 31.211268
| 89
| 0.601224
| false
| 3.76771
| true
| false
| false
|
lewisodriscoll/sasview
|
src/sas/sascalc/simulation/pointsmodelpy/tests/testlores2d.py
|
3
|
3235
|
from __future__ import print_function
def test_lores2d(phi):
from sasModeling.pointsmodelpy import pointsmodelpy
from sasModeling.iqPy import iqPy
from sasModeling.geoshapespy import geoshapespy
#lores model is to hold several geometric objects
lm = pointsmodelpy.new_loresmodel(0.1)
#generate single geometry shape
c = geoshapespy.new_cylinder(10,40)
geoshapespy.set_center(c,1,1,1)
geoshapespy.set_orientation(c,0,0,0)
#add single geometry shape to lores model
pointsmodelpy.lores_add(lm,c,3.0)
#retrieve the points from lores model for sas calculation
vp = pointsmodelpy.new_point3dvec()
pointsmodelpy.get_lorespoints(lm,vp)
#Calculate I(Q) and P(r) 2D
pointsmodelpy.distdistribution_xy(lm,vp)
pointsmodelpy.outputPR_xy(lm,"out_xy.pr")
iq = iqPy.new_iq(100,0.001, 0.3)
pointsmodelpy.calculateIQ_2D(lm,iq,phi)
iqPy.OutputIQ(iq, "out_xy.iq")
def get2d():
from math import pi
from Numeric import arange,zeros
from enthought.util.numerix import Float,zeros
from sasModeling.file2array import readfile2array
from sasModeling.pointsmodelpy import pointsmodelpy
from sasModeling.geoshapespy import geoshapespy
lm = pointsmodelpy.new_loresmodel(0.1)
sph = geoshapespy.new_sphere(20)
pointsmodelpy.lores_add(lm,sph,1.0)
vp = pointsmodelpy.new_point3dvec()
pointsmodelpy.get_lorespoints(lm,vp)
pointsmodelpy.distdistribution_xy(lm,vp)
value_grid = zeros((100,100),Float)
width, height = value_grid.shape
print(width,height)
I = pointsmodelpy.calculateI_Qxy(lm,0.00001,0.000002)
print(I)
Imax = 0
for i in range(width):
for j in range(height):
qx = float(i-50)/200.0
qy = float(j-50)/200.0
value_grid[i,j] = pointsmodelpy.calculateI_Qxy(lm,qx,qy)
if value_grid[i][j] > Imax:
Imax = value_grid[i][j]
for i in range(width):
for j in range(height):
value_grid[i][j] = value_grid[i][j]/Imax
value_grid[50,50] = 1
return value_grid
def get2d_2():
from math import pi
from Numeric import arange,zeros
from enthought.util.numerix import Float,zeros
from sasModeling.file2array import readfile2array
from sasModeling.pointsmodelpy import pointsmodelpy
from sasModeling.geoshapespy import geoshapespy
lm = pointsmodelpy.new_loresmodel(0.1)
cyn = geoshapespy.new_cylinder(5,20)
geoshapespy.set_orientation(cyn,0,0,90)
pointsmodelpy.lores_add(lm,cyn,1.0)
vp = pointsmodelpy.new_point3dvec()
pointsmodelpy.get_lorespoints(lm,vp)
pointsmodelpy.distdistribution_xy(lm,vp)
value_grid = zeros((100,100),Float)
width, height = value_grid.shape
print(width,height)
I = pointsmodelpy.calculateI_Qxy(lm,0.00001,0.000002)
print(I)
Imax = 0
for i in range(width):
for j in range(height):
qx = float(i-50)/200.0
qy = float(j-50)/200.0
value_grid[i,j] = pointsmodelpy.calculateI_Qxy(lm,qx,qy)
if value_grid[i][j] > Imax:
Imax = value_grid[i][j]
for i in range(width):
for j in range(height):
value_grid[i][j] = value_grid[i][j]/Imax
value_grid[50,50] = 1
return value_grid
if __name__ == "__main__":
print("start to test lores 2D")
# test_lores2d(10)
value_grid = get2d_2()
print(value_grid)
print("pass")
|
bsd-3-clause
| 863,925,136,733,527,400
| 26.415254
| 62
| 0.705719
| false
| 2.80816
| false
| false
| false
|
zhaochao/fuel-web
|
nailgun/nailgun/utils/zabbix.py
|
1
|
3931
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import simplejson as json
import urllib2
from nailgun.errors import errors
from nailgun.logger import logger
class ZabbixManager(object):
@classmethod
def _make_zabbix_request(cls, url, method, params, auth=None):
header = {'Content-type': 'application/json'}
data = {'jsonrpc': '2.0',
'id': '1',
'method': method,
'params': params}
if auth:
data['auth'] = auth
logger.debug("Zabbix request: %s", data)
request = urllib2.Request(url, json.dumps(data), header)
try:
response = urllib2.urlopen(request)
except urllib2.URLError as e:
raise errors.CannotMakeZabbixRequest(
"Can't make a request to Zabbix: {0}".format(e)
)
result = json.loads(response.read())
logger.debug("Zabbix response: %s", result)
if 'error' in result:
code = result['error']['code']
msg = result['error']['message']
data = result['error'].get('data', '')
raise errors.ZabbixRequestError(
"Zabbix returned error code {0}, {1}: {2}".format(
code, msg, data
)
)
return result['result']
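    # Sketch of the wire format built above (values are illustrative, not from a
    # real Zabbix server): a host.get call posts roughly
    #   {"jsonrpc": "2.0", "id": "1", "method": "host.get",
    #    "params": {"filter": {"host": "node-1"}}, "auth": "<auth hash>"}
    # and a successful reply carries the payload under "result", e.g.
    #   {"jsonrpc": "2.0", "result": [{"hostid": "10105"}], "id": "1"}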
@classmethod
def _zabbix_auth(cls, url, user, password):
method = 'user.authenticate'
params = {'user': user,
'password': password}
auth_hash = cls._make_zabbix_request(url, method, params)
return auth_hash
@classmethod
def _get_zabbix_hostid(cls, url, auth, name):
method = 'host.get'
params = {'filter': {'host': name}}
result = cls._make_zabbix_request(url, method, params, auth=auth)
if len(result) == 0:
logger.info("Host %s does not exist in zabbix, skipping", name)
return None
return result[0]['hostid']
@classmethod
def remove_from_zabbix(cls, zabbix, nodes):
url = zabbix['url']
user = zabbix['user']
password = zabbix['password']
auth = cls._zabbix_auth(url, user, password)
hostids = []
method = "host.delete"
for node in nodes:
name = node['slave_name']
hostid = cls._get_zabbix_hostid(url, auth, name)
if hostid:
hostids.append(hostid)
if hostids:
cls._make_zabbix_request(url, method, hostids, auth=auth)
@classmethod
def get_zabbix_node(cls, cluster):
zabbix_nodes = filter(
lambda node: filter(
lambda role: role.name == 'zabbix-server',
node.role_list
),
cluster.nodes
)
if not zabbix_nodes:
return None
return zabbix_nodes[0]
@classmethod
def get_zabbix_credentials(cls, cluster):
creds = {}
zabbix_node = cls.get_zabbix_node(cluster)
attributes = cluster.attributes
zabbix_attrs = attributes.editable['zabbix']
creds['user'] = zabbix_attrs['username']['value']
creds['password'] = zabbix_attrs['password']['value']
creds['url'] = "http://{0}/zabbix/api_jsonrpc.php".format(
zabbix_node.ip
)
return creds
|
apache-2.0
| -6,961,527,353,915,873,000
| 30.198413
| 78
| 0.568303
| false
| 4.007136
| false
| false
| false
|
lixiangning888/whole_project
|
modules/signatures_orginal_20151110/multiple_ua.py
|
1
|
2186
|
# Copyright (C) 2015 KillerInstinct
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class Multiple_UA(Signature):
name = "multiple_useragents"
description = "Network activity contains more than one unique useragent."
severity = 3
categories = ["network"]
authors = ["KillerInstinct"]
minimum = "1.2"
evented = True
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
self.useragents = list()
self.procs = list()
filter_apinames = set(["InternetOpenA", "InternetOpenW"])
def on_call(self, call, process):
# Dict whitelist with process name as key, and useragents as values
whitelist = {
"acrord32.exe": ["Mozilla/3.0 (compatible; Acrobat 5.0; Windows)"],
"iexplore.exe": ["VCSoapClient", "Shockwave Flash"],
}
ua = self.get_argument(call, "Agent")
proc = process["process_name"].lower()
if proc in whitelist.keys() and ua in whitelist[proc]:
return None
else:
if ua not in self.useragents:
if self.results["target"]["category"] == "file" or proc != "iexplore.exe":
self.useragents.append(ua)
self.procs.append((process["process_name"], ua))
def on_complete(self):
if len(self.useragents) < 2:
return False
for item in self.procs:
self.data.append({"Process" : item[0]})
self.data.append({"User-Agent" : item[1]})
return True
|
lgpl-3.0
| 7,584,770,034,356,479,000
| 36.050847
| 90
| 0.63495
| false
| 3.960145
| false
| false
| false
|
maclogan/VirtualPenPal
|
chatterbot/conversation/statement.py
|
1
|
4801
|
# -*- coding: utf-8 -*-
from .response import Response
from datetime import datetime
class Statement(object):
"""
A statement represents a single spoken entity, sentence or
phrase that someone can say.
"""
def __init__(self, text, **kwargs):
self.text = text
self.in_response_to = kwargs.pop('in_response_to', [])
# The date and time that this statement was created at
self.created_at = kwargs.pop('created_at', datetime.now())
self.extra_data = kwargs.pop('extra_data', {})
# This is the confidence with which the chat bot believes
# this is an accurate response. This value is set when the
# statement is returned by the chat bot.
self.confidence = 0
self.storage = None
def __str__(self):
return self.text
def __repr__(self):
return '<Statement text:%s>' % (self.text)
def __hash__(self):
return hash(self.text)
def __eq__(self, other):
if not other:
return False
if isinstance(other, Statement):
return self.text == other.text
return self.text == other
def save(self):
"""
Save the statement in the database.
"""
self.storage.update(self)
def add_extra_data(self, key, value):
"""
This method allows additional data to be stored on the statement object.
Typically this data is something that pertains just to this statement.
For example, a value stored here might be the tagged parts of speech for
each word in the statement text.
- key = 'pos_tags'
- value = [('Now', 'RB'), ('for', 'IN'), ('something', 'NN'), ('different', 'JJ')]
:param key: The key to use in the dictionary of extra data.
:type key: str
:param value: The value to set for the specified key.
"""
self.extra_data[key] = value
def add_response(self, response):
"""
Add the response to the list of statements that this statement is in response to.
If the response is already in the list, increment the occurrence count of that response.
:param response: The response to add.
:type response: `Response`
"""
if not isinstance(response, Response):
raise Statement.InvalidTypeException(
                'A {} was received when a {} instance was expected'.format(
type(response),
type(Response(''))
)
)
updated = False
for index in range(0, len(self.in_response_to)):
if response.text == self.in_response_to[index].text:
self.in_response_to[index].occurrence += 1
updated = True
if not updated:
self.in_response_to.append(response)
def remove_response(self, response_text):
"""
Removes a response from the statement's response list based
on the value of the response text.
:param response_text: The text of the response to be removed.
:type response_text: str
"""
for response in self.in_response_to:
if response_text == response.text:
self.in_response_to.remove(response)
return True
return False
def get_response_count(self, statement):
"""
Find the number of times that the statement has been used
as a response to the current statement.
:param statement: The statement object to get the count for.
:type statement: `Statement`
:returns: Return the number of times the statement has been used as a response.
:rtype: int
"""
for response in self.in_response_to:
if statement.text == response.text:
return response.occurrence
return 0
def serialize(self):
"""
:returns: A dictionary representation of the statement object.
:rtype: dict
"""
data = {}
data['text'] = self.text
data['in_response_to'] = []
data['created_at'] = self.created_at
data['extra_data'] = self.extra_data
for response in self.in_response_to:
data['in_response_to'].append(response.serialize())
return data
@property
def response_statement_cache(self):
"""
This property is to allow ChatterBot Statement objects to
be swappable with Django Statement models.
"""
return self.in_response_to
class InvalidTypeException(Exception):
        def __init__(self, value='Received an unexpected value type.'):
self.value = value
def __str__(self):
return repr(self.value)
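# Minimal usage sketch (illustrative; assumes the Response class from .response
# starts each response with an occurrence count of 1):
#
#   statement = Statement('Hello')
#   statement.add_response(Response('Hi there'))
#   statement.add_response(Response('Hi there'))        # bumps the occurrence count
#   statement.get_response_count(Statement('Hi there')) # -> 2
#   statement.serialize()['text']                        # -> 'Hello'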
|
bsd-3-clause
| 6,238,078,572,523,169,000
| 29.579618
| 96
| 0.580712
| false
| 4.524976
| false
| false
| false
|
mozman/ezdxf
|
examples/render/render_ellipse.py
|
1
|
1255
|
# Copyright (c) 2018-2019, Manfred Moitzi
# License: MIT License
from math import radians
import ezdxf
from ezdxf.render.forms import ellipse
from ezdxf.math import Matrix44
NAME = 'ellipse.dxf'
doc = ezdxf.new('R12', setup=True)
msp = doc.modelspace()
def render(points):
msp.add_polyline2d(list(points))
def tmatrix(x, y, angle):
return Matrix44.chain(
Matrix44.z_rotate(radians(angle)),
Matrix44.translate(x, y, 0),
)
for axis in [0.5, 0.75, 1., 1.5, 2., 3.]:
render(ellipse(200, rx=5., ry=axis))
attribs = {
'color': 1,
'linetype': 'DASHDOT',
}
msp.add_line((-7, 0), (+7, 0), dxfattribs=attribs)
msp.add_line((0, -5), (0, +5), dxfattribs=attribs)
for rotation in [0, 30, 45, 60, 90]:
m = tmatrix(20, 0, rotation)
render(m.transform_vertices(ellipse(100, rx=5., ry=2.)))
for startangle in [0, 30, 45, 60, 90]:
m = tmatrix(40, 0, startangle)
render(m.transform_vertices(
ellipse(90, rx=5., ry=2., start_param=radians(startangle), end_param= radians(startangle+90)))
)
render(m.transform_vertices(
ellipse(90, rx=5., ry=2., start_param=radians(startangle+180), end_param= radians(startangle+270)))
)
doc.saveas(NAME)
print("drawing '%s' created.\n" % NAME)
|
mit
| -3,324,645,544,781,303,000
| 24.612245
| 107
| 0.641434
| false
| 2.670213
| false
| false
| false
|
tochev/obshtestvo.bg
|
projects/admin.py
|
1
|
20632
|
from django.contrib import admin
from django.contrib.contenttypes.generic import GenericTabularInline
from django.forms import ModelForm
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from suit.widgets import *
from pagedown.widgets import AdminPagedownWidget
from .models import *
from django import forms
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
import reversion
from suit.admin import SortableTabularInline, SortableModelAdmin
from django.db import models
from django.templatetags.static import static
from django.utils.html import urlize, format_html
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text
from django.core.exceptions import ValidationError
from django.contrib.admin.options import IncorrectLookupParameters
from guardian.models import UserObjectPermission, GroupObjectPermission
from guardian.shortcuts import assign_perm
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
@receiver(post_save, sender=User)
def user_perobject_permissions(sender, instance, created, **kwargs):
if created:
assign_perm('change_user', instance, instance)
@receiver(pre_delete, sender=User)
def remove_user_perobject_permissions(sender, instance, **kwargs):
UserObjectPermission.objects.filter(user_id=instance.pk).delete()
def prepare_lookup_value(key, value):
    if key.endswith('__in') and isinstance(value, basestring):
value = value.split(',')
if key.endswith('__isnull'):
value = not (value.lower() in ('', 'false', '0'))
return value
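# Illustrative behaviour of the helper above (hypothetical lookups):
#   prepare_lookup_value('skills__in', '1,2,3')    -> ['1', '2', '3']
#   prepare_lookup_value('skills__isnull', 'true') -> True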
class MultipleFilter(admin.RelatedFieldListFilter):
# title = _('skills')
# parameter_name = 'skills'
template = 'admin/filter_multiple.html'
def __init__(self, field, request, params, model, model_admin, field_path):
super(MultipleFilter, self).__init__(
field, request, params, model, model_admin, field_path)
self.lookup_val = request.GET.getlist(self.lookup_kwarg, None)
self.used_parameters = {}
for p in self.expected_parameters():
if p in request.GET:
value = request.GET.getlist(p) if self.lookup_kwarg == p else request.GET.get(p)
self.used_parameters[p] = prepare_lookup_value(p, value)
def queryset(self, request, queryset):
try:
if self.lookup_kwarg in self.used_parameters:
for lookup in self.used_parameters[self.lookup_kwarg]:
value = {self.lookup_kwarg: lookup}
queryset = queryset.filter(**value)
else:
queryset.filter(**self.used_parameters)
return queryset
except ValidationError as e:
raise IncorrectLookupParameters(e)
def choices(self, cl):
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
yield {
'selected': self.lookup_val is None and not self.lookup_val_isnull,
'query_string': cl.get_query_string({},
[self.lookup_kwarg, self.lookup_kwarg_isnull]),
'display': _('All'),
}
for pk_val, val in self.lookup_choices:
yield {
'selected': smart_text(pk_val) in self.lookup_val,
'query_string': cl.get_query_string({
self.lookup_kwarg: pk_val,
}, [self.lookup_kwarg_isnull]),
'display': val,
}
if (isinstance(self.field, models.related.RelatedObject)
and self.field.field.null or hasattr(self.field, 'rel')
and self.field.null):
yield {
'selected': bool(self.lookup_val_isnull),
'query_string': cl.get_query_string({
self.lookup_kwarg_isnull: 'True',
}, [self.lookup_kwarg]),
'display': EMPTY_CHANGELIST_VALUE,
}
def close_link(instance):
if not instance.id:
return ''
url = reverse('admin:%s_%s_change' % (
instance._meta.app_label, instance._meta.module_name), args=[instance.id] ) + 'tools/' + 'toolfunc'
return mark_safe(u'<a href="{u}">Close</a>'.format(u=url))
def avatar(obj):
if (obj.facebook):
url = u'http://graph.facebook.com/%s/picture?width=40&height=40' % obj.facebook.split('=' if 'profile.php' in obj.facebook else '/')[-1]
else:
url = static('img/user-silhouette.png')
return mark_safe(u'<img width="40" height="40" src="%s" />' % url)
# from guardian.admin import GuardedModelAdmin
class UserActivityInline(admin.TabularInline):
model = UserActivity
suit_classes = 'suit-tab suit-tab-activities'
extra = 1
class MyUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = User
class MyUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = User
class MyUserAdmin(UserAdmin):
form = MyUserChangeForm
inlines = (UserActivityInline,)
add_form = MyUserCreationForm
suit_form_tabs = (
('system', 'System'),
('common', 'Common'),
('activities', 'Activities'),
)
fieldsets = (
(None, {
'classes': ('suit-tab suit-tab-system',),
'fields': ('username', 'password')}
),
(_('Personal info'), {
'classes': ('suit-tab suit-tab-system',),
'fields': ('first_name', 'last_name', 'email')}
),
(_('Permissions'), {
'classes': ('suit-tab suit-tab-system',),
'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')
}),
(_('Important dates'), {
'classes': ('suit-tab suit-tab-system',),
'fields': ('last_login', 'date_joined')}
),
(_('Custom'), {
'classes': ('suit-tab suit-tab-common',),
'fields': ('profession','is_available','available_after','bio', 'avatar')}
),
)
class ProjectActivityFrom(ModelForm):
class Meta:
widgets = {
'can_accomodate': EnclosedInput(append='icon-user', attrs={'class': 'input-mini'}),
}
class ProjectActivityInline(SortableTabularInline):
model = ProjectActivity
suit_classes = 'suit-tab suit-tab-activities'
sortable = 'order'
extra = 0
form = ProjectActivityFrom
def advanced(self, instance):
if not instance.id:
return ''
url = reverse('admin:%s_%s_change' % (
instance._meta.app_label, instance._meta.module_name), args=[instance.id] )
return mark_safe(u'<a href="{u}">Edit</a>'.format(u=url) + ' ' + close_link(instance))
readonly_fields = ('advanced',)
class TaskInline(SortableTabularInline):
model = Task
suit_classes = 'suit-tab suit-tab-tasks'
sortable = 'order'
extra = 0
class ProjectMotiveInline(SortableTabularInline):
model = ProjectMotive
suit_classes = 'suit-tab suit-tab-motives'
sortable = 'order'
extra = 0
class ProjectMilestoneFrom(ModelForm):
class Meta:
widgets = {
# 'percent': RangeInput(append='%', attrs={"min":1, "max":100}),
'percent': EnclosedInput(append='%', attrs={'class': 'input-mini'}),
'target_date': forms.TextInput(attrs={'class': 'input-mini'}),
}
class ProjectMilestoneInline(SortableTabularInline):
form = ProjectMilestoneFrom
model = ProjectMilestone
suit_classes = 'suit-tab suit-tab-milestones'
sortable = 'order'
extra = 0
class ProjectUsageExampleStepForm(ModelForm):
class Meta:
widgets = {
# 'percent': RangeInput(append='%', attrs={"min":1, "max":100}),
'example_number': EnclosedInput(attrs={'class': 'input-mini'}),
'icon':EnclosedInput(append='icon-heart', attrs={'class': 'input-mini'}),
}
class ProjectUsageExampleStepInline(SortableTabularInline):
model = ProjectUsageExampleStep
suit_classes = 'suit-tab suit-tab-usage-examples'
sortable = 'order'
extra = 0
form = ProjectUsageExampleStepForm
class RangeInput(EnclosedInput):
"""HTML5 Range Input."""
input_type = 'range'
class ProjectAdminForm(ModelForm):
class Meta:
widgets = {
'url': EnclosedInput(prepend='icon-globe'),
'pm_url': EnclosedInput(prepend='icon-globe'),
'facebook_group': EnclosedInput(prepend='icon-globe'),
'github_repo': EnclosedInput(prepend='icon-globe'),
'strategy': AdminPagedownWidget(),
'description': AdminPagedownWidget()
}
class ProjectAdmin(reversion.VersionAdmin, SortableModelAdmin):
list_display = ('name',)
sortable = 'order'
form = ProjectAdminForm
search_fields = ['name']
list_filter = ['is_featured']
prepopulated_fields = {"slug": ("name",)}
inlines = [
ProjectActivityInline,
ProjectMotiveInline,
ProjectUsageExampleStepInline,
ProjectMilestoneInline,
]
suit_form_tabs = (
('general', 'General'),
('strategy', 'Strategy'),
('description', 'Description'),
# ('advanced', 'Advanced Settings'),
('activities', 'Activities'),
('milestones', 'Milestones'),
('motives', 'Motives'),
('usage-examples', 'Usage examples steps'),
)
formfield_overrides = {
models.TextField: {'widget': AutosizedTextarea(attrs={'rows':2, 'cols':50})},
}
fieldsets = (
(None, {
'classes': ('suit-tab suit-tab-general',),
'fields': ('name', 'slug', 'url', 'short_description', 'is_forced_active','is_public','has_static_page',)
}),
('Management', {
'classes': ('suit-tab suit-tab-general',),
'fields': ('pm_url', 'facebook_group', 'github_repo',)
}),
('Media', {
'classes': ('suit-tab suit-tab-general',),
'fields': (
'logo',
'logo_styled',
'logo_thumb',
'cover_image',
'complimenting_color',
)
}),
('Homepage', {
'classes': ('suit-tab suit-tab-general',),
'fields': ('is_featured',)
}),
(None, {
'classes': ('suit-tab suit-tab-strategy',),
'fields': ('strategy',)
}),
(None, {
'classes': ('suit-tab suit-tab-description',),
'fields': (
'description',
)}
),
)
class SkillGroupAdmin(SortableModelAdmin):
list_display = ('name',)
sortable = 'order'
class SkillAdmin(admin.ModelAdmin):
list_display = ('name',)
search_fields = ['name']
class ProjectActivityAdminBase(admin.ModelAdmin):
inlines = (UserActivityInline, TaskInline)
def tools(self, instance):
return close_link(instance)
list_display = ('name', 'project', 'tools')
def toolfunc(self, request, obj):
pass
toolfunc.label = "Close" # optional
toolfunc.short_description = "This will be the tooltip of the button" # optional
hobjectactions = ('toolfunc', )
class ProjectActivityAdmin(ProjectActivityAdminBase, reversion.VersionAdmin):
suit_form_tabs = (
('general', 'General'),
('tasks', 'Tasks'),
('activities', 'User activities'),
)
fieldsets = (
(None, {
'classes': ('suit-tab suit-tab-general',),
'fields': ('name', 'project',)
}),
('Settings', {
'classes': ('suit-tab suit-tab-general',),
'fields': ('is_organisational', 'is_template', 'can_accomodate', )
}),
)
# template, prepopulated forms:
# http://stackoverflow.com/questions/2223375/multiple-modeladmins-views-for-same-model-in-django-admin
# http://stackoverflow.com/questions/936376/prepopulate-django-non-model-form
class ProjectActivityTemplate(ProjectActivity):
class Meta:
proxy = True
class ProjectActivityTemplateForm(forms.ModelForm):
class Meta:
model = ProjectActivityTemplate
is_template = forms.BooleanField(widget=forms.HiddenInput(), initial=1)
order = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
class ProjectActivityTemplateAdmin(ProjectActivityAdminBase):
form = ProjectActivityTemplateForm
inlines = []
fields = ('name', 'is_organisational', 'can_accomodate', 'order', 'is_template',)
def queryset(self, request):
return self.model.objects.filter(is_template=True)
class MemberAdminFrom(forms.ModelForm):
class Meta:
widgets = {
'facebook':EnclosedInput(prepend='icon-share'),
'email':EnclosedInput(prepend='icon-envelope'),
# 'types': autocomplete_light.MultipleChoiceWidget(autocomplete='MemberTypeAutocomplete'),
# 'skills': autocomplete_light.MultipleChoiceWidget(autocomplete='SkillAutocomplete'),
# 'projects_interests': autocomplete_light.MultipleChoiceWidget(autocomplete='ProjectAutocomplete'),
}
class MemberAdmin(admin.ModelAdmin):
model = Member
form = MemberAdminFrom
ordering = ('name',)
search_fields = ['name']
list_filter = ('projects_interests', ('skills', MultipleFilter),'types', 'last_contacted_at')
list_display = (avatar, 'name', 'facebook_as_link', 'email', 'skills_display')
suit_form_tabs = (
('general', _('General')),
('specifics', _('Specifics')),
# ('integration', _('System')),
)
formfield_overrides = {
models.TextField: {'widget': AutosizedTextarea(attrs={'rows':2, 'cols':50})},
models.DateTimeField: {'widget': SuitSplitDateTimeWidget},
models.DateField: {'widget': SuitDateWidget},
}
def skills_display(self, member):
return ', '.join([obj.name for obj in member.skills.all()])
skills_display.short_description = _('skills')
def facebook_as_link(self, obj):
return format_html(urlize(obj.facebook))
facebook_as_link.short_description = 'Facebook'
fieldsets = (
(None, {
'classes': ('suit-tab suit-tab-general',),
'fields': ('name', 'facebook', 'email', 'date_joined',)
}),
(_('Expectations'), {
'classes': ('suit-tab suit-tab-general',),
'fields': ( 'availability', 'available_after', )
}),
(_("Member's preferences"), {
'classes': ('suit-tab suit-tab-general',),
'fields': ('skills', 'types', 'projects_interests','offered_help', )
}),
(_('Self-description & Comments'), {
'classes': ('suit-tab suit-tab-general',),
'fields': ('intro', 'comment')
}),
(_('Communication'), {
'classes': ('suit-tab suit-tab-specifics',),
'fields': ('last_contacted_at', 'latest_answer', 'contact_frequency', )
}),
# ('User', {
# 'classes': ('suit-tab suit-tab-integration',),
# 'fields': ('user', 'update_from_user')
# }),
)
# from guardian.admin import GuardedModelAdmin
class UpdateInline(GenericTabularInline):
model = Update
suit_classes = 'suit-tab suit-tab-updates'
extra = 0
formfield_overrides = {
models.TextField: {'widget': AutosizedTextarea(attrs={'rows':1, 'cols':100})},
models.DateTimeField: {'widget': SuitSplitDateTimeWidget},
models.DateField: {'widget': SuitDateWidget},
}
class OrganisationAdmin(admin.ModelAdmin):
model = Organisation
inlines = (UpdateInline,)
list_filter = ('middlemen',('types', MultipleFilter))
list_display = ('name','representatives', 'types_display')
search_fields = ['name']
suit_form_tabs = (
('general', _('General')),
('updates', _('Updates')),
)
formfield_overrides = {
models.TextField: {'widget': AutosizedTextarea(attrs={'rows':3, 'cols':70})},
models.DateTimeField: {'widget': SuitSplitDateTimeWidget},
models.DateField: {'widget': SuitDateWidget},
}
def types_display(self, org):
return ', '.join([obj.name for obj in org.types.all()])
types_display.short_description = _('relation type')
fieldsets = (
(None, {
'classes': ('suit-tab suit-tab-general',),
'fields': ('name', 'types','strategy')
}),
(_('Contact'), {
'classes': ('suit-tab suit-tab-general',),
'fields': ('middlemen', 'representatives', 'contact', )
}),
(_('About'), {
'classes': ('suit-tab suit-tab-general',),
'fields': ('comment', 'found_via', 'working_with', )
}),
(_('Partner'), {
'classes': ('suit-tab suit-tab-general',),
'fields': ('partnered_project', 'provided_help',)
}),
)
class SponsorOrg(Organisation):
class Meta:
proxy = True
verbose_name = Organisation._meta.verbose_name
verbose_name_plural = Organisation._meta.verbose_name
class SponsorOrgAdmin(OrganisationAdmin):
def has_add_permission(self, request):
return False
def queryset(self, request):
return self.model.objects.filter(types__id=2)
class PartnerOrg(Organisation):
class Meta:
proxy = True
verbose_name = Organisation._meta.verbose_name
verbose_name_plural = Organisation._meta.verbose_name
class PartnerOrgAdmin(OrganisationAdmin):
def has_add_permission(self, request):
return False
def queryset(self, request):
return self.model.objects.exclude(partnered_project=None)
class AvailableMember(Member):
class Meta:
proxy = True
verbose_name = Member._meta.verbose_name
verbose_name_plural = Member._meta.verbose_name
class AvailableMemberAdmin(MemberAdmin):
def has_add_permission(self, request):
return False
def queryset(self, request):
return self.model.objects.filter(availability=Member.AVAILABLE)
class PaidMember(Member):
class Meta:
proxy = True
verbose_name = Member._meta.verbose_name
verbose_name_plural = Member._meta.verbose_name
class PaidMemberAdmin(MemberAdmin):
def has_add_permission(self, request):
return False
def queryset(self, request):
return self.model.objects.filter(availability=Member.ONLY_PAID)
class ReaderMember(Member):
class Meta:
proxy = True
verbose_name = Member._meta.verbose_name
verbose_name_plural = Member._meta.verbose_name
class ReaderMemberAdmin(MemberAdmin):
def has_add_permission(self, request):
return False
def queryset(self, request):
return self.model.objects.filter(availability=Member.ONLY_READER)
class EventAdmin(admin.ModelAdmin):
model = Event
ordering = ('name',)
search_fields = ['name']
list_filter = ('date', ('organizers', MultipleFilter))
list_display = ('name', 'date')
suit_form_tabs = (
('general', _('General')),
# ('integration', _('System')),
)
formfield_overrides = {
models.TextField: {'widget': AutosizedTextarea(attrs={'rows':2, 'cols':60})},
models.DateTimeField: {'widget': SuitSplitDateTimeWidget},
models.DateField: {'widget': SuitDateWidget},
}
fieldsets = (
(None, {
'classes': ('suit-tab suit-tab-general',),
'fields': ('name', 'date', 'contact')
}),
(_('details'), {
'classes': ('suit-tab suit-tab-general',),
'fields': ( 'strategy', 'organizers', 'comment')
}),
)
admin.site.register(Organisation, OrganisationAdmin)
admin.site.register(SponsorOrg, SponsorOrgAdmin)
admin.site.register(PartnerOrg, PartnerOrgAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(Member, MemberAdmin)
admin.site.register(ReaderMember, ReaderMemberAdmin)
admin.site.register(AvailableMember, AvailableMemberAdmin)
admin.site.register(PaidMember, PaidMemberAdmin)
admin.site.register(OrganisationType)
admin.site.register(MemberType)
admin.site.register(Skill, SkillAdmin)
admin.site.register(SkillGroup, SkillGroupAdmin)
admin.site.register(UserProjectPause)
admin.site.register(ProjectActivity, ProjectActivityAdmin)
admin.site.register(ProjectActivityTemplate, ProjectActivityTemplateAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(User, MyUserAdmin)
admin.site.register(UserActivity)
|
unlicense
| -5,973,505,790,476,279,000
| 33.61745
| 148
| 0.612253
| false
| 3.897978
| false
| false
| false
|