Dataset columns:

| column | dtype | values |
|---|---|---|
| repo_name | string | lengths 5 to 92 |
| path | string | lengths 4 to 221 |
| copies | string | 19 classes |
| size | string | lengths 4 to 6 |
| content | string | lengths 766 to 896k |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51 to 99.9 |
| line_max | int64 | 32 to 997 |
| alpha_frac | float64 | 0.25 to 0.96 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.5 to 13.6 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |
repo_name: MarkWh1te/xueqiu_predict | path: crawler/stock.py | copies: 1 | size: 2487 | content:
from flask import Flask
from flask import render_template, request
from utils import Base,engine
from sqlalchemy.orm import scoped_session, sessionmaker
from models import Stock, StockDetail
from flask_bootstrap import Bootstrap
from flask import Blueprint
from flask_paginate import Pagination,get_page_args
from sqlalchemy import desc
def create_app():
app = Flask(__name__)
Bootstrap(app)
return app
app = create_app()
# @app.route('/')
# def hello_world():
# return 'Hello, World!'
@app.route('/detail/<stock_id>')
def detail(stock_id):
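    # Paginated detail view: list the StockDetail rows for one stock,
    # newest first, 15 records per page.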
print(stock_id)
page = request.args.get('page', type=int, default=1)
per_page = 15
if per_page:
stocks = StockDetail.query.filter(StockDetail.stock_id == stock_id).\
order_by(desc(StockDetail.create_time)).limit(per_page)
if page:
stocks = stocks.offset((page-1)*per_page)
pagination = Pagination(page=page,
per_page=per_page,
# total=stocks.count(),
total = StockDetail.query.filter(StockDetail.stock_id == stock_id).count(),
record_name='record',
format_total=True,
format_number=True,
css_framework="bootstrap3"
)
return render_template('detail.html',
stocks=stocks,
page=page,
per_page=per_page,
pagination=pagination)
@app.route('/')
def index():
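    # Paginated listing of all stocks, 15 per page.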
# stocks = Stock.query.all()
page = request.args.get('page', type=int, default=1)
per_page = 15
if per_page:
stocks = Stock.query.limit(per_page)
if page:
stocks = stocks.offset((page-1)*per_page)
pagination = Pagination(page=page,
per_page=per_page,
total=Stock.query.count(),
record_name='stocks',
format_total=True,
format_number=True,
css_framework="bootstrap3"
)
return render_template('index.html',
stocks=stocks,
page=page,
per_page=per_page,
pagination=pagination)
if __name__ == "__main__":
app.run(host='0.0.0.0')
license: mit | hash: -7,122,884,169,626,361,000 | line_mean: 32.608108 | line_max: 103 | alpha_frac: 0.51347 | autogenerated: false | ratio: 4.295337 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: jsilter/scipy | path: scipy/linalg/special_matrices.py | copies: 1 | size: 27627 | content:
from __future__ import division, print_function, absolute_import
import math
import numpy as np
from scipy.lib.six import xrange
from scipy.lib.six import string_types
__all__ = ['tri', 'tril', 'triu', 'toeplitz', 'circulant', 'hankel',
'hadamard', 'leslie', 'all_mat', 'kron', 'block_diag', 'companion',
'hilbert', 'invhilbert', 'pascal', 'invpascal', 'dft']
#-----------------------------------------------------------------------------
# matrix construction functions
#-----------------------------------------------------------------------------
#
# *Note*: tri{,u,l} is implemented in numpy, but an important bug was fixed in
# 2.0.0.dev-1af2f3; the following tri{,u,l} definitions are here for backwards
# compatibility.
def tri(N, M=None, k=0, dtype=None):
"""
Construct (N, M) matrix filled with ones at and below the k-th diagonal.
The matrix has A[i,j] == 1 for i <= j + k
Parameters
----------
N : integer
The size of the first dimension of the matrix.
M : integer or None
The size of the second dimension of the matrix. If `M` is None,
`M = N` is assumed.
k : integer
Number of subdiagonal below which matrix is filled with ones.
`k` = 0 is the main diagonal, `k` < 0 subdiagonal and `k` > 0
superdiagonal.
dtype : dtype
Data type of the matrix.
Returns
-------
tri : (N, M) ndarray
Tri matrix.
Examples
--------
>>> from scipy.linalg import tri
>>> tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> tri(3, 5, -1, dtype=int)
array([[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0]])
"""
if M is None:
M = N
if isinstance(M, string_types):
#pearu: any objections to remove this feature?
# As tri(N,'d') is equivalent to tri(N,dtype='d')
dtype = M
M = N
m = np.greater_equal(np.subtract.outer(np.arange(N), np.arange(M)), -k)
if dtype is None:
return m
else:
return m.astype(dtype)
def tril(m, k=0):
"""
Make a copy of a matrix with elements above the k-th diagonal zeroed.
Parameters
----------
m : array_like
Matrix whose elements to return
k : integer
Diagonal above which to zero elements.
`k` == 0 is the main diagonal, `k` < 0 subdiagonal and
`k` > 0 superdiagonal.
Returns
-------
tril : ndarray
Return is the same shape and type as `m`.
Examples
--------
>>> from scipy.linalg import tril
>>> tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = np.asarray(m)
out = tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype.char) * m
return out
def triu(m, k=0):
"""
Make a copy of a matrix with elements below the k-th diagonal zeroed.
Parameters
----------
m : array_like
Matrix whose elements to return
k : int, optional
Diagonal below which to zero elements.
`k` == 0 is the main diagonal, `k` < 0 subdiagonal and
`k` > 0 superdiagonal.
Returns
-------
triu : ndarray
Return matrix with zeroed elements below the k-th diagonal and has
same shape and type as `m`.
Examples
--------
>>> from scipy.linalg import triu
>>> triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = np.asarray(m)
out = (1 - tri(m.shape[0], m.shape[1], k - 1, m.dtype.char)) * m
return out
def toeplitz(c, r=None):
"""
Construct a Toeplitz matrix.
The Toeplitz matrix has constant diagonals, with c as its first column
and r as its first row. If r is not given, ``r == conjugate(c)`` is
assumed.
Parameters
----------
c : array_like
First column of the matrix. Whatever the actual shape of `c`, it
will be converted to a 1-D array.
r : array_like
First row of the matrix. If None, ``r = conjugate(c)`` is assumed;
in this case, if c[0] is real, the result is a Hermitian matrix.
r[0] is ignored; the first row of the returned matrix is
``[c[0], r[1:]]``. Whatever the actual shape of `r`, it will be
converted to a 1-D array.
Returns
-------
A : (len(c), len(r)) ndarray
The Toeplitz matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.
See also
--------
circulant : circulant matrix
hankel : Hankel matrix
Notes
-----
The behavior when `c` or `r` is a scalar, or when `c` is complex and
`r` is None, was changed in version 0.8.0. The behavior in previous
versions was undocumented and is no longer supported.
Examples
--------
>>> from scipy.linalg import toeplitz
>>> toeplitz([1,2,3], [1,4,5,6])
array([[1, 4, 5, 6],
[2, 1, 4, 5],
[3, 2, 1, 4]])
>>> toeplitz([1.0, 2+3j, 4-1j])
array([[ 1.+0.j, 2.-3.j, 4.+1.j],
[ 2.+3.j, 1.+0.j, 2.-3.j],
[ 4.-1.j, 2.+3.j, 1.+0.j]])
"""
c = np.asarray(c).ravel()
if r is None:
r = c.conjugate()
else:
r = np.asarray(r).ravel()
# Form a 1D array of values to be used in the matrix, containing a reversed
# copy of r[1:], followed by c.
vals = np.concatenate((r[-1:0:-1], c))
a, b = np.ogrid[0:len(c), len(r) - 1:-1:-1]
indx = a + b
# `indx` is a 2D array of indices into the 1D array `vals`, arranged so
# that `vals[indx]` is the Toeplitz matrix.
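    # For example, with c=[1, 2, 3] and r=[1, 4, 5, 6]:
    #     vals = [6, 5, 4, 1, 2, 3]
    #     indx = [[3, 2, 1, 0],
    #             [4, 3, 2, 1],
    #             [5, 4, 3, 2]]
    # and vals[indx] is the matrix shown in the docstring example above.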
return vals[indx]
def circulant(c):
"""
Construct a circulant matrix.
Parameters
----------
c : (N,) array_like
1-D array, the first column of the matrix.
Returns
-------
A : (N, N) ndarray
A circulant matrix whose first column is `c`.
See also
--------
toeplitz : Toeplitz matrix
hankel : Hankel matrix
Notes
-----
.. versionadded:: 0.8.0
Examples
--------
>>> from scipy.linalg import circulant
>>> circulant([1, 2, 3])
array([[1, 3, 2],
[2, 1, 3],
[3, 2, 1]])
"""
c = np.asarray(c).ravel()
a, b = np.ogrid[0:len(c), 0:-len(c):-1]
indx = a + b
# `indx` is a 2D array of indices into `c`, arranged so that `c[indx]` is
# the circulant matrix.
return c[indx]
def hankel(c, r=None):
"""
Construct a Hankel matrix.
The Hankel matrix has constant anti-diagonals, with `c` as its
first column and `r` as its last row. If `r` is not given, then
`r = zeros_like(c)` is assumed.
Parameters
----------
c : array_like
First column of the matrix. Whatever the actual shape of `c`, it
will be converted to a 1-D array.
r : array_like
Last row of the matrix. If None, ``r = zeros_like(c)`` is assumed.
r[0] is ignored; the last row of the returned matrix is
``[c[-1], r[1:]]``. Whatever the actual shape of `r`, it will be
converted to a 1-D array.
Returns
-------
A : (len(c), len(r)) ndarray
The Hankel matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.
See also
--------
toeplitz : Toeplitz matrix
circulant : circulant matrix
Examples
--------
>>> from scipy.linalg import hankel
>>> hankel([1, 17, 99])
array([[ 1, 17, 99],
[17, 99, 0],
[99, 0, 0]])
>>> hankel([1,2,3,4], [4,7,7,8,9])
array([[1, 2, 3, 4, 7],
[2, 3, 4, 7, 7],
[3, 4, 7, 7, 8],
[4, 7, 7, 8, 9]])
"""
c = np.asarray(c).ravel()
if r is None:
r = np.zeros_like(c)
else:
r = np.asarray(r).ravel()
# Form a 1D array of values to be used in the matrix, containing `c`
# followed by r[1:].
vals = np.concatenate((c, r[1:]))
a, b = np.ogrid[0:len(c), 0:len(r)]
indx = a + b
# `indx` is a 2D array of indices into the 1D array `vals`, arranged so
# that `vals[indx]` is the Hankel matrix.
return vals[indx]
def hadamard(n, dtype=int):
"""
Construct a Hadamard matrix.
Constructs an n-by-n Hadamard matrix, using Sylvester's
construction. `n` must be a power of 2.
Parameters
----------
n : int
The order of the matrix. `n` must be a power of 2.
dtype : numpy dtype
The data type of the array to be constructed.
Returns
-------
H : (n, n) ndarray
The Hadamard matrix.
Notes
-----
.. versionadded:: 0.8.0
Examples
--------
>>> from scipy.linalg import hadamard
>>> hadamard(2, dtype=complex)
array([[ 1.+0.j, 1.+0.j],
[ 1.+0.j, -1.-0.j]])
>>> hadamard(4)
array([[ 1, 1, 1, 1],
[ 1, -1, 1, -1],
[ 1, 1, -1, -1],
[ 1, -1, -1, 1]])
"""
# This function is a slightly modified version of the
# function contributed by Ivo in ticket #675.
if n < 1:
lg2 = 0
else:
lg2 = int(math.log(n, 2))
if 2 ** lg2 != n:
        raise ValueError("n must be a positive integer, and n must be "
"a power of 2")
H = np.array([[1]], dtype=dtype)
# Sylvester's construction
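    # each iteration doubles the order: H_{2m} = [[H_m, H_m], [H_m, -H_m]]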
for i in range(0, lg2):
H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))
return H
def leslie(f, s):
"""
Create a Leslie matrix.
Given the length n array of fecundity coefficients `f` and the length
    n-1 array of survival coefficients `s`, return the associated Leslie matrix.
Parameters
----------
f : (N,) array_like
The "fecundity" coefficients.
s : (N-1,) array_like
The "survival" coefficients, has to be 1-D. The length of `s`
must be one less than the length of `f`, and it must be at least 1.
Returns
-------
L : (N, N) ndarray
The array is zero except for the first row,
which is `f`, and the first sub-diagonal, which is `s`.
The data-type of the array will be the data-type of ``f[0]+s[0]``.
Notes
-----
.. versionadded:: 0.8.0
The Leslie matrix is used to model discrete-time, age-structured
population growth [1]_ [2]_. In a population with `n` age classes, two sets
of parameters define a Leslie matrix: the `n` "fecundity coefficients",
which give the number of offspring per-capita produced by each age
class, and the `n` - 1 "survival coefficients", which give the
per-capita survival rate of each age class.
References
----------
.. [1] P. H. Leslie, On the use of matrices in certain population
mathematics, Biometrika, Vol. 33, No. 3, 183--212 (Nov. 1945)
.. [2] P. H. Leslie, Some further notes on the use of matrices in
population mathematics, Biometrika, Vol. 35, No. 3/4, 213--245
(Dec. 1948)
Examples
--------
>>> from scipy.linalg import leslie
>>> leslie([0.1, 2.0, 1.0, 0.1], [0.2, 0.8, 0.7])
array([[ 0.1, 2. , 1. , 0.1],
[ 0.2, 0. , 0. , 0. ],
[ 0. , 0.8, 0. , 0. ],
[ 0. , 0. , 0.7, 0. ]])
"""
f = np.atleast_1d(f)
s = np.atleast_1d(s)
if f.ndim != 1:
raise ValueError("Incorrect shape for f. f must be one-dimensional")
if s.ndim != 1:
raise ValueError("Incorrect shape for s. s must be one-dimensional")
if f.size != s.size + 1:
raise ValueError("Incorrect lengths for f and s. The length"
" of s must be one less than the length of f.")
if s.size == 0:
raise ValueError("The length of s must be at least 1.")
tmp = f[0] + s[0]
n = f.size
a = np.zeros((n, n), dtype=tmp.dtype)
a[0] = f
a[list(range(1, n)), list(range(0, n - 1))] = s
return a
@np.deprecate
def all_mat(*args):
return list(map(np.matrix, args))
def kron(a, b):
"""
Kronecker product.
The result is the block matrix::
a[0,0]*b a[0,1]*b ... a[0,-1]*b
a[1,0]*b a[1,1]*b ... a[1,-1]*b
...
a[-1,0]*b a[-1,1]*b ... a[-1,-1]*b
Parameters
----------
a : (M, N) ndarray
Input array
b : (P, Q) ndarray
Input array
Returns
-------
A : (M*P, N*Q) ndarray
Kronecker product of `a` and `b`.
Examples
--------
>>> from numpy import array
>>> from scipy.linalg import kron
>>> kron(array([[1,2],[3,4]]), array([[1,1,1]]))
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
"""
if not a.flags['CONTIGUOUS']:
a = np.reshape(a, a.shape)
if not b.flags['CONTIGUOUS']:
b = np.reshape(b, b.shape)
o = np.outer(a, b)
o = o.reshape(a.shape + b.shape)
return np.concatenate(np.concatenate(o, axis=1), axis=1)
def block_diag(*arrs):
"""
Create a block diagonal matrix from provided arrays.
Given the inputs `A`, `B` and `C`, the output will have these
arrays arranged on the diagonal::
[[A, 0, 0],
[0, B, 0],
[0, 0, C]]
Parameters
----------
A, B, C, ... : array_like, up to 2-D
        Input arrays. A 1-D array or array_like sequence of length `n` is
treated as a 2-D array with shape ``(1,n)``.
Returns
-------
D : ndarray
Array with `A`, `B`, `C`, ... on the diagonal. `D` has the
same dtype as `A`.
Notes
-----
If all the input arrays are square, the output is known as a
block diagonal matrix.
Examples
--------
>>> from scipy.linalg import block_diag
>>> A = [[1, 0],
... [0, 1]]
>>> B = [[3, 4, 5],
... [6, 7, 8]]
>>> C = [[7]]
>>> block_diag(A, B, C)
[[1 0 0 0 0 0]
[0 1 0 0 0 0]
[0 0 3 4 5 0]
[0 0 6 7 8 0]
[0 0 0 0 0 7]]
>>> block_diag(1.0, [2, 3], [[4, 5], [6, 7]])
array([[ 1., 0., 0., 0., 0.],
[ 0., 2., 3., 0., 0.],
[ 0., 0., 0., 4., 5.],
[ 0., 0., 0., 6., 7.]])
"""
if arrs == ():
arrs = ([],)
arrs = [np.atleast_2d(a) for a in arrs]
bad_args = [k for k in range(len(arrs)) if arrs[k].ndim > 2]
if bad_args:
raise ValueError("arguments in the following positions have dimension "
"greater than 2: %s" % bad_args)
shapes = np.array([a.shape for a in arrs])
out = np.zeros(np.sum(shapes, axis=0), dtype=arrs[0].dtype)
r, c = 0, 0
for i, (rr, cc) in enumerate(shapes):
out[r:r + rr, c:c + cc] = arrs[i]
r += rr
c += cc
return out
def companion(a):
"""
Create a companion matrix.
Create the companion matrix [1]_ associated with the polynomial whose
coefficients are given in `a`.
Parameters
----------
a : (N,) array_like
1-D array of polynomial coefficients. The length of `a` must be
at least two, and ``a[0]`` must not be zero.
Returns
-------
c : (N-1, N-1) ndarray
The first row of `c` is ``-a[1:]/a[0]``, and the first
sub-diagonal is all ones. The data-type of the array is the same
as the data-type of ``1.0*a[0]``.
Raises
------
ValueError
If any of the following are true: a) ``a.ndim != 1``;
b) ``a.size < 2``; c) ``a[0] == 0``.
Notes
-----
.. versionadded:: 0.8.0
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> from scipy.linalg import companion
>>> companion([1, -10, 31, -30])
array([[ 10., -31., 30.],
[ 1., 0., 0.],
[ 0., 1., 0.]])
"""
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Incorrect shape for `a`. `a` must be "
"one-dimensional.")
if a.size < 2:
raise ValueError("The length of `a` must be at least 2.")
if a[0] == 0:
raise ValueError("The first coefficient in `a` must not be zero.")
first_row = -a[1:] / (1.0 * a[0])
n = a.size
c = np.zeros((n - 1, n - 1), dtype=first_row.dtype)
c[0] = first_row
c[list(range(1, n - 1)), list(range(0, n - 2))] = 1
return c
def hilbert(n):
"""
Create a Hilbert matrix of order `n`.
Returns the `n` by `n` array with entries `h[i,j] = 1 / (i + j + 1)`.
Parameters
----------
n : int
The size of the array to create.
Returns
-------
h : (n, n) ndarray
The Hilbert matrix.
See Also
--------
invhilbert : Compute the inverse of a Hilbert matrix.
Notes
-----
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.linalg import hilbert
>>> hilbert(3)
array([[ 1. , 0.5 , 0.33333333],
[ 0.5 , 0.33333333, 0.25 ],
[ 0.33333333, 0.25 , 0.2 ]])
"""
values = 1.0 / (1.0 + np.arange(2 * n - 1))
h = hankel(values[:n], r=values[n - 1:])
return h
def invhilbert(n, exact=False):
"""
Compute the inverse of the Hilbert matrix of order `n`.
The entries in the inverse of a Hilbert matrix are integers. When `n`
is greater than 14, some entries in the inverse exceed the upper limit
of 64 bit integers. The `exact` argument provides two options for
dealing with these large integers.
Parameters
----------
n : int
The order of the Hilbert matrix.
exact : bool
If False, the data type of the array that is returned is np.float64,
and the array is an approximation of the inverse.
If True, the array is the exact integer inverse array. To represent
the exact inverse when n > 14, the returned array is an object array
of long integers. For n <= 14, the exact inverse is returned as an
array with data type np.int64.
Returns
-------
invh : (n, n) ndarray
The data type of the array is np.float64 if `exact` is False.
If `exact` is True, the data type is either np.int64 (for n <= 14)
or object (for n > 14). In the latter case, the objects in the
array will be long integers.
See Also
--------
hilbert : Create a Hilbert matrix.
Notes
-----
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.linalg import invhilbert
>>> invhilbert(4)
array([[ 16., -120., 240., -140.],
[ -120., 1200., -2700., 1680.],
[ 240., -2700., 6480., -4200.],
[ -140., 1680., -4200., 2800.]])
>>> invhilbert(4, exact=True)
array([[ 16, -120, 240, -140],
[ -120, 1200, -2700, 1680],
[ 240, -2700, 6480, -4200],
[ -140, 1680, -4200, 2800]], dtype=int64)
>>> invhilbert(16)[7,7]
4.2475099528537506e+19
>>> invhilbert(16, exact=True)[7,7]
42475099528537378560L
"""
from scipy.special import comb
if exact:
if n > 14:
dtype = object
else:
dtype = np.int64
else:
dtype = np.float64
invh = np.empty((n, n), dtype=dtype)
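    # Fill the lower triangle with the closed-form entries
    #   invh[i, j] = (-1)**(i+j) * (i+j+1) * C(n+i, n-j-1) * C(n+j, n-i-1) * C(i+j, i)**2
    # and mirror them across the diagonal, since the inverse is symmetric.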
for i in xrange(n):
for j in xrange(0, i + 1):
s = i + j
invh[i, j] = ((-1) ** s * (s + 1) *
comb(n + i, n - j - 1, exact) *
comb(n + j, n - i - 1, exact) *
comb(s, i, exact) ** 2)
if i != j:
invh[j, i] = invh[i, j]
return invh
def pascal(n, kind='symmetric', exact=True):
"""
Returns the n x n Pascal matrix.
The Pascal matrix is a matrix containing the binomial coefficients as
its elements.
Parameters
----------
n : int
The size of the matrix to create; that is, the result is an n x n
matrix.
kind : str, optional
Must be one of 'symmetric', 'lower', or 'upper'.
Default is 'symmetric'.
exact : bool, optional
If `exact` is True, the result is either an array of type
numpy.uint64 (if n < 35) or an object array of Python long integers.
If `exact` is False, the coefficients in the matrix are computed using
`scipy.special.comb` with `exact=False`. The result will be a floating
point array, and the values in the array will not be the exact
coefficients, but this version is much faster than `exact=True`.
Returns
-------
p : (n, n) ndarray
The Pascal matrix.
See Also
--------
invpascal
Notes
-----
See http://en.wikipedia.org/wiki/Pascal_matrix for more information
about Pascal matrices.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.linalg import pascal
>>> pascal(4)
array([[ 1, 1, 1, 1],
[ 1, 2, 3, 4],
[ 1, 3, 6, 10],
[ 1, 4, 10, 20]], dtype=uint64)
>>> pascal(4, kind='lower')
array([[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 2, 1, 0],
[1, 3, 3, 1]], dtype=uint64)
>>> pascal(50)[-1, -1]
25477612258980856902730428600L
>>> from scipy.special import comb
>>> comb(98, 49, exact=True)
25477612258980856902730428600L
"""
from scipy.special import comb
if kind not in ['symmetric', 'lower', 'upper']:
raise ValueError("kind must be 'symmetric', 'lower', or 'upper'")
if exact:
if n >= 35:
L_n = np.empty((n, n), dtype=object)
L_n.fill(0)
else:
L_n = np.zeros((n, n), dtype=np.uint64)
for i in range(n):
for j in range(i + 1):
L_n[i, j] = comb(i, j, exact=True)
else:
L_n = comb(*np.ogrid[:n, :n])
    if kind == 'lower':
p = L_n
    elif kind == 'upper':
p = L_n.T
else:
p = np.dot(L_n, L_n.T)
return p
def invpascal(n, kind='symmetric', exact=True):
"""
Returns the inverse of the n x n Pascal matrix.
The Pascal matrix is a matrix containing the binomial coefficients as
its elements.
Parameters
----------
n : int
The size of the matrix to create; that is, the result is an n x n
matrix.
kind : str, optional
Must be one of 'symmetric', 'lower', or 'upper'.
Default is 'symmetric'.
exact : bool, optional
If `exact` is True, the result is either an array of type
`numpy.int64` (if `n` <= 35) or an object array of Python integers.
If `exact` is False, the coefficients in the matrix are computed using
`scipy.special.comb` with `exact=False`. The result will be a floating
point array, and for large `n`, the values in the array will not be the
exact coefficients.
Returns
-------
invp : (n, n) ndarray
The inverse of the Pascal matrix.
See Also
--------
pascal
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] "Pascal matrix", http://en.wikipedia.org/wiki/Pascal_matrix
.. [2] Cohen, A. M., "The inverse of a Pascal matrix", Mathematical
Gazette, 59(408), pp. 111-112, 1975.
Examples
--------
>>> from scipy.linalg import invpascal, pascal
>>> invp = invpascal(5)
>>> invp
array([[ 5, -10, 10, -5, 1],
[-10, 30, -35, 19, -4],
[ 10, -35, 46, -27, 6],
[ -5, 19, -27, 17, -4],
[ 1, -4, 6, -4, 1]])
>>> p = pascal(5)
>>> p.dot(invp)
array([[ 1., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 1.]])
An example of the use of `kind` and `exact`:
>>> invpascal(5, kind='lower', exact=False)
array([[ 1., -0., 0., -0., 0.],
[-1., 1., -0., 0., -0.],
[ 1., -2., 1., -0., 0.],
[-1., 3., -3., 1., -0.],
[ 1., -4., 6., -4., 1.]])
"""
from scipy.special import comb
if kind not in ['symmetric', 'lower', 'upper']:
raise ValueError("'kind' must be 'symmetric', 'lower' or 'upper'.")
if kind == 'symmetric':
if exact:
if n > 34:
dt = object
else:
dt = np.int64
else:
dt = np.float64
invp = np.empty((n, n), dtype=dt)
for i in range(n):
for j in range(0, i + 1):
v = 0
for k in range(n - i):
v += comb(i + k, k, exact=exact) * comb(i + k, i + k - j,
exact=exact)
invp[i, j] = (-1)**(i - j) * v
if i != j:
invp[j, i] = invp[i, j]
else:
        # For the 'lower' and 'upper' cases, we compute the inverse by
        # changing the sign of every other diagonal of the Pascal matrix.
invp = pascal(n, kind=kind, exact=exact)
if invp.dtype == np.uint64:
            # This cast from np.uint64 to np.int64 is OK, because if `kind` is not
# "symmetric", the values in invp are all much less than 2**63.
invp = invp.view(np.int64)
# The toeplitz matrix has alternating bands of 1 and -1.
invp *= toeplitz((-1)**np.arange(n)).astype(invp.dtype)
return invp
def dft(n, scale=None):
"""
Discrete Fourier transform matrix.
Create the matrix that computes the discrete Fourier transform of a
sequence [1]_. The n-th primitive root of unity used to generate the
matrix is exp(-2*pi*i/n), where i = sqrt(-1).
Parameters
----------
n : int
Size the matrix to create.
scale : str, optional
Must be None, 'sqrtn', or 'n'.
If `scale` is 'sqrtn', the matrix is divided by `sqrt(n)`.
If `scale` is 'n', the matrix is divided by `n`.
If `scale` is None (the default), the matrix is not normalized, and the
return value is simply the Vandermonde matrix of the roots of unity.
Returns
-------
m : (n, n) ndarray
The DFT matrix.
Notes
-----
When `scale` is None, multiplying a vector by the matrix returned by
`dft` is mathematically equivalent to (but much less efficient than)
the calculation performed by `scipy.fftpack.fft`.
.. versionadded:: 0.14.0
References
----------
.. [1] "DFT matrix", http://en.wikipedia.org/wiki/DFT_matrix
Examples
--------
>>> np.set_printoptions(precision=5, suppress=True)
>>> x = np.array([1, 2, 3, 0, 3, 2, 1, 0])
>>> m = dft(8)
    >>> m.dot(x) # Compute the DFT of x
array([ 12.+0.j, -2.-2.j, 0.-4.j, -2.+2.j, 4.+0.j, -2.-2.j,
-0.+4.j, -2.+2.j])
Verify that ``m.dot(x)`` is the same as ``fft(x)``.
>>> from scipy.fftpack import fft
>>> fft(x) # Same result as m.dot(x)
array([ 12.+0.j, -2.-2.j, 0.-4.j, -2.+2.j, 4.+0.j, -2.-2.j,
0.+4.j, -2.+2.j])
"""
if scale not in [None, 'sqrtn', 'n']:
raise ValueError("scale must be None, 'sqrtn', or 'n'; "
"%r is not valid." % (scale,))
omegas = np.exp(-2j * np.pi * np.arange(n) / n).reshape(-1, 1)
m = omegas ** np.arange(n)
if scale == 'sqrtn':
m /= math.sqrt(n)
elif scale == 'n':
m /= n
return m
license: bsd-3-clause | hash: 4,770,515,006,507,963,000 | line_mean: 27.07622 | line_max: 79 | alpha_frac: 0.508452 | autogenerated: false | ratio: 3.246798 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: rthouvenin/meteography | path: meteography/neighbors.py | copies: 1 | size: 2176 | content:
# -*- coding: utf-8 -*-
"""
Wrapper around sklearn k-neighbors estimators that can work in batches on
pytables arrays (or other disk-backed arrays that support slicing)
"""
import numpy as np
from sklearn.neighbors import NearestNeighbors as SKNN
from meteography.dataset import PIXEL_TYPE
class NearestNeighbors:
BATCH_SIZE = 20 * 1024 * 1024 # 20 Mb
def __init__(self, **kwargs):
self.sknn = SKNN(1, algorithm='brute', **kwargs)
def fit(self, X, y=None):
self.X = X
self.y = y
self.batch_len = max(1, self.BATCH_SIZE // X.shape[1])
self.nb_batch = 0
self.batch = None
if len(X) > 0:
self._reset_nb_batch()
def _reset_nb_batch(self):
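        # Recompute how many batches are needed to cover X, and (re)allocate
        # the reusable batch buffer. When more than one batch is needed, the
        # buffer gets one spare row to carry the best candidate between batches.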
old = self.nb_batch
self.nb_batch = len(self.X) // self.batch_len
if len(self.X) % self.batch_len:
self.nb_batch += 1
oldincr = (old > 1)
incr = (self.nb_batch > 1)
if self.batch is None or oldincr != incr:
self.batch = np.empty((self.batch_len+incr, self.X.shape[1]),
dtype=PIXEL_TYPE)
return self.nb_batch
def _get_batch(self, b, extra_row):
start = b * self.batch_len
end = min(start+self.batch_len, len(self.X))
actual_len = end - start
self.batch[:actual_len] = self.X[start:end]
has_extra = 0
if extra_row is not None:
has_extra = 1
self.batch[actual_len] = self.X[extra_row]
if actual_len+has_extra == self.batch.shape[0]:
return self.batch
else:
return self.batch[:actual_len+has_extra]
def predict(self, input_row):
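        # Scan X batch by batch, fitting the 1-NN estimator on each batch.
        # The best match found so far is carried into the next batch as an
        # extra row, so the index returned at the end is the global nearest
        # neighbor of `input_row`.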
self._reset_nb_batch()
nearest = None
for b in range(self.nb_batch):
batch = self._get_batch(b, nearest)
self.sknn.fit(batch)
i_batch = self.sknn.kneighbors([input_row], return_distance=False)
i_batch = i_batch[0][0]
if i_batch != (batch.shape[0]-1) or b == 0:
nearest = b * self.batch_len + i_batch
if self.y is None:
return nearest
return self.y[nearest]
license: mit | hash: 2,629,343,150,644,379,000 | line_mean: 30.536232 | line_max: 78 | alpha_frac: 0.554688 | autogenerated: false | ratio: 3.410658 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: nickretallack/babel | path: babel/messages/pofile.py | copies: 1 | size: 17024 | content:
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Reading and writing of files in the ``gettext`` PO (portable object)
format.
:see: `The Format of PO Files
<http://www.gnu.org/software/gettext/manual/gettext.html#PO-Files>`_
"""
import os
import re
from babel.messages.catalog import Catalog, Message
from babel.util import wraptext
from babel._compat import text_type
__all__ = ['read_po', 'write_po']
def unescape(string):
r"""Reverse `escape` the given string.
>>> print unescape('"Say:\\n \\"hello, world!\\"\\n"')
Say:
"hello, world!"
<BLANKLINE>
:param string: the string to unescape
:return: the unescaped string
"""
def replace_escapes(match):
m = match.group(1)
if m == 'n':
return '\n'
elif m == 't':
return '\t'
elif m == 'r':
return '\r'
# m is \ or "
return m
return re.compile(r'\\([\\trn"])').sub(replace_escapes, string[1:-1])
def denormalize(string):
r"""Reverse the normalization done by the `normalize` function.
>>> print denormalize(r'''""
... "Say:\n"
... " \"hello, world!\"\n"''')
Say:
"hello, world!"
<BLANKLINE>
>>> print denormalize(r'''""
... "Say:\n"
... " \"Lorem ipsum dolor sit "
... "amet, consectetur adipisicing"
... " elit, \"\n"''')
Say:
"Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
<BLANKLINE>
:param string: the string to denormalize
:return: the denormalized string
:rtype: `unicode` or `str`
"""
if '\n' in string:
escaped_lines = string.splitlines()
if string.startswith('""'):
escaped_lines = escaped_lines[1:]
lines = map(unescape, escaped_lines)
return ''.join(lines)
else:
return unescape(string)
def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False, charset=None):
"""Read messages from a ``gettext`` PO (portable object) file from the given
file-like object and return a `Catalog`.
>>> from datetime import datetime
>>> from StringIO import StringIO
>>> buf = StringIO('''
... #: main.py:1
... #, fuzzy, python-format
... msgid "foo %(name)s"
... msgstr "quux %(name)s"
...
... # A user comment
... #. An auto comment
... #: main.py:3
... msgid "bar"
... msgid_plural "baz"
... msgstr[0] "bar"
... msgstr[1] "baaz"
... ''')
>>> catalog = read_po(buf)
>>> catalog.revision_date = datetime(2007, 04, 01)
>>> for message in catalog:
... if message.id:
... print (message.id, message.string)
... print ' ', (message.locations, message.flags)
... print ' ', (message.user_comments, message.auto_comments)
(u'foo %(name)s', u'quux %(name)s')
([(u'main.py', 1)], set([u'fuzzy', u'python-format']))
([], [])
((u'bar', u'baz'), (u'bar', u'baaz'))
([(u'main.py', 3)], set([]))
([u'A user comment'], [u'An auto comment'])
.. versionadded:: 1.0
Added support for explicit charset argument.
:param fileobj: the file-like object to read the PO file from
:param locale: the locale identifier or `Locale` object, or `None`
if the catalog is not bound to a locale (which basically
means it's a template)
:param domain: the message domain
:param ignore_obsolete: whether to ignore obsolete messages in the input
:param charset: the character set of the catalog.
:return: a catalog object representing the parsed PO file
:rtype: `Catalog`
"""
catalog = Catalog(locale=locale, domain=domain, charset=charset)
counter = [0]
offset = [0]
messages = []
translations = []
locations = []
flags = []
user_comments = []
auto_comments = []
obsolete = [False]
context = []
in_msgid = [False]
in_msgstr = [False]
in_msgctxt = [False]
def _add_message():
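        # Flush the accumulated msgid/msgstr/comment buffers into a single
        # Message and add it to the catalog (or to catalog.obsolete), then
        # reset the buffers for the next entry.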
translations.sort()
if len(messages) > 1:
msgid = tuple([denormalize(m) for m in messages])
else:
msgid = denormalize(messages[0])
if isinstance(msgid, (list, tuple)):
string = []
for idx in range(catalog.num_plurals):
try:
string.append(translations[idx])
except IndexError:
string.append((idx, ''))
string = tuple([denormalize(t[1]) for t in string])
else:
string = denormalize(translations[0][1])
if context:
msgctxt = denormalize('\n'.join(context))
else:
msgctxt = None
message = Message(msgid, string, list(locations), set(flags),
auto_comments, user_comments, lineno=offset[0] + 1,
context=msgctxt)
if obsolete[0]:
if not ignore_obsolete:
catalog.obsolete[msgid] = message
else:
catalog[msgid] = message
del messages[:]; del translations[:]; del context[:]; del locations[:];
del flags[:]; del auto_comments[:]; del user_comments[:];
obsolete[0] = False
counter[0] += 1
def _process_message_line(lineno, line):
if line.startswith('msgid_plural'):
in_msgid[0] = True
msg = line[12:].lstrip()
messages.append(msg)
elif line.startswith('msgid'):
in_msgid[0] = True
offset[0] = lineno
txt = line[5:].lstrip()
if messages:
_add_message()
messages.append(txt)
elif line.startswith('msgstr'):
in_msgid[0] = False
in_msgstr[0] = True
msg = line[6:].lstrip()
if msg.startswith('['):
idx, msg = msg[1:].split(']', 1)
translations.append([int(idx), msg.lstrip()])
else:
translations.append([0, msg])
elif line.startswith('msgctxt'):
if messages:
_add_message()
in_msgid[0] = in_msgstr[0] = False
context.append(line[7:].lstrip())
elif line.startswith('"'):
if in_msgid[0]:
messages[-1] += u'\n' + line.rstrip()
elif in_msgstr[0]:
translations[-1][1] += u'\n' + line.rstrip()
elif in_msgctxt[0]:
context.append(line.rstrip())
for lineno, line in enumerate(fileobj.readlines()):
line = line.strip()
if not isinstance(line, text_type):
line = line.decode(catalog.charset)
if line.startswith('#'):
in_msgid[0] = in_msgstr[0] = False
if messages and translations:
_add_message()
if line[1:].startswith(':'):
for location in line[2:].lstrip().split():
pos = location.rfind(':')
if pos >= 0:
try:
lineno = int(location[pos + 1:])
except ValueError:
continue
locations.append((location[:pos], lineno))
elif line[1:].startswith(','):
for flag in line[2:].lstrip().split(','):
flags.append(flag.strip())
elif line[1:].startswith('~'):
obsolete[0] = True
_process_message_line(lineno, line[2:].lstrip())
elif line[1:].startswith('.'):
# These are called auto-comments
comment = line[2:].strip()
if comment: # Just check that we're not adding empty comments
auto_comments.append(comment)
else:
# These are called user comments
user_comments.append(line[1:].strip())
else:
_process_message_line(lineno, line)
if messages:
_add_message()
# No actual messages found, but there was some info in comments, from which
# we'll construct an empty header message
elif not counter[0] and (flags or user_comments or auto_comments):
messages.append(u'')
translations.append([0, u''])
_add_message()
return catalog
WORD_SEP = re.compile('('
r'\s+|' # any whitespace
r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)' # em-dash
')')
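# WORD_SEP is used by `normalize` below to pick break points when wrapping
# long lines for .po output.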
def escape(string):
r"""Escape the given string so that it can be included in double-quoted
strings in ``PO`` files.
>>> escape('''Say:
... "hello, world!"
... ''')
'"Say:\\n \\"hello, world!\\"\\n"'
:param string: the string to escape
:return: the escaped string
"""
return '"%s"' % string.replace('\\', '\\\\') \
.replace('\t', '\\t') \
.replace('\r', '\\r') \
.replace('\n', '\\n') \
.replace('\"', '\\"')
def normalize(string, prefix='', width=76):
r"""Convert a string into a format that is appropriate for .po files.
>>> print normalize('''Say:
... "hello, world!"
... ''', width=None)
""
"Say:\n"
" \"hello, world!\"\n"
>>> print normalize('''Say:
... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
... ''', width=32)
""
"Say:\n"
" \"Lorem ipsum dolor sit "
"amet, consectetur adipisicing"
" elit, \"\n"
:param string: the string to normalize
:param prefix: a string that should be prepended to every line
:param width: the maximum line width; use `None`, 0, or a negative number
to completely disable line wrapping
:return: the normalized string
"""
if width and width > 0:
prefixlen = len(prefix)
lines = []
for line in string.splitlines(True):
if len(escape(line)) + prefixlen > width:
chunks = WORD_SEP.split(line)
chunks.reverse()
while chunks:
buf = []
size = 2
while chunks:
l = len(escape(chunks[-1])) - 2 + prefixlen
if size + l < width:
buf.append(chunks.pop())
size += l
else:
if not buf:
# handle long chunks by putting them on a
# separate line
buf.append(chunks.pop())
break
lines.append(u''.join(buf))
else:
lines.append(line)
else:
lines = string.splitlines(True)
if len(lines) <= 1:
return escape(string)
# Remove empty trailing line
if lines and not lines[-1]:
del lines[-1]
lines[-1] += '\n'
return u'""\n' + u'\n'.join([(prefix + escape(l)) for l in lines])
def write_po(fileobj, catalog, width=76, no_location=False, omit_header=False,
sort_output=False, sort_by_file=False, ignore_obsolete=False,
include_previous=False):
r"""Write a ``gettext`` PO (portable object) template file for a given
message catalog to the provided file-like object.
>>> catalog = Catalog()
>>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)],
... flags=('fuzzy',))
<Message...>
>>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)])
<Message...>
>>> from io import BytesIO
>>> buf = BytesIO()
>>> write_po(buf, catalog, omit_header=True)
>>> print buf.getvalue()
#: main.py:1
#, fuzzy, python-format
msgid "foo %(name)s"
msgstr ""
<BLANKLINE>
#: main.py:3
msgid "bar"
msgid_plural "baz"
msgstr[0] ""
msgstr[1] ""
<BLANKLINE>
<BLANKLINE>
:param fileobj: the file-like object to write to
:param catalog: the `Catalog` instance
:param width: the maximum line width for the generated output; use `None`,
0, or a negative number to completely disable line wrapping
:param no_location: do not emit a location comment for every message
:param omit_header: do not include the ``msgid ""`` entry at the top of the
output
:param sort_output: whether to sort the messages in the output by msgid
:param sort_by_file: whether to sort the messages in the output by their
locations
:param ignore_obsolete: whether to ignore obsolete messages and not include
them in the output; by default they are included as
comments
:param include_previous: include the old msgid as a comment when
updating the catalog
"""
def _normalize(key, prefix=''):
return normalize(key, prefix=prefix, width=width)
def _write(text):
if isinstance(text, text_type):
text = text.encode(catalog.charset, 'backslashreplace')
fileobj.write(text)
def _write_comment(comment, prefix=''):
# xgettext always wraps comments even if --no-wrap is passed;
# provide the same behaviour
if width and width > 0:
_width = width
else:
_width = 76
for line in wraptext(comment, _width):
_write('#%s %s\n' % (prefix, line.strip()))
def _write_message(message, prefix=''):
if isinstance(message.id, (list, tuple)):
if message.context:
_write('%smsgctxt %s\n' % (prefix,
_normalize(message.context, prefix)))
_write('%smsgid %s\n' % (prefix, _normalize(message.id[0], prefix)))
_write('%smsgid_plural %s\n' % (
prefix, _normalize(message.id[1], prefix)
))
for idx in range(catalog.num_plurals):
try:
string = message.string[idx]
except IndexError:
string = ''
_write('%smsgstr[%d] %s\n' % (
prefix, idx, _normalize(string, prefix)
))
else:
if message.context:
_write('%smsgctxt %s\n' % (prefix,
_normalize(message.context, prefix)))
_write('%smsgid %s\n' % (prefix, _normalize(message.id, prefix)))
_write('%smsgstr %s\n' % (
prefix, _normalize(message.string or '', prefix)
))
messages = list(catalog)
if sort_output:
messages.sort()
elif sort_by_file:
messages.sort(lambda x,y: cmp(x.locations, y.locations))
for message in messages:
if not message.id: # This is the header "message"
if omit_header:
continue
comment_header = catalog.header_comment
if width and width > 0:
lines = []
for line in comment_header.splitlines():
lines += wraptext(line, width=width,
subsequent_indent='# ')
comment_header = u'\n'.join(lines)
_write(comment_header + u'\n')
for comment in message.user_comments:
_write_comment(comment)
for comment in message.auto_comments:
_write_comment(comment, prefix='.')
if not no_location:
locs = u' '.join([u'%s:%d' % (filename.replace(os.sep, '/'), lineno)
for filename, lineno in message.locations])
_write_comment(locs, prefix=':')
if message.flags:
_write('#%s\n' % ', '.join([''] + sorted(message.flags)))
if message.previous_id and include_previous:
_write_comment('msgid %s' % _normalize(message.previous_id[0]),
prefix='|')
if len(message.previous_id) > 1:
_write_comment('msgid_plural %s' % _normalize(
message.previous_id[1]
), prefix='|')
_write_message(message)
_write('\n')
if not ignore_obsolete:
for message in catalog.obsolete.values():
for comment in message.user_comments:
_write_comment(comment)
_write_message(message, prefix='#~ ')
_write('\n')
license: bsd-3-clause | hash: 6,471,569,443,023,267,000 | line_mean: 34.101031 | line_max: 84 | alpha_frac: 0.520266 | autogenerated: false | ratio: 4.112077 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: IDNoise/NoiseIDE | path: NoiseIDEPython/idn_snippet_completer.py | copies: 1 | size: 1650 | content:
import os
from idn_completer import Completer
import core
import yaml
class SnippetCompleter(Completer):
def __init__(self, stc):
Completer.__init__(self, stc)
self.snippets = []
for path in [os.path.join(core.MainFrame.cwd, "data", "erlang", "ide_snippets.yaml"),
os.path.join(core.MainFrame.cwd, "data", "erlang", "user_snippets.yaml"),
os.path.join(core.Project.projectDir, "snippets.yaml")]:
if os.path.exists(path):
stream = file(path, 'r')
data = yaml.load(stream)
if data:
self.snippets += data
def OnUpdate(self, text, nextChar = None):
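        # Collect the trailing alphabetic characters of `text` as the current
        # prefix, then list every snippet whose id starts with that prefix.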
self.list.Clear()
core.Log(text)
i = len(text) - 1
while i >= 0 and text[i].isalpha():
self.prefix += text[i]
i -= 1
self.prefix = self.prefix[::-1]
core.Log(self.prefix)
for snippet in self.snippets:
if self.prefix == "" or snippet['id'].startswith(self.prefix):
self.list.Append(snippet['id'], snippet['desc'] + "<br/><br/>" + snippet['snippet'])
def AutoComplete(self, text):
snippet = ""
for m in self.snippets:
if m['id'] == text:
snippet = m['snippet']
if not snippet: return
startPos = self.stc.GetCurrentPos() - len(self.prefix)
self.stc.SetSelectionStart(startPos)
self.stc.SetSelectionEnd(self.stc.GetCurrentPos())
self.stc.ReplaceSelection(snippet)
self.HideCompleter()
self.stc.StartSnippetEditing(startPos, snippet)
license: gpl-2.0 | hash: 3,293,013,052,869,466,000 | line_mean: 32 | line_max: 100 | alpha_frac: 0.555152 | autogenerated: false | ratio: 3.810624 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: activityhistory/TracesVisualizer | path: dayview/scripts/extract.py | copies: 1 | size: 8057 | content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
# TESTING FILE made.by.a.fox. 12.2.15
# Updated by acrule 01.21.16
#FEATURE LIST
# Y connect to db
# Y write to file
# Y Write JSON format
# Accept input date parameter
#KNOWN ISSUES
# 2. no formatting or conversion of datetime stamps
import re
import os
import sys
import json
import sqlite3 as lite
import collections
import time
import datetime
db_file = os.path.expanduser('~/.traces/traces.sqlite') #looks for db under ~/.traces
con = lite.connect(db_file)
with con:
data = [] #master data container
apps = [] #list of apps
windows = [] # list of windows
urls = []
appevents = [] #list of application events
windowevents = [] #list of window events
urlevents = []
exps = [] #list of experiences
images = [] #list of screenshots
words = [] #list of keywords
cur = con.cursor()
#SQL query strings
appsSQL = "SELECT * FROM app"
windowsSQL = "SELECT * FROM window"
urlSQL = "SELECT * FROM url"
activeappSQL = "SELECT a.id, a.app_id, a.event, a.time as startt, min(b.time) AS endt FROM appevent a, appevent b WHERE a.app_id = b.app_id AND a.event = 'Active' AND b.event in ('Inactive', 'Close') AND a.time < b.time AND a.time IS NOT NULL AND b.time IS NOT NULL GROUP BY startt"
activewindowSQL = "SELECT a.id, a.window_id, a.event, a.time as startt, min(b.time) AS endt FROM windowevent a, windowevent b WHERE a.window_id = b.window_id AND a.event = 'Active' AND b.event in ('Inactive', 'Close') AND a.time < b.time AND a.time IS NOT NULL AND b.time IS NOT NULL GROUP BY startt"
activeurlSQL = "SELECT a.id, a.url_id, a.app_id, a.window_id, a.event, a.time as startt, min(b.time) AS endt FROM urlevent a, urlevent b WHERE a.url_id = b.url_id AND a.window_id = b.window_id AND a.app_id = b.app_id AND a.event = 'Active' AND b.event in ('Inactive', 'Close') AND a.time < b.time AND a.time IS NOT NULL AND b.time IS NOT NULL GROUP BY startt"
experienceSQL = "SELECT * FROM experience"
wordsSQL = "SELECT * FROM keys"
#GET list of applications
cur.execute(appsSQL)
rows = cur.fetchall()
for row in rows:
a = collections.OrderedDict()
a['id'] = row[0]
a['time'] = row[1]
a['name'] = row[2]
apps.append(a)
#GET list of windows
cur.execute(windowsSQL)
rows = cur.fetchall()
for row in rows:
w = collections.OrderedDict()
w['id'] = row[0]
w['time'] = row[1]
w['name'] = row[2]
w['app'] = row[3]
windows.append(w)
#GET list of urls
cur.execute(urlSQL)
rows = cur.fetchall()
for row in rows:
u = collections.OrderedDict()
u['id'] = row[0]
u['time'] = row[1]
u['title'] = row[2]
u['url'] = row[3]
u['host'] = row[4]
urls.append(u)
#GET list intervals for primary application
cur.execute(activeappSQL)
rows = cur.fetchall()
for row in rows:
a = collections.OrderedDict()
a['id'] = row[0]
a['appid'] = row[1]
a['event'] = row[2]
a['start'] = row[3]
a['end'] = row[4]
appevents.append(a)
#GET list intervals for primary window
cur.execute(activewindowSQL)
rows = cur.fetchall()
for row in rows:
w = collections.OrderedDict()
w['id'] = row[0]
w['windowid'] = row[1]
w['appid'] = (item for item in windows if item["id"] == row[1]).next()['app']
w['event'] = row[2]
w['start'] = row[3]
w['end'] = row[4]
windowevents.append(w)
#GET list intervals for urls
cur.execute(activeurlSQL)
rows = cur.fetchall()
for row in rows:
u = collections.OrderedDict()
u['id'] = row[0]
u['urlid'] = row[1]
u['appid'] = row[2]
u['windowid'] = row[3]
u['event'] = row[4]
u['start'] = row[5]
u['end'] = row[6]
urlevents.append(u)
#GET list of experiences
cur.execute(experienceSQL)
rows = cur.fetchall()
for row in rows:
a = collections.OrderedDict()
a['id'] = row[0]
a['text'] = row[2]
exps.append(a)
#GET list of screenshots
image_dir = os.path.expanduser('~/.traces/screenshots') #looks for db under ~/.traces
for y in os.listdir(image_dir):
y_dir = os.path.join(image_dir,y)
if not os.path.isdir(y_dir):
continue
for m in os.listdir(y_dir):
m_dir = os.path.join(y_dir, m)
if not os.path.isdir(m_dir):
continue
for d in os.listdir(m_dir):
d_dir = os.path.join(m_dir, d)
if not os.path.isdir(d_dir):
continue
for h in os.listdir(d_dir):
h_dir = os.path.join(d_dir, h)
if not os.path.isdir(h_dir):
continue
h_images = os.listdir(h_dir)
for image in h_images:
#make sure the file is an image
if image[-4:] == '.jpg':
i = collections.OrderedDict()
image_time = datetime.datetime.strptime(image[0:19], '%y%m%d-%H%M%S%f')
i['time'] = (image_time - datetime.datetime(1970,1,1)).total_seconds() + time.timezone #add timezone offset
i['image'] = os.path.join("screenshots", y, m, d, h, image)
images.append(i)
#GET keywords
cmd_rows = []
newWord = ['Enter','Left','Right','Up','Down','Tab','Escape', ' ']
starttime = 0.0
app = 0
window = 0
s = ''
cur.execute(wordsSQL)
rows = cur.fetchall()
for row in rows:
if 'Cmd' in row[3]:
cmd_rows.append(row)
else:
text = str(row[2])
# if its a char indicating a new word, save our text token
if text in newWord:
# save our data
if len(s) > 0:
k = collections.OrderedDict()
k['time'] = starttime #datetime.datetime.fromtimestamp(starttime).strftime("%H:%M %m/%d/%y")
k['text'] = s #just pass the whole string for now
k['app'] = app
k['window'] = window
words.append(k)
#reset tracking time
starttime = float(row[1])
s = ''
# if its a regular char on the same window, just keep building the string
elif int(row[5]) == window: # and float(row[1]) - time <= 300.0:
if text == 'Backspace':
s = s[:-1]
else:
s += row[2]
#else its a regular char but we switched windows, save the data
else:
if len(s) > 0:
k = collections.OrderedDict()
k['time'] = starttime #datetime.datetime.fromtimestamp(starttime).strftime("%H:%M %m/%d/%y")
                    k['text'] = s #just pass the whole string for now
k['app'] = app
k['window'] = window
words.append(k)
#reset tracking variables
window = int(row[5])
app = int(row[4])
starttime = float(row[1])
#write the character to start the next word
if text in newWord or text == 'Backspace':
s = ''
else:
s = row[2]
#ASSEMBLE apps and experince into json
d = collections.OrderedDict()
d['apps']=apps
d['window']=windows
d['url']=urls
d['appevents']=appevents
d['windowevents']=windowevents
d['urlevents']=urlevents
d['exps']=exps
d['images']=images
d['words']=words
data = d
#WRITE file
file = 'extract.json'
z = open(file,'w')
z.writelines(json.dumps(data))
license: gpl-2.0 | hash: -7,282,651,105,888,489,000 | line_mean: 32.995781 | line_max: 363 | alpha_frac: 0.52563 | autogenerated: false | ratio: 3.550903 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: solanolabs/rply | path: rply/parser.py | copies: 1 | size: 2619 | content:
from rply.errors import ParsingError
class LRParser(object):
def __init__(self, lr_table, error_handler):
self.lr_table = lr_table
self.error_handler = error_handler
def parse(self, tokenizer, state=None):
from rply.token import Token
lookahead = None
lookaheadstack = []
statestack = [0]
symstack = [Token("$end", None)]
current_state = 0
while True:
if lookahead is None:
if lookaheadstack:
lookahead = lookaheadstack.pop()
else:
lookahead = tokenizer.next()
if lookahead is None:
lookahead = Token("$end", None)
ltype = lookahead.gettokentype()
if ltype in self.lr_table.lr_action[current_state]:
t = self.lr_table.lr_action[current_state][ltype]
if t > 0:
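                    # shift: push the new state and consume the lookahead token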
statestack.append(t)
current_state = t
symstack.append(lookahead)
lookahead = None
continue
elif t < 0:
# reduce a symbol on the stack and emit a production
p = self.lr_table.grammar.productions[-t]
pname = p.name
plen = p.getlength()
start = len(symstack) + (-plen - 1)
assert start >= 0
targ = symstack[start:]
del targ[0]
start = len(symstack) + (-plen)
assert start >= 0
del symstack[start:]
del statestack[start:]
if state is None:
value = p.func(targ)
else:
value = p.func(state, targ)
symstack.append(value)
current_state = self.lr_table.lr_goto[statestack[-1]][pname]
statestack.append(current_state)
continue
else:
n = symstack[-1]
return n
else:
# TODO: actual error handling here
if self.error_handler is not None:
if state is None:
self.error_handler(lookahead)
else:
self.error_handler(state, lookahead)
raise AssertionError("For now, error_handler must raise.")
else:
raise ParsingError(lookahead.getsourcepos())
license: bsd-3-clause | hash: 457,821,966,247,470,600 | line_mean: 35.887324 | line_max: 80 | alpha_frac: 0.450554 | autogenerated: false | ratio: 5.196429 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: cortesi/pry | path: libpry/explain.py | copies: 1 | size: 3089 | content:
"""
A module for printing "nice" messages from assertion statements.
"""
import tokenize, parser
class _Wrap:
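    # Callable that returns one stored line per call, mimicking a readline()
    # callback for tokenize.generate_tokens.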
def __init__(self, *lines):
self.lines = list(lines)
def __call__(self):
if not self.lines:
raise StopIteration
else:
return self.lines.pop(0)
class Expression:
def __init__(self, s):
self.s = s.strip()
def show(self, glob, loc):
try:
return repr(eval(self.s, glob, loc))
except SyntaxError, v:
return "<could not be evaluated>"
def __eq__(self, other):
return self.s == other.s
class Explain:
_specialOps = set(["==", "!=", "<", ">", ])
_specialNames = set(["not", "and", "or"])
def __init__(self, expr=None, glob=None, loc=None):
self.expr, self.glob, self.loc = expr, glob, loc
if self.expr:
self.parsed, self.expr = self.parseExpression(self.expr)
def parseExpression(self, expr):
"""
Parses an expression into components. It understands the following
delimiters: ==, !=, >, <, not, and, or
In each of these cases, the variables "x" and "y" will be evaluated.
Discards the second (message) clause of an assertion expression.
Returns None if the expression could not be interpreted.
"""
nest = 0
rem = expr
# A list of (str, start, end) tuples.
delimiters = []
try:
for i in list(tokenize.generate_tokens(_Wrap(expr))):
name, txt = tokenize.tok_name[i[0]], i[1]
start, end = i[2][1], i[3][1]
if name == "OP" and (txt == "(" or txt == "["):
nest += 1
elif name == "OP" and (txt == ")" or txt == "]"):
nest -= 1
elif nest == 0:
if name == "OP" and txt in self._specialOps:
delimiters.append((txt, start, end))
elif name == "NAME" and txt in self._specialNames:
delimiters.append((txt, start, end))
elif name == "OP" and txt == ",":
rem = expr[:start]
break
except tokenize.TokenError:
return None, None
if delimiters:
ret = []
cur = 0
for s, start, end in delimiters:
if start > cur:
ret.append(Expression(rem[cur:start]))
ret.append(s)
cur = end
ret.append(Expression(rem[end:]))
return ret, rem
else:
return [Expression(rem)], rem
def __str__(self):
l = []
l.append(" :: Re-evaluating expression:\n")
l.append(" :: %s\n"%self.expr)
l.append(" ::")
for i in self.parsed:
if isinstance(i, Expression):
l.append(i.show(self.glob, self.loc))
else:
l.append(i)
return " ".join(l)
license: mit | hash: 6,707,783,616,712,852,000 | line_mean: 31.861702 | line_max: 80 | alpha_frac: 0.471997 | autogenerated: false | ratio: 4.179973 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: ThomasMarcel/selection-naturelle | path: user/models.py | copies: 1 | size: 1507 | content:
import json
import logging
from google.appengine.ext import ndb
from lib import tools
default_permissions = {'reader': 0, 'administrator': 0}
class User(ndb.Model):
username = ndb.StringProperty()
email = ndb.StringProperty()
    password = ndb.StringProperty()
first_name = ndb.StringProperty()
last_name = ndb.StringProperty()
permissions = ndb.JsonProperty(default=json.dumps(default_permissions))
active = ndb.BooleanProperty(default=False)
notes = ndb.TextProperty()
created = ndb.DateTimeProperty(auto_now_add=True)
modified = ndb.DateTimeProperty(auto_now=True)
@classmethod
def get_by_username(cls, username):
return cls.query(cls.username == username).get()
@classmethod
def get_by_email(cls, email):
return cls.query(cls.email == email).get()
@classmethod
def reset_permissions(cls):
cls.permissions = json.dumps(default_permissions)
@classmethod
def get_by_urlkey(cls, userkey):
return cls.query(User.key == ndb.Key(urlsafe = userkey)).get()
def to_dict(cls):
return {
'key': cls.key,
'username': cls.username,
'email': cls.email,
'password': cls.password,
'first_name': cls.first_name,
'last_name': cls.last_name,
'permissions': cls.permissions,
'active': cls.active,
'notes': cls.notes,
'created': cls.created,
'modified': cls.modified
}
license: apache-2.0 | hash: -5,442,921,198,355,938,000 | line_mean: 28 | line_max: 75 | alpha_frac: 0.624419 | autogenerated: false | ratio: 3.924479 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: viz4biz/PyDataNYC2015 | path: enaml/mpl_canvas.py | copies: 1 | size: 2532 | content:
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed, ForwardTyped, Bool, observe, set_default, Value, List, Enum
from enaml.core.declarative import d_
from .control import Control, ProxyControl
#: Delay the import of matplotlib until needed. This removes the hard
#: dependency on matplotlib for the rest of the Enaml code base.
def Figure():
from matplotlib.figure import Figure
return Figure
class ProxyMPLCanvas(ProxyControl):
""" The abstract definition of a proxy MPLCanvas object.
"""
#: A reference to the MPLCanvas declaration.
declaration = ForwardTyped(lambda: MPLCanvas)
def set_figure(self, figure):
raise NotImplementedError
def set_toolbar_visible(self, visible):
raise NotImplementedError
def set_toolbar_location(self, location):
raise NotImplementedError
def set_event_actions(self, actions):
raise NotImplementedError
def draw(self):
raise NotImplementedError
class MPLCanvas(Control):
""" A control which can be used to embded a matplotlib figure.
"""
#: The matplotlib figure to display in the widget.
figure = d_(ForwardTyped(Figure))
#: Whether or not the matplotlib figure toolbar is visible.
toolbar_visible = d_(Bool(False))
toolbar_location = d_(Enum('top', 'bottom'))
event_actions = d_(List(Value()))
#: Matplotlib figures expand freely in height and width by default.
hug_width = set_default('ignore')
hug_height = set_default('ignore')
#: A reference to the ProxyMPLCanvas object.
proxy = Typed(ProxyMPLCanvas)
def draw(self):
""" Request draw on the Figure """
if self.proxy_is_active:
self.proxy.draw()
#--------------------------------------------------------------------------
# Observers
#--------------------------------------------------------------------------
@observe('figure', 'toolbar_visible', 'toolbar_location', 'event_actions')
def _update_proxy(self, change):
""" An observer which sends state change to the proxy.
"""
# The superclass handler implementation is sufficient.
super(MPLCanvas, self)._update_proxy(change)
license: apache-2.0 | hash: -4,969,528,589,405,369,000 | line_mean: 31.050633 | line_max: 87 | alpha_frac: 0.600711 | autogenerated: false | ratio: 4.813688 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: LiGhT1EsS/cobra | path: cobra/scheduler/report.py | copies: 1 | size: 4364 | content:
# -*- coding: utf-8 -*-
"""
scheduler.report
~~~~~~~~~~~~~~~~
    Implements automated reporting of Cobra data
:author: Feei <feei@feei.cn>
:homepage: https://github.com/wufeifei/cobra
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 Feei. All rights reserved
"""
import os
import subprocess
import base64
import datetime
from cobra.utils.log import logging
from cobra.utils.config import Config
import smtplib
from smtplib import SMTPException
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
logging = logging.getLogger(__name__)
phantomjs = '/usr/local/bin/phantomjs'
time_types = ['w', 'm', 'q']
time_type_des = {
'w': '周',
'm': '月',
'q': '季'
}
class Report(object):
def __init__(self, time_type, month=None):
if time_type not in time_types:
logging.critical('Time type exception')
return
self.time_type_de = time_type_des[time_type]
# mail
mark = ''
if month is None:
c_month = int(datetime.datetime.today().strftime("%m"))
else:
c_month = int(month)
if time_type == 'w':
c_week = int(datetime.datetime.today().strftime("%U"))
mark = 'W{week}'.format(week=c_week)
elif time_type == 'm':
mark = 'M{month}'.format(month=c_month)
elif time_type == 'q':
c_quarter = 0
if c_month in [1, 2, 3]:
c_quarter = 1
elif c_month in [4, 5, 6]:
c_quarter = 2
elif c_month in [7, 8, 9]:
c_quarter = 3
elif c_month in [10, 11, 12]:
c_quarter = 4
mark = 'Q{quarter}'.format(quarter=c_quarter)
self.subject = '[Cobra] 代码安全{0}报({mark})'.format(self.time_type_de, mark=mark)
self.user = Config('email', 'user').value
self.name = Config('email', 'name').value
self.to = Config('report', 'to').value
self.host = Config('email', 'host').value
self.port = Config('email', 'port').value
self.password = Config('email', 'password').value
self.param = [phantomjs, os.path.join(Config().project_directory, 'scheduler', 'report.js'), Config().project_directory, time_type]
if month is not None:
self.param.append(month)
def run(self):
capture = self.capture()
if capture is False:
logging.critical('Capture failed')
return False
# send notification
if self.notification(capture):
return True
else:
logging.critical('Notification failed')
return False
def capture(self):
"""
Use PhantomJS to capture report page
:return: boolean
"""
capture = None
p = subprocess.Popen(self.param, stdout=subprocess.PIPE)
result, err = p.communicate()
if 'Critical' in result:
logging.critical('Capture exception')
return False
lines = result.split('\n')
for l in lines:
if 'reports' in l:
capture = l.split(':')[1].strip()
if capture is None:
logging.critical('get capture image file failed')
return False
else:
return os.path.join(Config().project_directory, capture)
def notification(self, capture_path):
"""
Email notification
:param capture_path:
:return: boolean
"""
msg = MIMEMultipart()
msg['Subject'] = self.subject
msg['From'] = '{0}<{1}>'.format(self.name, self.user)
msg['To'] = self.to
with open(capture_path, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
text = MIMEText('<img src="data:image/png;base64,{0}">'.format(encoded_string), 'html')
msg.attach(text)
try:
s = smtplib.SMTP(self.host, self.port)
s.ehlo()
s.starttls()
s.ehlo()
s.login(self.user, self.password)
s.sendmail(self.user, self.to, msg.as_string())
s.quit()
return True
except SMTPException:
logging.critical('Send mail failed')
return False
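# Usage sketch added by the editor (not part of the original module). Assumes a valid
# Cobra config (email/report sections) and PhantomJS installed at the path above:
#
#     from cobra.scheduler.report import Report
#     Report('w').run()             # weekly report
#     Report('m', month=6).run()    # monthly report for June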
|
mit
| 8,891,019,861,325,576,000
| 29.405594
| 139
| 0.548298
| false
| 3.844385
| true
| false
| false
|
AlexStarov/Shop
|
applications/delivery2/migrations/0002_auto_20161124_2123.py
|
1
|
4727
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import applications.delivery2.models
class Migration(migrations.Migration):
dependencies = [
('delivery2', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='EmailImageTemplate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.CharField(max_length=256, verbose_name='\u041f\u0443\u0442\u044c')),
('image', models.ImageField(upload_to=applications.delivery2.models.upload_to, null=True, verbose_name='\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435', blank=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True)),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True)),
],
options={
'ordering': ['-created_at'],
'db_table': 'Delivery2_EmailImageTemplate',
'verbose_name': '\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435 \u0432 \u043f\u0438\u0441\u044c\u043c\u0435',
'verbose_name_plural': '\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u044f \u0432 \u043f\u0438\u0441\u044c\u043c\u0435',
},
),
migrations.CreateModel(
name='EmailSubject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('subject', models.CharField(default='\u0422\u0435\u043c\u0430', max_length=256, verbose_name='\u0422\u0435\u043c\u0430 \u043f\u0438\u0441\u044c\u043c\u0430')),
('chance', models.DecimalField(default=1, verbose_name='\u0412\u0435\u0440\u043e\u044f\u0442\u043d\u043e\u0441\u0442\u044c', max_digits=4, decimal_places=2)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True)),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True)),
],
options={
'ordering': ['-created_at'],
'db_table': 'Delivery2_EmailSubject',
'verbose_name': '\u0422\u0435\u043c\u0430',
'verbose_name_plural': '\u0422\u0435\u043c\u044b',
},
),
migrations.RemoveField(
model_name='subject',
name='delivery',
),
migrations.RemoveField(
model_name='delivery',
name='template',
),
migrations.AddField(
model_name='emailtemplate',
name='name',
field=models.CharField(null=True, default=b'<built-in method now of type object at 0x83c4c20>', max_length=64, blank=True, unique=True, verbose_name='\u041d\u0430\u0437\u0432\u0430\u043d\u0438\u0435'),
),
migrations.AlterField(
model_name='delivery',
name='task_id',
field=models.CharField(max_length=255, null=True, verbose_name='task id', blank=True),
),
migrations.AlterField(
model_name='emailtemplate',
name='template',
field=models.FileField(upload_to=applications.delivery2.models.upload_to, null=True, verbose_name='\u0428\u0430\u0431\u043b\u043e\u043d', blank=True),
),
migrations.AlterField(
model_name='message',
name='subject',
field=models.ForeignKey(verbose_name='\u0423\u043a\u0430\u0437\u0430\u0442\u0435\u043b\u044c \u043d\u0430 subject', blank=True, to='delivery2.EmailSubject', null=True),
),
migrations.AlterModelTable(
name='emailtemplate',
table='Delivery2_EmailTemplate',
),
migrations.DeleteModel(
name='Subject',
),
migrations.AddField(
model_name='emailsubject',
name='delivery',
field=models.ForeignKey(to='delivery2.Delivery'),
),
migrations.AddField(
model_name='emailimagetemplate',
name='template',
field=models.ForeignKey(related_name='images', verbose_name='\u0428\u0430\u0431\u043b\u043e\u043d', to='delivery2.EmailTemplate'),
),
]
|
apache-2.0
| -7,083,899,874,331,063,000
| 50.380435
| 213
| 0.609477
| false
| 3.357244
| false
| false
| false
|
mammix2/ccoin-dev
|
contrib/pyminer/pyminer.py
|
1
|
6435
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 10464
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
mit
| 8,596,083,419,467,708,000
| 24.535714
| 84
| 0.648951
| false
| 2.83106
| false
| false
| false
|
John-Lin/invoice-net
|
website.py
|
1
|
1459
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from bottle import route, run, template, view
#from bottle import jinja2_view
from invoice_prize import *
@route('/hello')
def hello():
return "Hello World!"
@route('/invoice')
@view('invoice_template')
def invoice():
(results, date) = get_result()
date = date[0].decode('UTF-8')
special = prize(results, 0)
first = prize(results, 1)
second = prize(results, 2)
third = prize(results, 3)
fourth = prize(results, 4)
fifth = prize(results, 5)
sixth = prize(results, 6)
sixth_plus = prize(results, 7)
special2 = prize(results, 8)
return dict(date=date, special2=special2, special=special,
first=first, second=second, third=third, fourth=fourth,
fifth=fifth, sixth=sixth, sixth_plus=sixth_plus)
@route('/invoice_M')
@view('invoiceM_template')
def invoice_m():
(results, date) = get_result()
date = date[0].decode('UTF-8')
special = prize(results, 0)
first = prize(results, 1)
second = prize(results, 2)
third = prize(results, 3)
fourth = prize(results, 4)
fifth = prize(results, 5)
sixth = prize(results, 6)
sixth_plus = prize(results, 7)
special2 = prize(results, 8)
return dict(date=date, special2=special2, special=special,
first=first, second=second, third=third, fourth=fourth,
fifth=fifth, sixth=sixth, sixth_plus=sixth_plus)
run(host='localhost', port=8080, debug=True, reloader=True)
|
mit
| 5,745,852,764,212,994,000
| 27.607843
| 62
| 0.655243
| false
| 2.965447
| false
| false
| false
|
jeonghoonkang/BerePi
|
apps/data.go.kr/get_public_micro_particle.py
|
1
|
3613
|
# -*- coding: utf-8 -*-
# Author : https://github.com/kmlee408
# https://github.com/jeonghoonkang
'''
Busan URL = http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getCtprvnRltmMesureDnsty?serviceKey=fCRWi0DoCfoCPMHyDwai3trva10y4qb8mh9aysoHzvLKDWw6Q2bWOsvuM4%2BsRdvE4dPiKqBFD7vj7%2FM2noCe2g%3D%3D&ver=1.3&pageSize=10&pageNo=1&sidoName=%EB%B6%80%EC%82%B0&startPage=1&numOfRows=100
How to run: $python mdust_pusan.py
(To change the region, edit the misaemunji function, e.g. location = '경기')
(Available regions, passed as the literal Korean names: 서울, 부산, 대구, 인천, 광주, 대전, 울산, 경기, 강원, 충북, 충남, 전북, 전남, 경북, 경남, 제주, 세종)
'''
import requests
from urllib import urlencode, quote_plus
from bs4 import BeautifulSoup
import pandas as pd
import keytxt
# The service key must be issued by data.go.kr
# https://www.data.go.kr/dataset/15000581/openapi.do?mypageFlag=Y
service_key = keytxt.key
def misaemunji(service_key, location=None, spot=None):
    # valid values for location: 서울, 부산, 대구, 인천, 광주, 대전, 울산, 경기, 강원, 충북, 충남, 전북, 전남, 경북, 경남, 제주, 세종
    # real-time measurement API, queried per city/province (sido)
URL ='http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getCtprvnRltmMesureDnsty?serviceKey='
    # build and encode the URL query parameters
    queryParams = '&' + urlencode({quote_plus('numOfRows') : '100', # set to the maximum
quote_plus('pageSize'): '10',
quote_plus('pageNo') : '1',
quote_plus('startPage') :'1',
quote_plus('sidoName') : location,
quote_plus('ver') : '1.3'
})
if location == None :
        exit ('you should pass a location such as 부산')
r = requests.get(URL+service_key+queryParams)
html = r.text
soup = BeautifulSoup(html, 'html.parser') #parsing
info_ = soup.select('item')
misae_station = {}
for info__ in info_:
datetime_ = info__.datatime.text
list_ = [str(info__.pm10value.text),str(info__.pm25value.text)]
        # the two particulate readings (pm10, pm2.5) as a list
misae_station[info__.stationname.text.encode('utf-8')] =list_
    # misae_station holds the pm2.5 / pm10 readings keyed by station name
    # build the dataframe
index_list = ['미세먼지10','초미세먼지2.5']
df = pd.DataFrame(misae_station, index = index_list)
if spot != None :
if spot in misae_station:
'''
            print('measurement time: ' + str(datetime_)), 2018-11-08 20:00
            print('measurement region: ')
            print(location)
            print(spot)
            print('(unit: ㎍/㎥)')
print misae_station[spot][1]
'''
return (str(datetime_), str(spot), 'pm2.5', misae_station[spot][1] )
def get_public_mise(loc='서울', station='강남구'):
kangnam = misaemunji(service_key, location=loc, spot=station)
return kangnam
if __name__ == '__main__':
kangnam = misaemunji(service_key, location='서울', spot='강남구')
    # valid values for location: 서울, 부산, 대구, 인천, 광주, 대전, 울산, 경기, 강원, 충북, 충남, 전북, 전남, 경북, 경남, 제주, 세종
print kangnam
|
bsd-2-clause
| 2,974,692,860,236,816,000
| 36.134146
| 300
| 0.560601
| false
| 1.979114
| false
| false
| false
|
rowinggolfer/openmolar2
|
src/lib_openmolar/admin/db_orm/admin_teeth_present.py
|
1
|
3093
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
## ##
## Copyright 2010-2012, Neil Wallace <neil@openmolar.com> ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
'''
Provides a DemoGenerator for teeth_present table
provides schema and insert query for the teeth_present table
data on which teeth are present in the patients mouth
'''
from random import randint
from PyQt4 import QtSql
from lib_openmolar.common.db_orm import InsertableRecord
TABLENAME = "teeth_present"
class DemoGenerator(object):
def __init__(self, database):
q_query= QtSql.QSqlQuery(
"select min(ix), max(ix) from patients", database)
if q_query.first():
self.min_patient_id = q_query.value(0).toInt()[0]
self.max_patient_id = q_query.value(1).toInt()[0]
else:
self.min_patient_id, self.max_patient_id = 0,0
self.length = self.max_patient_id - self.min_patient_id
self.record = InsertableRecord(database, TABLENAME)
self.record.remove(self.record.indexOf("dent_key"))
self.record.remove(self.record.indexOf('checked_date'))
def demo_queries(self):
'''
return a list of queries to populate a demo database
'''
for patient_id in xrange(self.min_patient_id, self.max_patient_id+1):
self.record.clearValues()
#set values, or allow defaults
self.record.setValue('patient_id', patient_id)
self.record.setValue('checked_by', 'demo_installer')
yield self.record.insert_query
if __name__ == "__main__":
from lib_openmolar.admin.connect import DemoAdminConnection
sc = DemoAdminConnection()
sc.connect()
builder = DemoGenerator(sc)
print builder.demo_queries().next()
|
gpl-3.0
| -1,188,628,920,479,698,700
| 41.369863
| 79
| 0.515681
| false
| 4.52193
| false
| false
| false
|
dormouse/read
|
database/models.py
|
1
|
5390
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
from sqlalchemy import Column, ForeignKey
from sqlalchemy.dialects.sqlite import INTEGER, TEXT, DATETIME, BOOLEAN
from sqlalchemy.orm import column_property, relationship
from sqlalchemy.sql import func
from sqlalchemy import and_
from database.database import book_base, rss_base
class BookJob(book_base):
""" Jobs for book """
__tablename__ = 'book_job'
id = Column(INTEGER, primary_key=True)
type_code = Column(TEXT, ForeignKey('book_dict.code'))
type = relationship(
"BookDict",
primaryjoin="and_(BookJob.type_code==BookDict.code,"
"BookDict.name=='job_type')",
backref='job_type'
)
file_name = Column(TEXT)
url = Column(TEXT)
create_time = Column(DATETIME, default=datetime.datetime.utcnow)
last_update = Column(DATETIME, default=datetime.datetime.utcnow)
status_code = Column(TEXT, ForeignKey('book_dict.code'))
status = relationship(
"BookDict",
primaryjoin="and_(BookJob.status_code==BookDict.code,"
"BookDict.name=='job_status')",
backref='job_status'
)
def __init__(self, url):
self.url = url
def __repr__(self):
return 'BookJob %s' % self.url
class BookDict(book_base):
""" BookDict """
__tablename__ = 'book_dict'
id = Column(INTEGER, primary_key=True)
name = Column(TEXT)
code = Column(TEXT)
value = Column(TEXT)
class Node(rss_base):
__tablename__ = 'node'
id = Column(INTEGER, primary_key=True)
parent_id = Column(INTEGER, ForeignKey('node.id'))
category = Column(TEXT)
children = relationship("Node")
data_id = Column(INTEGER) # RssAction.id or RssFolder.id or RssFeed.id
rank = Column(INTEGER) # rank for display in tree
def __repr__(self):
return "Node:{}".format(self.id)
class RssCommand(rss_base):
__tablename__ = 'rss_command'
id = Column(INTEGER, primary_key=True)
title = Column(TEXT)
command = Column(TEXT)
def __repr__(self):
return "Commander:{}".format(self.title)
class RssFolder(rss_base):
__tablename__ = 'rss_folder'
id = Column(INTEGER, primary_key=True)
title = Column(TEXT)
def __repr__(self):
return "folder:{}".format(self.title)
class RssFeed(rss_base):
__tablename__ = 'rss_feed'
id = Column(INTEGER, primary_key=True)
title = Column(TEXT)
subtitle = Column(TEXT)
url = Column(TEXT)
encoding = Column(TEXT)
language = Column(TEXT)
author = Column(TEXT)
site_url = Column(TEXT)
published = Column(DATETIME)
updated = Column(DATETIME)
def __repr__(self):
return "feed:{}".format(self.title)
class RssItem(rss_base):
__tablename__ = 'rss_item'
id = Column(INTEGER, primary_key=True)
author = Column(TEXT)
feed_id = Column(INTEGER,
ForeignKey('rss_feed.id'),
info={'relationFieldName': 'feed'}
)
feed = relationship("RssFeed")
published = Column(DATETIME)
link = Column(TEXT)
title = Column(TEXT)
summary = Column(TEXT)
content = Column(TEXT)
is_read = Column(BOOLEAN)
@property
def foreignKeyFieldNames(self):
# a list of name of field which have foreign key
cols = self.__table__.columns
fieldNames = [col.name for col in cols]
return filter(self.isForeignKeyField, fieldNames)
@property
def foreignKeyRelationFieldNames(self):
return [self.relationFieldName(name) for name in
self.foreignKeyFieldNames]
@property
def allFieldNames(self):
cols = self.__table__.columns
fieldNames = [col.name for col in cols]
return fieldNames + self.foreignKeyRelationFieldNames
def __repr__(self):
return '<item {0}>'.format(self.title)
def updateByDict(self, dictData):
        for name, value in dictData.items():
setattr(self, name, value)
def isForeignKeyField(self, name):
""" 判断是否是一个外键字段 """
if self.__table__.columns[name].foreign_keys:
return True
else:
return False
def relationFieldName(self, name):
""" 返回外键字段对应的关系字段 """
cols = self.__table__.columns
relationName = dict(cols)[name].info['relationFieldName']
return relationName
def valuesAsDict(self, fieldNames=None):
names = fieldNames if fieldNames else self.allFieldNames
values = self.valuesAsList(names)
return dict(zip(names, values))
def valuesAsList(self, fieldNames):
"""
根据字段列表返回相应的值
:param fieldNames: 字段名称,类型:list
:return: 字段值,类型: list
"""
return [self.fieldValue(name) for name in fieldNames]
def fieldValue(self, fieldName):
"""
根据字段名称返回其值,关系字段返回其中文字典短名称
:param fieldName: 字段名称
:return: 字段值
"""
value = getattr(self, fieldName, None)
if fieldName == 'published':
value = value.strftime("%Y年%m月%d日 %X")
return value
# return value.value_short if isinstance(value, ModelCqDict) else value
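# Usage sketch added by the editor (not part of the original module); `session` is a
# hypothetical SQLAlchemy session bound to the rss database:
#
#     item = session.query(RssItem).first()
#     print(item.fieldValue('title'))
#     item.updateByDict({'is_read': True})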
|
lgpl-3.0
| 7,813,540,119,064,889,000
| 27.839779
| 79
| 0.615134
| false
| 3.536585
| false
| false
| false
|
fengkaicnic/traffic
|
traffic/crypto.py
|
1
|
12797
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wrappers around standard crypto data elements.
Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
"""
from __future__ import absolute_import
import hashlib
import os
import string
from traffic import context
from traffic import db
from traffic import exception
from traffic import flags
from traffic.openstack.common import cfg
from traffic.openstack.common import log as logging
from traffic.openstack.common import timeutils
from traffic import utils
LOG = logging.getLogger(__name__)
crypto_opts = [
cfg.StrOpt('ca_file',
default='cacert.pem',
help=_('Filename of root CA')),
cfg.StrOpt('key_file',
default=os.path.join('private', 'cakey.pem'),
help=_('Filename of private key')),
cfg.StrOpt('crl_file',
default='crl.pem',
help=_('Filename of root Certificate Revocation List')),
cfg.StrOpt('keys_path',
default='$state_path/keys',
help=_('Where we keep our keys')),
cfg.StrOpt('ca_path',
default='$state_path/CA',
help=_('Where we keep our root CA')),
cfg.BoolOpt('use_project_ca',
default=False,
help=_('Should we use a CA for each project?')),
cfg.StrOpt('user_cert_subject',
default='/C=US/ST=California/O=OpenStack/'
'OU=trafficDev/CN=%.16s-%.16s-%s',
help=_('Subject for certificate for users, %s for '
'project, user, timestamp')),
cfg.StrOpt('project_cert_subject',
default='/C=US/ST=California/O=OpenStack/'
'OU=trafficDev/CN=project-ca-%.16s-%s',
help=_('Subject for certificate for projects, %s for '
'project, timestamp')),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(crypto_opts)
def ca_folder(project_id=None):
if FLAGS.use_project_ca and project_id:
return os.path.join(FLAGS.ca_path, 'projects', project_id)
return FLAGS.ca_path
def ca_path(project_id=None):
return os.path.join(ca_folder(project_id), FLAGS.ca_file)
def key_path(project_id=None):
return os.path.join(ca_folder(project_id), FLAGS.key_file)
def crl_path(project_id=None):
return os.path.join(ca_folder(project_id), FLAGS.crl_file)
def fetch_ca(project_id=None):
if not FLAGS.use_project_ca:
project_id = None
ca_file_path = ca_path(project_id)
if not os.path.exists(ca_file_path):
raise exception.CryptoCAFileNotFound(project_id=project_id)
with open(ca_file_path, 'r') as cafile:
return cafile.read()
def ensure_ca_filesystem():
"""Ensure the CA filesystem exists."""
ca_dir = ca_folder()
if not os.path.exists(ca_path()):
genrootca_sh_path = os.path.join(os.path.dirname(__file__),
'CA',
'genrootca.sh')
start = os.getcwd()
utils.ensure_tree(ca_dir)
os.chdir(ca_dir)
utils.execute("sh", genrootca_sh_path)
os.chdir(start)
def _generate_fingerprint(public_key_file):
(out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f', public_key_file)
fingerprint = out.split(' ')[1]
return fingerprint
def generate_fingerprint(public_key):
with utils.tempdir() as tmpdir:
try:
pubfile = os.path.join(tmpdir, 'temp.pub')
with open(pubfile, 'w') as f:
f.write(public_key)
return _generate_fingerprint(pubfile)
except exception.ProcessExecutionError:
raise exception.InvalidKeypair()
def generate_key_pair(bits=1024):
# what is the magic 65537?
with utils.tempdir() as tmpdir:
keyfile = os.path.join(tmpdir, 'temp')
utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '',
'-t', 'rsa', '-f', keyfile, '-C', 'Generated by traffic')
fingerprint = _generate_fingerprint('%s.pub' % (keyfile))
if not os.path.exists(keyfile):
raise exception.FileNotFound(keyfile)
private_key = open(keyfile).read()
public_key_path = keyfile + '.pub'
if not os.path.exists(public_key_path):
raise exception.FileNotFound(public_key_path)
public_key = open(public_key_path).read()
return (private_key, public_key, fingerprint)
def fetch_crl(project_id):
"""Get crl file for project."""
if not FLAGS.use_project_ca:
project_id = None
crl_file_path = crl_path(project_id)
if not os.path.exists(crl_file_path):
raise exception.CryptoCRLFileNotFound(project_id)
with open(crl_file_path, 'r') as crlfile:
return crlfile.read()
def decrypt_text(project_id, text):
private_key = key_path(project_id)
if not os.path.exists(private_key):
raise exception.ProjectNotFound(project_id=project_id)
try:
dec, _err = utils.execute('openssl',
'rsautl',
'-decrypt',
'-inkey', '%s' % private_key,
process_input=text)
return dec
except exception.ProcessExecutionError:
raise exception.DecryptionFailure()
def revoke_cert(project_id, file_name):
"""Revoke a cert by file name."""
start = os.getcwd()
os.chdir(ca_folder(project_id))
# NOTE(vish): potential race condition here
utils.execute('openssl', 'ca', '-config', './openssl.cnf', '-revoke',
file_name)
utils.execute('openssl', 'ca', '-gencrl', '-config', './openssl.cnf',
'-out', FLAGS.crl_file)
os.chdir(start)
def revoke_certs_by_user(user_id):
"""Revoke all user certs."""
admin = context.get_admin_context()
for cert in db.certificate_get_all_by_user(admin, user_id):
revoke_cert(cert['project_id'], cert['file_name'])
def revoke_certs_by_project(project_id):
"""Revoke all project certs."""
# NOTE(vish): This is somewhat useless because we can just shut down
# the vpn.
admin = context.get_admin_context()
for cert in db.certificate_get_all_by_project(admin, project_id):
revoke_cert(cert['project_id'], cert['file_name'])
def revoke_certs_by_user_and_project(user_id, project_id):
"""Revoke certs for user in project."""
admin = context.get_admin_context()
for cert in db.certificate_get_all_by_user_and_project(admin,
user_id, project_id):
revoke_cert(cert['project_id'], cert['file_name'])
def _project_cert_subject(project_id):
"""Helper to generate user cert subject."""
return FLAGS.project_cert_subject % (project_id, timeutils.isotime())
def _user_cert_subject(user_id, project_id):
"""Helper to generate user cert subject."""
return FLAGS.user_cert_subject % (project_id, user_id, timeutils.isotime())
def generate_x509_cert(user_id, project_id, bits=1024):
"""Generate and sign a cert for user in project."""
subject = _user_cert_subject(user_id, project_id)
with utils.tempdir() as tmpdir:
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
utils.execute('openssl', 'genrsa', '-out', keyfile, str(bits))
utils.execute('openssl', 'req', '-new', '-key', keyfile, '-out',
csrfile, '-batch', '-subj', subject)
private_key = open(keyfile).read()
csr = open(csrfile).read()
(serial, signed_csr) = sign_csr(csr, project_id)
fname = os.path.join(ca_folder(project_id), 'newcerts/%s.pem' % serial)
cert = {'user_id': user_id,
'project_id': project_id,
'file_name': fname}
db.certificate_create(context.get_admin_context(), cert)
return (private_key, signed_csr)
def _ensure_project_folder(project_id):
if not os.path.exists(ca_path(project_id)):
geninter_sh_path = os.path.join(os.path.dirname(__file__),
'CA',
'geninter.sh')
start = os.getcwd()
os.chdir(ca_folder())
utils.execute('sh', geninter_sh_path, project_id,
_project_cert_subject(project_id))
os.chdir(start)
def generate_vpn_files(project_id):
project_folder = ca_folder(project_id)
key_fn = os.path.join(project_folder, 'server.key')
crt_fn = os.path.join(project_folder, 'server.crt')
if os.path.exists(crt_fn):
return
# NOTE(vish): The 2048 is to maintain compatibility with the old script.
# We are using "project-vpn" as the user_id for the cert
# even though that user may not really exist. Ultimately
# this will be changed to be launched by a real user. At
# that point we will can delete this helper method.
key, csr = generate_x509_cert('project-vpn', project_id, 2048)
with open(key_fn, 'w') as keyfile:
keyfile.write(key)
with open(crt_fn, 'w') as crtfile:
crtfile.write(csr)
def sign_csr(csr_text, project_id=None):
if not FLAGS.use_project_ca:
project_id = None
if not project_id:
return _sign_csr(csr_text, ca_folder())
_ensure_project_folder(project_id)
project_folder = ca_folder(project_id)
return _sign_csr(csr_text, ca_folder(project_id))
def _sign_csr(csr_text, ca_folder):
with utils.tempdir() as tmpdir:
inbound = os.path.join(tmpdir, 'inbound.csr')
outbound = os.path.join(tmpdir, 'outbound.csr')
with open(inbound, 'w') as csrfile:
csrfile.write(csr_text)
LOG.debug(_('Flags path: %s'), ca_folder)
start = os.getcwd()
# Change working dir to CA
utils.ensure_tree(ca_folder)
os.chdir(ca_folder)
utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
'./openssl.cnf', '-infiles', inbound)
out, _err = utils.execute('openssl', 'x509', '-in', outbound,
'-serial', '-noout')
serial = string.strip(out.rpartition('=')[2])
os.chdir(start)
with open(outbound, 'r') as crtfile:
return (serial, crtfile.read())
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# http://code.google.com/p/boto
def compute_md5(fp):
"""Compute an md5 hash.
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file pointer will be
reset to the beginning of the file before the method returns.
:rtype: tuple
:returns: the hex digest version of the MD5 hash
"""
m = hashlib.md5()
fp.seek(0)
s = fp.read(8192)
while s:
m.update(s)
s = fp.read(8192)
hex_md5 = m.hexdigest()
# size = fp.tell()
fp.seek(0)
return hex_md5
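# Usage sketch added by the editor (not part of the original module):
#
#     with open('image.raw', 'rb') as fp:
#         checksum = compute_md5(fp)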
|
apache-2.0
| -3,505,826,213,502,774,300
| 34.64624
| 79
| 0.616238
| false
| 3.684711
| false
| false
| false
|
arpitmathur/CourseAvailabilityChecker
|
courseCheck.py
|
1
|
1898
|
__author__ = 'Arpit'
import find
import time
import sys
import gmailer
#initialize datastructures
courses = []
semester = []
email = []
flag = 0
#parse changeme
with open('CHANGEME.txt') as fp:
for line in fp:
if(line[0] == "\n" or line[0] == "#"):
continue
line = line.rstrip()
if(line == "START" or line == "EMAIL START"):
continue
elif(line == "EMAIL END"):
break
elif(line == "END"):
flag = 2
elif(flag == 0):
semester = (line.rsplit(','))
flag = 1
elif(flag == 1):
courses.append(line.rsplit(','))
elif(flag == 2):
email = (line.rsplit(','))
flag = 0
count = 0
sleepTime = 300
#while a course isn't available
while courses:
count = count + 1
if count!=1:
print ("Please wait for " + str(sleepTime/60) + " minutes before the next attempt!")
#sleep five minutes
time.sleep(sleepTime)
print ("Aaaaaaaand we're back! \n")
print ('Attempt: ' + str(count))
try:
for course in list(courses):
print ("Checking: " + str(course[0]) + ' ' + str(course[1]) + ' - CRN: ' + str(course[2]))
#check availability
flag = find.search(semester, course)
if( flag == 1):
print ('Success!')
print ('Sending email now!')
courses.remove(course)
try:
gmailer.sendemail(email[0], email[0], "", str(course[0]) + " " + str(course[1]) + " IS OPEN", "The CRN is " + str(course[2]) + ". Register now!", email[0], email[1] )
except:
raise ValueError()
else:
print ("It's Closed!")
except ValueError:
print ("Fix your email credentials!")
sys.exit()
except:
print ("oops")
|
mit
| 7,397,628,063,815,961,000
| 27.343284
| 186
| 0.494731
| false
| 3.811245
| false
| false
| false
|
argonnexraydetector/RoachFirmPy
|
Roach2DevelopmentTree/pyfiles/pca.py
|
1
|
3520
|
import numpy as np
from numpy import floor
from numpy.random import rand
from scipy import linalg
import random as rnd
import matplotlib
import matplotlib.pyplot
# figure/clf/plot are used as bare names in eigenPulseTrain() and testan() below
from matplotlib.pyplot import figure, clf, plot
'''
execfile('pca.py')
p = pulseTrain(1000)
e = eigens(p)
plot(e['eigenvectors'][0])
plot(e['eigenvectors'][1])
testan(e,2);
'''
print 'running pca.py'
def makePulse(L=100.0,t1=10.0,t2=1.0,a1=1.0,a2=1.0,n=0.1):
rnd.seed(None)
e1=a1*np.exp(-1*np.arange(L)/t1);
e2=a2*(1.0 - np.exp(-1*np.arange(L)/t2));
p1=e1*e2
noise=[]
for k in range(int(L)): noise.append(rnd.gauss(0.0,n))
noise=np.array(noise)
p1=p1+noise
return(p1)
def pulseTrain(N=100):
plist=[]
for n in range(N):
amp = 0.5 + 0.02*rnd.random()
amp2 = 0.2 + 0.02*rnd.random()
xx=rnd.random()
if xx>=0.5: tc = 10
else: tc = 4
pls=makePulse(
a1=amp,
a2=amp2,
t2=4,
t1=tc,
n=0.001)
plist.append(pls.tolist())
D=np.array(plist).transpose()
plotTrain(D)
return(D)
def plotTrain(D):
matplotlib.pyplot.figure(1)
N=D.shape[0]
L=D.shape[1]
matplotlib.pyplot.clf()
for k in range(N):
matplotlib.pyplot.plot(D.transpose()[k])
matplotlib.pyplot.figure(2)
matplotlib.pyplot.clf()
matplotlib.pyplot.pcolor(D)
def eigens(D):
Z=np.dot(D,D.transpose() )
#Z =np.cov(D)
evals,evecs=linalg.eig(Z)
evals = np.real(evals)
evecs = np.real(evecs)
matplotlib.pyplot.figure(1)
matplotlib.pyplot.clf()
matplotlib.pyplot.plot(np.real(evals))
matplotlib.pyplot.figure(2)
matplotlib.pyplot.clf()
matplotlib.pyplot.pcolor(evecs * evals)
matplotlib.pyplot.figure(3)
matplotlib.pyplot.clf()
matplotlib.pyplot.pcolor(Z)
matplotlib.pyplot.figure(4)
matplotlib.pyplot.plot(evecs * evals)
retdata = {}
retdata['eigenvalues'] = np.real(evals)
retdata['eigenvectors'] = np.real(evecs).transpose()
retdata['covariance'] = Z
return(retdata)
def eigenPulseTrain(eigendata,numcomponents=2,N=100):
pulsestruct =np.array( [ [0.1,1.0],[1.0,0.1] , [0.5,0.5] , [0.1,-1.0]])
pulses = []
for n in range(N):
pulse = np.array([0.0] * len(eigendata['eigenvectors'][0]) )
r = rand()
        psindex = int(floor(rand() * len(pulsestruct)))
ps = pulsestruct[psindex]
ps = ps* (1.0 + 0.2*rand(numcomponents))
for c in range(numcomponents):
eigpulse = eigendata['eigenvectors'][c]
pulse = pulse + eigpulse * ps[c]
pulses.append(pulse)
pulses = np.array(pulses)
figure(1)
clf()
plot(pulses.transpose())
return(pulses)
def testan(eigendata,numcomponents):
#p = pulseTrain().transpose()
p = eigenPulseTrain(eigendata)
figure(10)
Rvals = []
for pulse in p:
rvalp = [0.0] * (1+numcomponents)
energy = 0.0
for c in range(numcomponents):
filt = eigendata['eigenvectors'][c]
fp = np.convolve(pulse,filt)
rvalp[c] =(np.dot(fp,fp))
#rvalp[c] =max(fp)
energy = energy + rvalp[c]
rvalp[numcomponents] = energy
Rvals.append(rvalp)
if numcomponents==2:
plot(rvalp[0],rvalp[1],'.')
return(np.array(Rvals) )
|
gpl-2.0
| -1,943,123,093,516,862,700
| 20.469512
| 75
| 0.537216
| false
| 3.037101
| false
| false
| false
|
why2pac/dp-tornado
|
dp_tornado/helper/io/image/__init__.py
|
1
|
12413
|
# -*- coding: utf-8 -*-
import tempfile
from dp_tornado.engine.helper import Helper as dpHelper
class ImageHelper(dpHelper):
def compare(self, i1, i2, error=0):
i1 = self.load(i1)
i2 = self.load(i2)
if not i1 or not i2:
return None
s1 = i1.size
s2 = i2.size
        if s1[0] != s2[0] or s1[1] != s2[1]:
print('size ne,', s1, s2)
return False
i1 = i1.load()
i2 = i2.load()
for i in range(s1[0]):
for j in range(s1[1]):
if i1[i, j] != i2[i, j]:
if error:
for k in range(len(i1[i, j])):
if abs(i1[i, j][k] - i2[i, j][k]) > error:
print('pixel ne,', i1[i, j], i2[i, j], abs(i1[i, j][k] - i2[i, j][k]), error)
return False
else:
return False
return True
def _driver(self, options=None, **kwargs):
if not options and kwargs:
options = kwargs
if options and 'driver' in options and options['driver'] == 'wand':
return self.helper.io.image.driver.wand
return self.helper.io.image.driver.pillow
def load(self, src, options=None, **kwargs):
if not options and kwargs:
options = kwargs
tmp = None
drivers = []
pillow_image = self.helper.io.image.driver.pillow.Image
wand_image = self.helper.io.image.driver.wand.Image
if pillow_image:
drivers.append(pillow_image)
if wand_image:
drivers.append(wand_image)
try:
if isinstance(src, tuple(drivers)):
return src
elif self.helper.web.url.validate(src):
code, res = self.helper.web.http.get.raw(src)
if code != 200:
raise Exception('The specified image url is invalid.')
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(res)
tmp.close()
tmp = tmp.name
else:
tmp = None
if not tmp and not src:
raise Exception('The specified image is invalid.')
img = self._driver(options=options).load(tmp if tmp else src)
if not img:
raise Exception('The specified image is invalid.')
return img
except Exception as e:
self.logging.exception(e)
return False
finally:
if tmp:
self.helper.io.file.remove(tmp)
def execute(self, src, fn, options=None, **kwargs):
if not options and kwargs:
options = kwargs
img = self.load(src, options=options)
if not img:
return False
try:
return fn(img, options)
except Exception as e:
self.logging.exception(e)
return False
def size(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
if not img:
return -1, -1
return img.width, img.height
return self.execute(src, fn, options=options)
def crop(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
crop = kwargs['crop'] if 'crop' in kwargs else None
if not crop:
return img
e_top = 0
e_left = 0
e_right = 0
e_bottom = 0
if self.helper.misc.type.check.string(crop):
crop = crop.split(',')
crop = [int(e.strip()) for e in crop]
if self.helper.misc.type.check.numeric(crop):
e_top = e_left = e_right = e_bottom = crop
elif isinstance(crop, (tuple, list)):
if len(crop) == 1:
e_top = e_left = e_right = e_bottom = crop[0]
elif len(crop) == 2:
e_top = e_bottom = crop[0]
e_left = e_right = crop[1]
elif len(crop) == 4:
e_top = crop[0]
e_right = crop[1]
e_bottom = crop[2]
e_left = crop[3]
img = self._driver(options=kwargs).crop(img, e_left, e_top, img.size[0] - e_right, img.size[1] - e_bottom)
return img
return self.execute(src, fn, options=options)
def border(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
border = int(kwargs['border']) if 'border' in kwargs else 0
border_color = kwargs['border_color'] if 'border_color' in kwargs else '#000000'
if not border:
return img
if '_org' in kwargs and 'radius' in kwargs and kwargs['radius']:
return img
img = self._driver(options=kwargs).border(img, border, border_color)
return img
return self.execute(src, fn, options=options)
def radius(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
radius = int(kwargs['radius'] or 0) if 'radius' in kwargs else None
border = int(kwargs['border']) if 'border' in kwargs else 0
border_color = kwargs['border_color'] if 'border_color' in kwargs else '#000000'
if not radius:
return img
elif '__radius_processed__' in img.__dict__:
return img
img = self._driver(options=kwargs).radius(img, radius, border, border_color)
img.__dict__['__radius_processed__'] = True
return img
return self.execute(src, fn, options=options)
def colorize(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
colorize = kwargs['colorize'] if 'colorize' in kwargs else None
if not colorize:
return img
img = self._driver(options=kwargs).colorize(img, colorize)
return img
return self.execute(src, fn, options=options)
def resize(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
size = kwargs['size'] if 'size' in kwargs else None
mode = kwargs['mode'] if 'mode' in kwargs else None
scale = int(kwargs['scale']) if 'scale' in kwargs else 1
limit = True if 'limit' in kwargs and kwargs['limit'] else False
border = int(kwargs['border']) if 'border' in kwargs else 0
if not size:
return img
width_new, height_new = size
width_origin, height_origin = img.size
if scale > 1:
if limit:
scale_max_width = float(width_origin) / float(width_new)
scale_max_height = float(height_origin) / float(height_new)
scale_max = min(scale, scale_max_width, scale_max_height)
else:
scale_max = scale
if scale_max > 1:
width_new = int(width_new * scale_max)
height_new = int(height_new * scale_max)
if not width_new:
width_new = width_origin * height_new / height_origin
mode = self.helper.io.image.mode.resize
if not height_new:
height_new = height_origin * width_new / width_origin
mode = self.helper.io.image.mode.resize
if border:
width_new -= border * 2
height_new -= border * 2
if not mode:
mode = self.helper.io.image.mode.resize
if mode not in self.helper.io.image.mode.modes:
raise Exception('The specified mode is not supported.')
seqs = []
for i, im in self._driver(options=kwargs).iter_seqs(img, kwargs):
# Image Resizing
if mode == self.helper.io.image.mode.center:
im = self._driver(options=kwargs).resize(im, width_new, height_new, kwargs)
elif mode == self.helper.io.image.mode.fill:
ratio_origin = float(width_origin) / float(height_origin)
ratio_new = float(width_new) / float(height_new)
if ratio_origin > ratio_new:
tw = int(round(height_new * ratio_origin))
im = self._driver(options=kwargs).resize(im, tw, height_new)
left = int(round((tw - width_new) / 2.0))
im = self._driver(options=kwargs).crop(im, left, 0, left + width_new, height_new)
elif ratio_origin < ratio_new:
th = int(round(width_new / ratio_origin))
im = self._driver(options=kwargs).resize(im, width_new, th)
top = int(round((th - height_new) / 2.0))
im = self._driver(options=kwargs).crop(im, 0, top, width_new, top + height_new)
else:
im = self._driver(options=kwargs).resize(im, width_new, height_new)
elif mode == self.helper.io.image.mode.resize:
if width_new > width_origin or height_new > height_origin:
width_new = width_origin
height_new = height_origin
im = self._driver(options=kwargs).resize(im, width_new, height_new)
seqs.append(im)
img = seqs[0]
seqs.remove(img)
img.__dict__['__frames__'] = seqs
return img
return self.execute(src, fn, options=options)
def save(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
ext = kwargs['format'] if 'format' in kwargs else None
dest = kwargs['dest'] if 'dest' in kwargs else None
if not dest:
return None
if not ext and self.helper.misc.type.check.string(dest):
ext = self.helper.io.path.ext(dest, dot='').lower()
if not ext and self.helper.misc.type.check.string(src):
ext = self.helper.io.path.ext(src, dot='').lower()
if not ext and '_org' in kwargs and kwargs['_org'] and self.helper.misc.type.check.string(kwargs['_org']):
ext = self.helper.io.path.ext(kwargs['_org'], dot='').lower()
if dest == 's3':
# TODO
return False
if not self._driver(options=kwargs).save(img, ext, dest, kwargs):
return False
return True
return self.execute(src, fn, options=options)
def manipulate(self, src, options=None, **kwargs):
if not options and kwargs:
options = kwargs
options['_org'] = src
try:
img = self.load(src, options=options)
# Crop
img = self.crop(img, options=options)
if not img:
return False
# Resize
img = self.resize(img, options=options)
if not img:
return False
# Radius
img = self.radius(img, options=options)
if not img:
return False
# Border
img = self.border(img, options=options)
if not img:
return False
# Colorize
img = self.colorize(img, options=options)
if not img:
return False
# Save
saved = self.save(img, options=options)
if saved is None:
return img
elif saved is False:
return False
return True
except Exception as e:
self.logging.exception(e)
return False
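# Usage sketch added by the editor (not part of the original module). The helper is
# normally reached through the dp_tornado helper tree; the paths and size are made up:
#
#     ok = helper.io.image.manipulate(
#         '/tmp/in.jpg', size=(300, 200), border=2,
#         border_color='#ffffff', dest='/tmp/out.jpg')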
|
mit
| 5,356,887,573,378,849,000
| 29.649383
| 118
| 0.498107
| false
| 4.183687
| false
| false
| false
|
kgarrison343/recipe-site
|
polls/views.py
|
1
|
1213
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from .models import Question, Choice
# Create your views here.
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a valid choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
|
mit
| -1,490,559,948,873,557,200
| 30.921053
| 82
| 0.693322
| false
| 3.912903
| false
| false
| false
|
bobmcwhirter/drools
|
lib/utility-scripts/docbot-masseur.py
|
1
|
2159
|
#!/usr/bin/python
#
# This script will flatten out a folder based docbook manual into a docbot friendly "flat" structure
# (and update links in files accordingly)
# Author: Michael Neale
#
import os, sys, shutil
def flatten(root, output) :
if not os.path.isdir(output):
os.mkdir(output)
if not os.path.isdir(os.path.join(output, "images")):
os.mkdir(os.path.join(output, "images"))
sections = {}
top_files = []
names = os.listdir(root)
for name in names:
if os.path.isdir(os.path.join(root, name)) :
if not name == ".svn":
flattenDir(root, name, output, sections)
else:
if name.endswith(".xml") :
top_files.append(name)
elif name != ".svn":
shutil.copyfile(os.path.join(root, name), os.path.join(output, name))
for file in top_files:
contents = open(os.path.join(root, file), "r").read()
for section in sections:
contents = contents.replace(section, sections[section])
outfile = open(os.path.join(output, file), "w")
outfile.write(contents)
def flattenDir(root, dir, output, sections):
docs = []
images = []
names = os.listdir(os.path.join(root, dir))
for name in names:
if name.endswith(".xml"):
docs.append(name)
else:
if name != ".svn":
images.append(name)
shutil.copyfile(os.path.join(root, dir, name), os.path.join(output, "images", dir + "_" + name))
for doc in docs:
new_name = dir + "_" + doc
sections[dir + "/" + doc] = new_name
file = open(os.path.join(root, dir, doc), "r").read()
outfile = open(os.path.join(output, new_name), "w")
for img in images:
file = file.replace(img, "images/" + dir + "_" + img)
outfile.write(file)
if len(sys.argv) < 2:
print "2 arguments required: <path to root of documentation> <output path>. eg: docbot-masseur.py ./something ./output"
else:
flatten(sys.argv[1], sys.argv[2])
|
apache-2.0
| 6,316,199,486,616,234,000
| 31.223881
| 123
| 0.552571
| false
| 3.628571
| false
| false
| false
|
ristorantino/fiscalberry
|
Traductores/TraductorFiscal.py
|
1
|
7099
|
# -*- coding: utf-8 -*-
from Traductores.TraductorInterface import TraductorInterface
import math
class TraductorFiscal(TraductorInterface):
def dailyClose(self, type):
"Comando X o Z"
# cancelar y volver a un estado conocido
self.comando.cancelAnyDocument()
self.comando.start()
ret = self.comando.dailyClose(type)
self.comando.close()
return ret
def imprimirAuditoria(self, desde, hasta):
"Imprimir Auditoria"
#Solo compatible para Epson 1G y 2G por el momento...
#desde & hasta parametros que pueden ser números de zetas o fechas en formato ddmmyyyy
self.comando.start()
ret = self.comando.imprimirAuditoria(desde, hasta)
self.comando.close()
return ret
def getStatus(self, *args):
"getStatus"
self.comando.start()
ret = self.comando.getStatus(list(args))
self.comando.close()
return ret
def setHeader(self, *args):
"SetHeader"
self.comando.start()
ret = self.comando.setHeader(list(args))
self.comando.close()
return ret
def setTrailer(self, *args):
"SetTrailer"
self.comando.start()
ret = self.comando.setTrailer(list(args))
self.comando.close()
return ret
def openDrawer(self, *args):
"Abrir caja registradora"
self.comando.start()
ret = self.comando.openDrawer()
self.comando.close()
return ret
def getLastNumber(self, tipo_cbte):
"Devuelve el último número de comprobante"
self.comando.start()
letra_cbte = tipo_cbte[-1] if len(tipo_cbte) > 1 else None
ret = self.comando.getLastNumber(letra_cbte)
self.comando.close()
return ret
def cancelDocument(self, *args):
"Cancelar comprobante en curso"
self.comando.start()
self.comando.cancelAnyDocument()
self.comando.close()
def printTicket(self, encabezado=None, items=[], pagos=[], percepciones=[], addAdditional=None, setHeader=None, setTrailer=None):
if setHeader:
self.setHeader(*setHeader)
if setTrailer:
self.setTrailer(*setTrailer)
self.comando.start()
try:
if encabezado:
self._abrirComprobante(**encabezado)
else:
self._abrirComprobante()
for item in items:
self._imprimirItem(**item)
if percepciones:
for percepcion in percepciones:
self._imprimirPercepcion(**percepcion)
if pagos:
for pago in pagos:
self._imprimirPago(**pago)
if addAdditional:
self.comando.addAdditional(**addAdditional)
rta = self._cerrarComprobante()
self.comando.close()
return rta
except Exception, e:
self.cancelDocument()
raise
def _abrirComprobante(self,
                          tipo_cbte="T", # ticket
tipo_responsable="CONSUMIDOR_FINAL",
tipo_doc="SIN_CALIFICADOR",
                          nro_doc=" ", # unspecified
nombre_cliente=" ",
domicilio_cliente=" ",
                          referencia=None, # original receipt (for debit/credit notes)
**kwargs
):
"Creo un objeto factura (internamente) e imprime el encabezado"
# crear la estructura interna
self.factura = {"encabezado": dict(tipo_cbte=tipo_cbte,
tipo_responsable=tipo_responsable,
tipo_doc=tipo_doc, nro_doc=nro_doc,
nombre_cliente=nombre_cliente,
domicilio_cliente=domicilio_cliente,
referencia=referencia),
"items": [], "pagos": [], "percepciones": []}
printer = self.comando
letra_cbte = tipo_cbte[-1] if len(tipo_cbte) > 1 else None
        # map the customer type (position/category)
pos_fiscal = printer.ivaTypes.get(tipo_responsable)
        # map the document number type according to RG1361
doc_fiscal = printer.docTypes.get(tipo_doc)
ret = False
        # send the commands that open the fiscal receipt:
if tipo_cbte.startswith('T'):
if letra_cbte:
ret = printer.openTicket(letra_cbte)
else:
ret = printer.openTicket()
elif tipo_cbte.startswith("F"):
ret = printer.openBillTicket(letra_cbte, nombre_cliente, domicilio_cliente,
nro_doc, doc_fiscal, pos_fiscal)
elif tipo_cbte.startswith("ND"):
ret = printer.openDebitNoteTicket(letra_cbte, nombre_cliente,
domicilio_cliente, nro_doc, doc_fiscal,
pos_fiscal)
elif tipo_cbte.startswith("NC"):
ret = printer.openBillCreditTicket(letra_cbte, nombre_cliente,
domicilio_cliente, nro_doc, doc_fiscal,
pos_fiscal, referencia)
return ret
def _imprimirItem(self, ds, qty, importe, alic_iva=21., itemNegative=False, discount=0, discountDescription='',
discountNegative=False):
"Envia un item (descripcion, cantidad, etc.) a una factura"
if importe < 0:
importe = math.fabs(importe)
itemNegative = True
self.factura["items"].append(dict(ds=ds, qty=qty,
importe=importe, alic_iva=alic_iva, itemNegative=itemNegative,
discount=discount, discountDescription=discountDescription,
discountNegative=discountNegative))
        # Note: net, VAT, etc. are not computed here (they must arrive precalculated!)
if discountDescription == '':
discountDescription = ds
return self.comando.addItem(ds, float(qty), float(importe), float(alic_iva),
itemNegative, float(discount), discountDescription, discountNegative)
def _imprimirPago(self, ds, importe):
"Imprime una linea con la forma de pago y monto"
self.factura["pagos"].append(dict(ds=ds, importe=importe))
return self.comando.addPayment(ds, float(importe))
def _imprimirPercepcion(self, ds, importe):
"Imprime una linea con nombre de percepcion y monto"
self.factura["percepciones"].append(dict(ds=ds, importe=importe))
return self.comando.addPerception(ds, float(importe))
def _cerrarComprobante(self, *args):
"Envia el comando para cerrar un comprobante Fiscal"
return self.comando.closeDocument()
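# Usage sketch added by the editor (not part of the original module). The dict shapes
# mirror what printTicket() unpacks above; `traductor` is a TraductorFiscal already
# wired to a fiscal printer command object:
#
#     traductor.printTicket(
#         encabezado={"tipo_cbte": "T"},
#         items=[{"ds": "Cafe", "qty": 1, "importe": 100.0, "alic_iva": 21.0}],
#         pagos=[{"ds": "Efectivo", "importe": 100.0}])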
|
mit
| -7,636,467,503,041,819,000
| 36.539683
| 133
| 0.552361
| false
| 3.80633
| false
| false
| false
|
k-j-m/Pyxon
|
pyxon/decode.py
|
1
|
5564
|
# Dict of the form:
# { cls: [propname]}
# cls: class that has been written with the @sprop annotation
# propname: name of the property
class_sprops = {}
# Dict of the form:
# {cls: {name:(fn, inv_fn)}}
# cls: class that has been written with @cprop annotations
# name: class attribute name
# fn: function to turn json data into the corresponding attribute type
# inv_fn: inverse of fn
class_cprops = {}
# Dict of the form:
# {AbstractClass:specifier_property}
# AbstractClass: the class that we're trying to (de)serialize
# specifier_property: the name of the json property that
# will indicate the concrete class name
specifier_properties = {}
# Dict of the form {AbstractClass: {label: ConcreteClass}}
# Used to retrieve the concrete implementation of an
# abstract class based on a string label.
class_specifiers = {}
# {ConcreteClass: (AbstractClass, concrete_label)}
conc_to_abstract = {}
def add_type_property(data,cls):
"""
Given some JSON data and the class from which it was produced,
this function returns the JSON data with any required type
annotations added to it.
"""
if not cls in conc_to_abstract:
return data
abstract_cls, concrete_label = conc_to_abstract[cls]
prop_name = specifier_properties[abstract_cls]
data[prop_name] = concrete_label
return data
class MetaSProp(type):
"""
Metaclass designed specifically to let us use dot notation
for specifying simple class properties. This metaclass
    contains the decorator logic for the @sprop decorator.
"""
def __getattr__(prop_cls,key):
def sprop2(cls):
simple_props = class_sprops.get(cls,[])
simple_props.append(key)
class_sprops[cls]=simple_props
return cls
return sprop2
class sprop:
"""
Decorator used to add simple properties to a class.
The logic for this decorator is contained in the metaclass
MetaSProp. The reason for this is to allow simple dot
    notation to specify parameters.
Example:
>>> @sprop.x
>>> @sprop.y
>>> class Foo(object): pass
"""
__metaclass__ = MetaSProp
class MetaCProp(type):
"""
Metaclass for the cprop calculated property decorator.
This class contains all of the decorator logic. The reason
for using a metaclass rather than something simpler is
to allow us to use dot notation when adding calculated
properties.
"""
def __getattr__(prop_cls,key):
def cprop2(f1, f2):
def cprop3(cls):
cprops = class_cprops.get(cls,{})
cprops[key]=(f1,f2)
class_cprops[cls]=cprops
return cls
return cprop3
return cprop2
class cprop:
"""
Decorator for adding calculated properties to a class.
A calculated property is needed when the json data can't
be added to the class directly, for example when creating
some other user classes from the data before adding as
properties.
The decorator needs to be given 2 functions as arguments:
fun1: a function that takes JSON data and converts to some
other data type
fun2: the inverse of fun1, which takes some data type and
converts it into JSON data
Note: ideally the following will hold for any value of x
>>> fun2(fun1(x)) == x
Example:
@sprop.x
class Foo(object): pass
@cprop.y(f1=obj(Foo), f2=unobjectify)
class Bar(object): pass
"""
__metaclass__ = MetaCProp
# Decorator annotations
def subtyped(using):
"""
Decorator used to indicate that a class will be subtyped.
The using= parameter is used to indicate which JSON
property will contain the name of the subclass. A sensible
    value for this will be @type, but this will all depend on
how you have set up the rest of the system.
Example:
@subtyped(using='@type')
class Foo(object): pass
"""
# Because this is a parameterised decorator that we call, we
# now need to create and return the decorator proper.
def subtyped2(cls):
specifier_properties[cls]=using
return cls
return subtyped2
def extending(super_cls, named):
"""
This decorator is used to indicate which superclass a class
    extends. This could potentially be interpreted from the class's
mro, but that starts to get tricky and we would still need to
add extra info to say what the class will be named in the data.
This label is needed because we can't necessarily rely on the
class name and the class label in the data being the same.
Example:
@extending(Foo, named='Bar')
class Baz(Foo): pass
"""
def extending2(cls):
conc_to_abstract[cls]=super_cls,named
clsmap = class_specifiers.get(super_cls,{})
clsmap[named]=cls
class_specifiers[super_cls]=clsmap
return cls
return extending2
def conc2(data, cls):
"""
Returns the appropriate concrete class of a subtyped class
based on the content of some JSON data.
If the class is not subtyped then it gets returned.
"""
s1 = set(specifier_properties.keys())
s2 = set(class_specifiers.keys())
assert s1==s2, "You need to use @subtyped and @extending as a pair!:\n%s\n%s" % (str(s1), str(s2))
if not cls in specifier_properties:
return cls
prop_name = specifier_properties[cls]
cls_label = data[prop_name]
concrete_cls = class_specifiers[cls][cls_label]
return concrete_cls
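# Hedged end-to-end sketch (not part of the original module) tying the decorators and
# lookup helpers together; Shape, Circle and the 'kind' property are illustrative
# assumptions, everything else is defined above:
#
#     @subtyped(using='kind')
#     @sprop.name
#     class Shape(object): pass
#
#     @extending(Shape, named='circle')
#     @sprop.radius
#     class Circle(Shape): pass
#
# conc2({'kind': 'circle', 'name': 'c1', 'radius': 2}, Shape) then resolves to Circle,
# and add_type_property({'name': 'c1', 'radius': 2}, Circle) puts 'kind': 'circle' back
# into the data.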
|
mit
| 950,680,876,732,445,200
| 28.913978
| 102
| 0.663192
| false
| 3.994257
| false
| false
| false
|
twitter/pants
|
src/python/pants/subsystem/subsystem_client_mixin.py
|
1
|
6246
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import object
from twitter.common.collections import OrderedSet
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.optionable import OptionableFactory
from pants.option.scope import ScopeInfo
from pants.util.objects import datatype
class SubsystemClientError(Exception): pass
class SubsystemDependency(datatype([
'subsystem_cls',
'scope',
'removal_version',
'removal_hint',
]), OptionableFactory):
"""Indicates intent to use an instance of `subsystem_cls` scoped to `scope`."""
def __new__(cls, subsystem_cls, scope, removal_version=None, removal_hint=None):
return super(SubsystemDependency, cls).__new__(cls, subsystem_cls, scope, removal_version, removal_hint)
def is_global(self):
return self.scope == GLOBAL_SCOPE
@property
def optionable_cls(self):
# Fills the OptionableFactory contract.
return self.subsystem_cls
@property
def options_scope(self):
"""The subscope for options of `subsystem_cls` scoped to `scope`.
This is the scope that option values are read from when initializing the instance
indicated by this dependency.
"""
if self.is_global():
return self.subsystem_cls.options_scope
else:
return self.subsystem_cls.subscope(self.scope)
class SubsystemClientMixin(object):
"""A mixin for declaring dependencies on subsystems.
Must be mixed in to an Optionable.
"""
@classmethod
def subsystem_dependencies(cls):
"""The subsystems this object uses.
Override to specify your subsystem dependencies. Always add them to your superclass's value.
Note: Do not call this directly to retrieve dependencies. See subsystem_dependencies_iter().
:return: A tuple of SubsystemDependency instances.
In the common case where you're an optionable and you want to get an instance scoped
to you, call subsystem_cls.scoped(cls) to get an appropriate SubsystemDependency.
As a convenience, you may also provide just a subsystem_cls, which is shorthand for
    SubsystemDependency(subsystem_cls, GLOBAL_SCOPE) and indicates that we want to use
the global instance of that subsystem.
"""
return tuple()
@classmethod
def subsystem_dependencies_iter(cls):
"""Iterate over the direct subsystem dependencies of this Optionable."""
for dep in cls.subsystem_dependencies():
if isinstance(dep, SubsystemDependency):
yield dep
else:
yield SubsystemDependency(dep, GLOBAL_SCOPE, removal_version=None, removal_hint=None)
@classmethod
def subsystem_closure_iter(cls):
"""Iterate over the transitive closure of subsystem dependencies of this Optionable.
:rtype: :class:`collections.Iterator` of :class:`SubsystemDependency`
:raises: :class:`pants.subsystem.subsystem_client_mixin.SubsystemClientMixin.CycleException`
if a dependency cycle is detected.
"""
seen = set()
dep_path = OrderedSet()
def iter_subsystem_closure(subsystem_cls):
if subsystem_cls in dep_path:
raise cls.CycleException(list(dep_path) + [subsystem_cls])
dep_path.add(subsystem_cls)
for dep in subsystem_cls.subsystem_dependencies_iter():
if dep not in seen:
seen.add(dep)
yield dep
for d in iter_subsystem_closure(dep.subsystem_cls):
yield d
dep_path.remove(subsystem_cls)
for dep in iter_subsystem_closure(cls):
yield dep
class CycleException(Exception):
"""Thrown when a circular subsystem dependency is detected."""
def __init__(self, cycle):
message = 'Cycle detected:\n\t{}'.format(' ->\n\t'.join(
'{} scope: {}'.format(optionable_cls, optionable_cls.options_scope)
for optionable_cls in cycle))
super(SubsystemClientMixin.CycleException, self).__init__(message)
@classmethod
def known_scope_infos(cls):
"""Yield ScopeInfo for all known scopes for this optionable, in no particular order.
:rtype: set of :class:`pants.option.scope.ScopeInfo`
:raises: :class:`pants.subsystem.subsystem_client_mixin.SubsystemClientMixin.CycleException`
if a dependency cycle is detected.
"""
known_scope_infos = set()
optionables_path = OrderedSet() # To check for cycles at the Optionable level, ignoring scope.
def collect_scope_infos(optionable_cls, scoped_to, removal_version=None, removal_hint=None):
if optionable_cls in optionables_path:
raise cls.CycleException(list(optionables_path) + [optionable_cls])
optionables_path.add(optionable_cls)
scope = (optionable_cls.options_scope if scoped_to == GLOBAL_SCOPE
else optionable_cls.subscope(scoped_to))
scope_info = ScopeInfo(
scope,
optionable_cls.options_scope_category,
optionable_cls,
removal_version=removal_version,
removal_hint=removal_hint
)
if scope_info not in known_scope_infos:
known_scope_infos.add(scope_info)
for dep in scope_info.optionable_cls.subsystem_dependencies_iter():
# A subsystem always exists at its global scope (for the purpose of options
# registration and specification), even if in practice we only use it scoped to
# some other scope.
#
# NB: We do not apply deprecations to this implicit global copy of the scope, because if
# the intention was to deprecate the entire scope, that could be accomplished by
# deprecating all options in the scope.
collect_scope_infos(dep.subsystem_cls, GLOBAL_SCOPE)
if not dep.is_global():
collect_scope_infos(dep.subsystem_cls,
scope,
removal_version=dep.removal_version,
removal_hint=dep.removal_hint)
optionables_path.remove(scope_info.optionable_cls)
collect_scope_infos(cls, GLOBAL_SCOPE)
return known_scope_infos
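# Hedged illustration (not part of the original file): a client typically declares its
# subsystem dependencies by overriding subsystem_dependencies(), e.g.
#
#   class MyTask(SubsystemClientMixin, Optionable):
#     @classmethod
#     def subsystem_dependencies(cls):
#       return super(MyTask, cls).subsystem_dependencies() + (SomeSubsystem.scoped(cls),)
#
# MyTask.subsystem_closure_iter() then walks SomeSubsystem and its transitive
# dependencies, raising CycleException on circular declarations. MyTask, SomeSubsystem
# and the Optionable base are assumptions here; the .scoped(cls) convention comes from
# the subsystem_dependencies() docstring above.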
|
apache-2.0
| 7,956,763,797,131,338,000
| 36.401198
| 108
| 0.686519
| false
| 4.087696
| false
| false
| false
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/operations/virtual_machine_extension_images_operations.py
|
1
|
10932
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class VirtualMachineExtensionImagesOperations(object):
"""VirtualMachineExtensionImagesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-03-30".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-03-30"
self.config = config
def get(
self, location, publisher_name, type, version, custom_headers=None, raw=False, **operation_config):
"""Gets a virtual machine extension image.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param type:
:type type: str
:param version:
:type version: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualMachineExtensionImage or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtensionImage or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}'
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'type': self._serialize.url("type", type, 'str'),
'version': self._serialize.url("version", version, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineExtensionImage', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_types(
self, location, publisher_name, custom_headers=None, raw=False, **operation_config):
"""Gets a list of virtual machine extension image types.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype:
list[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtensionImage]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types'
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[VirtualMachineExtensionImage]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_versions(
self, location, publisher_name, type, filter=None, top=None, orderby=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of virtual machine extension image versions.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param type:
:type type: str
:param filter: The filter to apply on the operation.
:type filter: str
:param top:
:type top: int
:param orderby:
:type orderby: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype:
list[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtensionImage]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions'
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'type': self._serialize.url("type", type, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[VirtualMachineExtensionImage]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
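# Hedged usage sketch (not part of the generated code): these operations are normally
# reached through a ComputeManagementClient-style wrapper, roughly:
#     images = client.virtual_machine_extension_images
#     image = images.get('westus', 'SomePublisher', 'SomeExtensionType', '1.0.0')
#     types = images.list_types('westus', 'SomePublisher')
# The client attribute name, region, publisher, type and version values are illustrative
# assumptions; only the method names and parameter order come from the class above.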
|
mit
| -586,315,393,133,818,900
| 43.620408
| 181
| 0.644621
| false
| 4.440292
| true
| false
| false
|
kamailio/kamcli
|
kamcli/commands/cmd_db.py
|
1
|
28309
|
import os
import sys
import click
from sqlalchemy import create_engine
from sqlalchemy.sql import text
from sqlalchemy.exc import SQLAlchemyError
from kamcli.cli import pass_context
from kamcli.ioutils import ioutils_dbres_print
from kamcli.ioutils import ioutils_formats_list
from kamcli.dbutils import dbutils_exec_sqlfile
KDB_GROUP_BASIC = ["standard"]
KDB_GROUP_STANDARD = [
"acc",
"lcr",
"domain",
"group",
"permissions",
"registrar",
"usrloc",
"msilo",
"alias_db",
"uri_db",
"speeddial",
"avpops",
"auth_db",
"pdt",
"dialog",
"dispatcher",
"dialplan",
"topos",
]
KDB_GROUP_EXTRA = [
"imc",
"cpl",
"siptrace",
"domainpolicy",
"carrierroute",
"drouting",
"userblacklist",
"userblocklist",
"htable",
"purple",
"uac",
"pipelimit",
"mtree",
"sca",
"mohqueue",
"rtpproxy",
"rtpengine",
"secfilter",
]
KDB_GROUP_PRESENCE = ["presence", "rls"]
KDB_GROUP_UID = [
"uid_auth_db",
"uid_avp_db",
"uid_domain",
"uid_gflags",
"uid_uri_db",
]
@click.group(
"db", help="Raw database operations", short_help="Raw database operations"
)
@pass_context
def cli(ctx):
pass
@cli.command("query", short_help="Run SQL statement")
@click.option(
"oformat",
"--output-format",
"-F",
type=click.Choice(["raw", "json", "table", "dict"]),
default=None,
help="Format the output",
)
@click.option(
"ostyle",
"--output-style",
"-S",
default=None,
help="Style of the output (tabulate table format)",
)
@click.argument("query", metavar="<query>")
@pass_context
def db_query(ctx, oformat, ostyle, query):
e = create_engine(ctx.gconfig.get("db", "rwurl"))
res = e.execute(query.encode("ascii", "ignore").decode())
ioutils_dbres_print(ctx, oformat, ostyle, res)
@cli.command("connect", short_help="Launch db cli and connect to database")
@pass_context
def db_connect(ctx):
dbtype = ctx.gconfig.get("db", "type")
if dbtype.lower() == "mysql":
scmd = ("mysql -h {0} -u {1} -p{2} {3}").format(
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "dbname"),
)
elif dbtype == "postgresql":
scmd = ('psql "postgresql://{0}:{1}@{2}/{3}"').format(
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "dbname"),
)
elif dbtype == "sqlite":
scmd = ("sqlite3 {0} ").format(
ctx.gconfig.get("db", "dbpath"),
)
else:
ctx.log("unsupported database type [%s]", dbtype)
sys.exit()
os.system(scmd)
@cli.command("clirun", short_help="Run SQL statement via cli")
@click.argument("query", metavar="<query>")
@pass_context
def db_clirun(ctx, query):
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "mysql":
scmd = ('mysql -h {0} -u {1} -p{2} -e "{3} ;" {4}').format(
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
query,
ctx.gconfig.get("db", "dbname"),
)
elif dbtype == "postgresql":
scmd = ('psql "postgresql://{0}:{1}@{2}/{3}" -c "{4} ;"').format(
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "dbname"),
query,
)
elif dbtype == "sqlite":
scmd = ('sqlite3 {0} "{1} "').format(
ctx.gconfig.get("db", "dbpath"),
query,
)
else:
ctx.log("unsupported database type [%s]", dbtype)
sys.exit()
os.system(scmd)
@cli.command("clishow", short_help="Show content of table via cli")
@click.argument("table", metavar="<table>")
@pass_context
def db_clishow(ctx, table):
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "mysql":
scmd = (
'mysql -h {0} -u {1} -p{2} -e "select * from {3} ;" {4}'
).format(
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
table,
ctx.gconfig.get("db", "dbname"),
)
elif dbtype == "postgresql":
scmd = (
'psql "postgresql://{0}:{1}@{2}/{3}" -c "select * from {4} ;"'
).format(
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "dbname"),
table,
)
elif dbtype == "sqlite":
scmd = ('sqlite3 {0} "select * from {1} "').format(
ctx.gconfig.get("db", "dbpath"),
table,
)
else:
ctx.log("unsupported database type [%s]", dbtype)
sys.exit()
os.system(scmd)
@cli.command("clishowg", short_help="Show content of table via cli")
@click.argument("table", metavar="<table>")
@pass_context
def db_clishowg(ctx, table):
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "mysql":
scmd = (
r'mysql -h {0} -u {1} -p{2} -e "select * from {3} \G" {4}'
).format(
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
table,
ctx.gconfig.get("db", "dbname"),
)
elif dbtype == "postgresql":
scmd = (
'psql "postgresql://{0}:{1}@{2}/{3}" -c "\\x" -c "select * from {4} ;" -c "\\x"'
).format(
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "dbname"),
table,
)
elif dbtype == "sqlite":
scmd = ('sqlite3 -line {0} "select * from {1} "').format(
ctx.gconfig.get("db", "dbpath"),
table,
)
else:
ctx.log("unsupported database type [%s]", dbtype)
sys.exit()
os.system(scmd)
@cli.command("show", short_help="Show content of a table")
@click.option(
"oformat",
"--output-format",
"-F",
type=click.Choice(ioutils_formats_list),
default=None,
help="Format the output",
)
@click.option(
"ostyle",
"--output-style",
"-S",
default=None,
help="Style of the output (tabulate table format)",
)
@click.argument("table", metavar="<table>")
@pass_context
def db_show(ctx, oformat, ostyle, table):
ctx.vlog("Content of database table [%s]", table)
e = create_engine(ctx.gconfig.get("db", "rwurl"))
res = e.execute("select * from {0}".format(table))
ioutils_dbres_print(ctx, oformat, ostyle, res)
@cli.command(
"showcreate", short_help="Show create statement of of a database table"
)
@click.option(
"oformat",
"--output-format",
"-F",
type=click.Choice(ioutils_formats_list),
default=None,
help="Format the output",
)
@click.option(
"ostyle",
"--output-style",
"-S",
default=None,
help="Style of the output (tabulate table format)",
)
@click.argument("table", metavar="<table>")
@pass_context
def db_showcreate(ctx, oformat, ostyle, table):
ctx.vlog("Show create of database table [%s]", table)
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "mysql":
e = create_engine(ctx.gconfig.get("db", "rwurl"))
res = e.execute("show create table {0}".format(table))
ioutils_dbres_print(ctx, oformat, ostyle, res)
elif dbtype == "postgresql":
scmd = ('psql "postgresql://{0}:{1}@{2}/{3}" -c "\\d {4} "').format(
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "dbname"),
table,
)
os.system(scmd)
elif dbtype == "sqlite":
scmd = ('sqlite3 {0} ".schema {1} "').format(
ctx.gconfig.get("db", "dbpath"),
table,
)
os.system(scmd)
else:
ctx.log("unsupported database type [%s]", dbtype)
@cli.command("runfile", short_help="Run SQL statements in a file")
@click.argument("fname", metavar="<fname>")
@pass_context
def db_runfile(ctx, fname):
"""Run SQL statements in a file
\b
Parameters:
<fname> - name to the file with the SQL statements
"""
ctx.vlog("Run statements in the file [%s]", fname)
e = create_engine(ctx.gconfig.get("db", "rwurl"))
dbutils_exec_sqlfile(ctx, e, fname)
def db_create_mysql_host_users(
ctx,
e,
nousers,
nogrants,
dbname,
dbhost,
dbrwuser,
dbrwpassword,
dbrouser,
dbropassword,
):
if not nousers:
e.execute(
"CREATE USER {0!r}@{1!r} IDENTIFIED BY {2!r}".format(
dbrwuser, dbhost, dbrwpassword
)
)
if not nogrants:
e.execute(
"GRANT ALL PRIVILEGES ON {0}.* TO {1!r}@{2!r}".format(
dbname, dbrwuser, dbhost
)
)
if not nousers:
e.execute(
"CREATE USER {0!r}@{1!r} IDENTIFIED BY {2!r}".format(
dbrouser, dbhost, dbropassword
)
)
if not nogrants:
            e.execute(
                "GRANT SELECT ON {0}.* TO {1!r}@{2!r}".format(
dbname, dbrouser, dbhost
)
)
def db_create_mysql_users(ctx, e, dbname, nousers, nogrants):
dbhost = ctx.gconfig.get("db", "host")
dbrwuser = ctx.gconfig.get("db", "rwuser")
dbrwpassword = ctx.gconfig.get("db", "rwpassword")
dbrouser = ctx.gconfig.get("db", "rouser")
dbropassword = ctx.gconfig.get("db", "ropassword")
dbaccesshost = ctx.gconfig.get("db", "accesshost")
    db_create_mysql_host_users(
        ctx,
        e,
        nousers,
        nogrants,
        dbname,
        dbhost,
        dbrwuser,
        dbrwpassword,
        dbrouser,
        dbropassword,
    )
if dbhost != "localhost":
db_create_mysql_host_users(
ctx,
e,
nousers,
nogrants,
dbname,
"localhost",
dbrwuser,
dbrwpassword,
dbrouser,
dbropassword,
)
if len(dbaccesshost) > 0:
db_create_mysql_host_users(
ctx,
e,
nousers,
nogrants,
dbname,
dbaccesshost,
dbrwuser,
dbrwpassword,
dbrouser,
dbropassword,
)
def db_create_sql_group(ctx, e, dirpath, dbgroup):
for t in dbgroup:
fname = dirpath + "/" + t + "-create.sql"
dbutils_exec_sqlfile(ctx, e, fname)
def db_create_sql_table_groups(ctx, e, ldirectory, alltables):
db_create_sql_group(ctx, e, ldirectory, KDB_GROUP_BASIC)
db_create_sql_group(ctx, e, ldirectory, KDB_GROUP_STANDARD)
option = "y"
if not alltables:
print("Do you want to create extra tables? (y/n):", end=" ")
option = input()
if option == "y":
db_create_sql_group(ctx, e, ldirectory, KDB_GROUP_EXTRA)
if not alltables:
print("Do you want to create presence tables? (y/n):", end=" ")
option = input()
if option == "y":
db_create_sql_group(ctx, e, ldirectory, KDB_GROUP_PRESENCE)
if not alltables:
print("Do you want to create uid tables? (y/n):", end=" ")
option = input()
if option == "y":
db_create_sql_group(ctx, e, ldirectory, KDB_GROUP_UID)
def db_create_mysql(ctx, ldbname, ldirectory, nousers, nogrants, alltables):
e = create_engine(ctx.gconfig.get("db", "adminurl"))
e.execute("create database {0}".format(ldbname))
db_create_mysql_users(ctx, e, ldbname, nousers, nogrants)
e.execute("use {0}".format(ldbname))
db_create_sql_table_groups(ctx, e, ldirectory, alltables)
def db_create_postgresql(
ctx, ldbname, ldirectory, nousers, nogrants, nofunctions, alltables
):
scmd = (
'psql "postgresql://{0}:{1}@{2}" -c "create database {3} "'
).format(
ctx.gconfig.get("db", "adminuser"),
ctx.gconfig.get("db", "adminpassword"),
ctx.gconfig.get("db", "host"),
ldbname,
)
os.system(scmd)
e = create_engine(ctx.gconfig.get("db", "adminurl"))
if not nogrants:
e.execute(
"CREATE USER {0} WITH PASSWORD '{1}';".format(
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
)
)
e.execute(
"GRANT CONNECT ON DATABASE {0} TO {1};".format(
ldbname,
ctx.gconfig.get("db", "rwuser"),
)
)
if ctx.gconfig.get("db", "rwuser") != ctx.gconfig.get("db", "rouser"):
e.execute(
"CREATE USER {0} WITH PASSWORD '{1}';".format(
ctx.gconfig.get("db", "rouser"),
ctx.gconfig.get("db", "ropassword"),
)
)
e.execute(
"GRANT CONNECT ON DATABASE {0} TO {1};".format(
ldbname,
ctx.gconfig.get("db", "rouser"),
)
)
e.dispose()
e = create_engine(
"{0}+{1}://{2}:{3}@{4}/{5}".format(
ctx.gconfig.get("db", "type"),
ctx.gconfig.get("db", "driver"),
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "host"),
ldbname,
)
)
if not nofunctions:
e.execute(
"CREATE FUNCTION concat(text, text) RETURNS text AS 'SELECT $1 || $2;' LANGUAGE 'sql';"
)
e.execute(
"CREATE FUNCTION rand() RETURNS double precision AS 'SELECT random();' LANGUAGE 'sql';"
)
db_create_sql_table_groups(ctx, e, ldirectory, alltables)
e.dispose()
e = create_engine(ctx.gconfig.get("db", "adminurl"))
if not nogrants:
e.execute(
"GRANT ALL PRIVILEGES ON DATABASE {0} TO {1};".format(
ldbname,
ctx.gconfig.get("db", "rwuser"),
)
)
if ctx.gconfig.get("db", "rwuser") != ctx.gconfig.get("db", "rouser"):
e.execute(
"GRANT SELECT ON DATABASE {0} TO {1};".format(
ldbname,
ctx.gconfig.get("db", "rouser"),
)
)
def db_create_sqlite(ctx, ldbname, ldirectory, alltables):
e = create_engine(
"{0}+{1}:///{2}".format(
ctx.gconfig.get("db", "type"),
ctx.gconfig.get("db", "driver"),
ldbname,
)
)
db_create_sql_table_groups(ctx, e, ldirectory, alltables)
@cli.command("create", short_help="Create database structure")
@click.option(
"dbname",
"--dbname",
"-d",
default="",
help="Database name or path to the folder for database",
)
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@click.option(
"nousers",
"--no-users",
"-U",
is_flag=True,
help="Do not create users",
)
@click.option(
"nogrants",
"--no-grants",
"-G",
is_flag=True,
help="Do not grant privileges",
)
@click.option(
"nofunctions",
"--no-functions",
"-F",
is_flag=True,
help="Do not create additional SQL functions",
)
@click.option(
"alltables",
"--all-tables",
"-a",
is_flag=True,
help="Create all tables without asking for confirmation",
)
@pass_context
def db_create(
ctx, dbname, scriptsdirectory, nousers, nogrants, nofunctions, alltables
):
"""Create database structure
\b
"""
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "sqlite":
ldbname = ctx.gconfig.get("db", "dbpath")
else:
ldbname = ctx.gconfig.get("db", "dbname")
if len(dbname) > 0:
ldbname = dbname
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
ctx.vlog("Creating database [%s] structure", ldbname)
if dbtype == "mysql":
db_create_mysql(ctx, ldbname, ldirectory, nousers, nogrants, alltables)
return
elif dbtype == "postgresql":
db_create_postgresql(
ctx, ldbname, ldirectory, nousers, nogrants, nofunctions, alltables
)
return
elif dbtype == "sqlite":
db_create_sqlite(ctx, ldbname, ldirectory, alltables)
return
else:
ctx.vlog("Database type [%s] not supported yet", dbtype)
return
@cli.command("create-dbonly", short_help="Create database only")
@click.option(
"dbname",
"--dbname",
"-d",
default="",
help="Database name or path to the folder for database",
)
@pass_context
def db_create_dbonly(ctx, dbname):
"""Create database only
\b
"""
ctx.vlog("Creating only database [%s]", dbname)
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "sqlite":
ldbname = ctx.gconfig.get("db", "dbpath")
else:
ldbname = ctx.gconfig.get("db", "dbname")
if len(dbname) > 0:
ldbname = dbname
if dbtype == "mysql":
e = create_engine(ctx.gconfig.get("db", "adminurl"))
e.execute("create database {0}".format(ldbname))
elif dbtype == "postgresql":
scmd = (
'psql "postgresql://{0}:{1}@{2}" -c "create database {3} "'
).format(
ctx.gconfig.get("db", "adminuser"),
ctx.gconfig.get("db", "adminpassword"),
ctx.gconfig.get("db", "host"),
ldbname,
)
os.system(scmd)
elif dbtype == "sqlite":
ctx.vlog("Database file for type [%s] is created on first use", dbtype)
else:
ctx.vlog("Database type [%s] not supported yet", dbtype)
return
@cli.command("drop", short_help="Drop database")
@click.option(
"dbname",
"--dbname",
"-d",
default="",
help="Database name or path to the database",
)
@click.option(
"yes",
"--yes",
"-y",
is_flag=True,
help="Do not ask for confirmation",
)
@pass_context
def db_drop(ctx, dbname, yes):
"""Drop database
\b
"""
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "sqlite":
ldbname = ctx.gconfig.get("db", "dbpath")
else:
ldbname = ctx.gconfig.get("db", "dbname")
if len(dbname) > 0:
ldbname = dbname
if not yes:
print("Dropping database. Are you sure? (y/n):", end=" ")
option = input()
if option != "y":
ctx.vlog("Skip dropping database [%s]", ldbname)
return
ctx.vlog("Dropping database [%s]", ldbname)
if dbtype == "mysql":
e = create_engine(ctx.gconfig.get("db", "adminurl"))
e.execute("drop database {0}".format(ldbname))
elif dbtype == "postgresql":
scmd = (
'psql "postgresql://{0}:{1}@{2}" -c "drop database {3} "'
).format(
ctx.gconfig.get("db", "adminuser"),
ctx.gconfig.get("db", "adminpassword"),
ctx.gconfig.get("db", "host"),
ldbname,
)
os.system(scmd)
elif dbtype == "sqlite":
if not os.path.isfile(ldbname):
ctx.vlog("Database file [%s] does not exist", ldbname)
else:
os.remove(ldbname)
return
else:
ctx.vlog("Database type [%s] not supported yet", dbtype)
return
def db_create_tables_list(ctx, directory, group):
dbtype = ctx.gconfig.get("db", "type")
if dbtype != "mysql":
ctx.vlog("Database type [%s] not supported yet", dbtype)
return
ldirectory = ""
if len(directory) > 0:
ldirectory = directory
e = create_engine(ctx.gconfig.get("db", "rwurl"))
db_create_sql_group(ctx, e, ldirectory, group)
@cli.command("create-tables-basic", short_help="Create basic database tables")
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@pass_context
def db_create_tables_basic(ctx, scriptsdirectory):
"""Create basic database tables
\b
"""
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
db_create_tables_list(ctx, ldirectory, KDB_GROUP_BASIC)
@cli.command(
"create-tables-standard", short_help="Create standard database tables"
)
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@pass_context
def db_create_tables_standard(ctx, scriptsdirectory):
"""Create standard database tables
\b
"""
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
db_create_tables_list(ctx, ldirectory, KDB_GROUP_STANDARD)
@cli.command("create-tables-extra", short_help="Create extra database tables")
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@pass_context
def db_create_tables_extra(ctx, scriptsdirectory):
"""Create extra database tables
\b
"""
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
db_create_tables_list(ctx, ldirectory, KDB_GROUP_EXTRA)
@cli.command(
"create-tables-presence", short_help="Create presence database tables"
)
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@pass_context
def db_create_tables_presence(ctx, scriptsdirectory):
"""Create presence database tables
\b
"""
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
db_create_tables_list(ctx, ldirectory, KDB_GROUP_PRESENCE)
@cli.command("create-tables-uid", short_help="Create uid database tables")
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@pass_context
def db_create_tables_uid(ctx, scriptsdirectory):
"""Create uid database tables
\b
"""
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
db_create_tables_list(ctx, ldirectory, KDB_GROUP_UID)
@cli.command(
"create-tables-group",
short_help="Create the group of database tables for a specific extension",
)
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@click.argument("gname", metavar="<gname>")
@pass_context
def db_create_tables_group(ctx, scriptsdirectory, gname):
"""Create the group of database tables for a specific extension
\b
Parameters:
<gname> - the name of the group of tables
"""
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
e = create_engine(ctx.gconfig.get("db", "rwurl"))
fpath = ldirectory + "/" + gname + "-create.sql"
dbutils_exec_sqlfile(ctx, e, fpath)
@cli.command("grant", short_help="Create db access users and grant privileges")
@click.option(
"dbname",
"--dbname",
"-d",
default="",
help="Database name",
)
@pass_context
def db_grant(ctx, dbname):
"""Create db access users and grant privileges
\b
"""
dbtype = ctx.gconfig.get("db", "type")
if dbtype != "mysql":
ctx.vlog("Database type [%s] not supported yet", dbtype)
return
ldbname = ctx.gconfig.get("db", "dbname")
if len(dbname) > 0:
ldbname = dbname
    ctx.vlog("Creating access users and granting privileges on database [%s]", ldbname)
e = create_engine(ctx.gconfig.get("db", "adminurl"))
db_create_mysql_users(ctx, e, ldbname, False, False)
def db_revoke_host_users(ctx, e, dbname, dbhost, dbrwuser, dbrouser):
    e.execute(
        "REVOKE ALL PRIVILEGES ON {0}.* FROM {1!r}@{2!r}".format(
dbname, dbrwuser, dbhost
)
)
e.execute("DROP USER {0!r}@{1!r}".format(dbrwuser, dbhost))
    e.execute(
        "REVOKE SELECT ON {0}.* FROM {1!r}@{2!r}".format(
dbname, dbrouser, dbhost
)
)
e.execute("DROP USER {0!r}@{1!r}".format(dbrouser, dbhost))
def db_revoke_users(ctx, e, dbname):
dbhost = ctx.gconfig.get("db", "host")
dbrwuser = ctx.gconfig.get("db", "rwuser")
dbrouser = ctx.gconfig.get("db", "rouser")
dbaccesshost = ctx.gconfig.get("db", "accesshost")
db_revoke_host_users(ctx, e, dbname, dbhost, dbrwuser, dbrouser)
if dbhost != "localhost":
db_revoke_host_users(
ctx,
e,
dbname,
"localhost",
dbrwuser,
dbrouser,
)
if len(dbaccesshost) > 0:
db_revoke_host_users(
ctx,
e,
dbname,
dbaccesshost,
dbrwuser,
dbrouser,
)
@cli.command("revoke", short_help="Revoke db access privileges")
@click.option(
"dbname",
"--dbname",
"-d",
default="",
help="Database name",
)
@pass_context
def db_revoke(ctx, dbname):
"""Revoke db access privileges
\b
"""
dbtype = ctx.gconfig.get("db", "type")
if dbtype != "mysql":
ctx.vlog("Database type [%s] not supported yet", dbtype)
return
ldbname = ctx.gconfig.get("db", "dbname")
if len(dbname) > 0:
ldbname = dbname
ctx.vlog("Revoke access to database [%s]", ldbname)
e = create_engine(ctx.gconfig.get("db", "adminurl"))
db_revoke_users(ctx, e, ldbname)
@cli.command(
"version-set", short_help="Set the version number for a table structure"
)
@click.option(
"vertable",
"--version-table",
default="version",
help="Name of the table with version records",
)
@click.argument("table", metavar="<table>")
@click.argument("version", metavar="<version>", type=int)
@pass_context
def db_version_set(ctx, vertable, table, version):
"""Set the version number for a table structure
\b
Parameters:
<table> - Name of the table to set the version for
<version> - Version number
"""
e = create_engine(ctx.gconfig.get("db", "rwurl"))
e.execute(
"delete from {0} where table_name={1!r}".format(
vertable.encode("ascii", "ignore").decode(),
table.encode("ascii", "ignore").decode(),
)
)
e.execute(
"insert into {0} (table_name, table_version) values ({1!r}, {2})".format(
vertable.encode("ascii", "ignore").decode(),
table.encode("ascii", "ignore").decode(),
version,
)
)
@cli.command(
"version-get", short_help="Get the version number for a table structure"
)
@click.option(
"vertable",
"--version-table",
default="version",
help="Name of the table with version records",
)
@click.option(
"oformat",
"--output-format",
"-F",
type=click.Choice(["raw", "json", "table", "dict"]),
default=None,
help="Format the output",
)
@click.option(
"ostyle",
"--output-style",
"-S",
default=None,
help="Style of the output (tabulate table format)",
)
@click.argument("table", metavar="<table>")
@pass_context
def db_version_get(ctx, vertable, oformat, ostyle, table):
"""Get the version number for a table structure
\b
Parameters:
<table> - Name of the table to get the version for
"""
e = create_engine(ctx.gconfig.get("db", "rwurl"))
res = e.execute(
"select * from {0} where table_name={1!r}".format(
vertable.encode("ascii", "ignore").decode(),
table.encode("ascii", "ignore").decode(),
)
)
ioutils_dbres_print(ctx, oformat, ostyle, res)
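# Hedged command-line examples (not part of the original module), derived from the click
# commands registered above; the "kamcli" executable name comes from the project and the
# table names are placeholders:
#     kamcli db show version              # print a table (db_show)
#     kamcli db showcreate subscriber     # show a table's create statement (db_showcreate)
#     kamcli db create --all-tables       # create the full schema without prompting (db_create)
#     kamcli db version-get subscriber    # read a table's schema version (db_version_get)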
|
gpl-2.0
| 5,029,327,672,222,936,000
| 26.863189
| 99
| 0.560528
| false
| 3.357329
| true
| false
| false
|
aidin36/beneath-a-binary-sky
|
src/actions/water_action.py
|
1
|
2052
|
# This file is part of Beneath a Binary Sky.
# Copyright (C) 2016, Aidin Gharibnavaz <aidin@aidinhut.com>
#
# Beneath a Binary Sky is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Beneath a Binary Sky is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Beneath a Binary Sky. If not, see
# <http://www.gnu.org/licenses/>.
import time
from actions.action import Action
from actions.exceptions import InvalidArgumentsError, RobotHaveNoWaterError
from world.world import World
from database.exceptions import LockAlreadyAquiredError
class WaterAction(Action):
def __init__(self):
super().__init__()
self._world = World()
def do_action(self, robot, args):
'''Waters the square robot stands on.
@param robot: Instance of `objects.robot.Robot'.
'''
if len(args) != 1:
raise InvalidArgumentsError("`water' action takes no arguments.")
if not robot.get_has_water():
raise RobotHaveNoWaterError("Robot does not carry water.")
try:
square = self._world.get_square(robot.get_location(), for_update=True)
except LockAlreadyAquiredError:
# Waiting a little, and trying one more time.
time.sleep(0.02)
square = self._world.get_square(robot.get_location(), for_update=True)
# Note: we don't raise an exception if there's no plant. A robot can waste its water.
plant = square.get_plant()
if plant is not None:
plant.set_water_level(100)
robot.set_honor(robot.get_honor() + 1)
robot.set_has_water(False)
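# Hedged usage note (not part of the original file): the engine is expected to dispatch
# this action roughly as WaterAction().do_action(robot, args) with exactly one element
# in args (enforced by the len(args) != 1 check above); the meaning of that element is
# defined by the calling code and is an assumption not shown in this file.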
|
gpl-3.0
| 8,472,158,464,183,048,000
| 35
| 93
| 0.679825
| false
| 3.842697
| false
| false
| false
|
mdsalman729/flexpret_project
|
emulator/concurrit-poplsyntax/concurrit-poplsyntax/bench/pfscan/inputs/in2/config/getpthreadfunctions.py
|
1
|
1909
|
##
# getpthreadfunctions.py - outputs the pthread man page to mapthread.txt
# parses the latter, creates a dictionary with pairs
# (functionname, list of function args where last element is result type)
# marshals dictionary to pthreaddict file
#
# Author - Christos Stergiou (chster@eecs.berkeley.edu)
#
import os,re,marshal
os.system('man pthread | col -b > manpthread.txt')
filemp = open('manpthread.txt')
filedict = open('pthreaddict','w')
try:
pfuncs = dict()
previousmatch = False
funcargtypesstr = ''
funcname = ''
funcrettype = ''
for line in filemp:
line = line.rstrip('\n')
funcargtypeslist = []
if previousmatch:
previousmatch = False
funcargtypesstr = funcargtypesstr + ' ' + line.strip()[0:-2]
else:
#matchobj = re.search('[\t ]*[([a-zA-Z0-9_]+)[\t ]+([a-zA-Z0-9_]+)\(([a-z]+.*$)', line)
matchobj = re.search('[\t ]*([a-zA-Z0-9_]+( \*)?)[\t ]*([a-zA-Z0-9_]+)\(([a-z]+.*$)', line)
if matchobj:
funcname = matchobj.group(3)
funcrettype = matchobj.group(1)
funcargtypesstr = matchobj.group(4);
if not re.search(';$', matchobj.group(4)):
# function arguments continue to next line
previousmatch = True
continue
else:
# remove ');' from end of line
funcargtypesstr = funcargtypesstr[0:-2]
if matchobj or previousmatch:
funcargtypeslist = re.split(', ', funcargtypesstr)
funcargtypeslist.reverse()
funcargtypeslist.append(funcrettype)
funcargtypeslist.reverse()
print funcname,"->",funcargtypeslist
pfuncs[funcname] = funcargtypeslist
finally:
marshal.dump(pfuncs,filedict)
filemp.close()
filedict.close()
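# Hedged read-back example (not part of the original script; Python 2 style, matching
# the code above): the marshaled dictionary can be restored with
#     import marshal
#     pfuncs = marshal.load(open('pthreaddict', 'rb'))
#     # each value is [return type, arg type, ...] per the list layout built above
# The exact pthread_* keys present depend on the local man page.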
|
bsd-3-clause
| -8,134,398,863,236,522,000
| 33.709091
| 103
| 0.566789
| false
| 3.735812
| false
| false
| false
|
henaras/sahara
|
sahara/service/volumes.py
|
1
|
8618
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from sahara import conductor as c
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils.openstack import cinder
from sahara.utils.openstack import nova
from sahara.utils import poll_utils
conductor = c.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('api_version', 'sahara.utils.openstack.cinder',
group='cinder')
def _count_instances_to_attach(instances):
result = 0
for instance in instances:
if instance.node_group.volumes_per_node > 0:
result += 1
return result
def _count_volumes_to_mount(instances):
return sum([inst.node_group.volumes_per_node for inst in instances])
def attach_to_instances(instances):
instances_to_attach = _count_instances_to_attach(instances)
if instances_to_attach == 0:
return
cpo.add_provisioning_step(
instances[0].cluster_id, _("Attach volumes to instances"),
instances_to_attach)
with context.ThreadGroup() as tg:
for instance in instances:
if instance.node_group.volumes_per_node > 0:
with context.set_current_instance_id(instance.instance_id):
tg.spawn(
'attach-volumes-for-instance-%s'
% instance.instance_name, _attach_volumes_to_node,
instance.node_group, instance)
@poll_utils.poll_status(
'await_attach_volumes', _("Await for attaching volumes to instances"),
sleep=2)
def _await_attach_volumes(instance, devices):
return _count_attached_devices(instance, devices) == len(devices)
@cpo.event_wrapper(mark_successful_on_exit=True)
def _attach_volumes_to_node(node_group, instance):
ctx = context.ctx()
size = node_group.volumes_size
volume_type = node_group.volume_type
devices = []
for idx in range(1, node_group.volumes_per_node + 1):
display_name = "volume_" + instance.instance_name + "_" + str(idx)
device = _create_attach_volume(
ctx, instance, size, volume_type,
node_group.volume_local_to_instance, display_name,
node_group.volumes_availability_zone)
devices.append(device)
LOG.debug("Attached volume {device} to instance".format(device=device))
_await_attach_volumes(instance, devices)
paths = instance.node_group.storage_paths()
for idx in range(0, instance.node_group.volumes_per_node):
LOG.debug("Mounting volume {volume} to instance"
.format(volume=devices[idx]))
_mount_volume(instance, devices[idx], paths[idx])
LOG.debug("Mounted volume to instance")
@poll_utils.poll_status(
'volume_available_timeout', _("Await for volume become available"),
sleep=1)
def _await_available(volume):
volume = cinder.get_volume(volume.id)
if volume.status == 'error':
raise ex.SystemError(_("Volume %s has error status") % volume.id)
return volume.status == 'available'
def _create_attach_volume(ctx, instance, size, volume_type,
volume_local_to_instance, name=None,
availability_zone=None):
if CONF.cinder.api_version == 1:
kwargs = {'size': size, 'display_name': name}
else:
kwargs = {'size': size, 'name': name}
kwargs['volume_type'] = volume_type
if availability_zone is not None:
kwargs['availability_zone'] = availability_zone
if volume_local_to_instance:
kwargs['scheduler_hints'] = {'local_to_instance': instance.instance_id}
volume = cinder.client().volumes.create(**kwargs)
conductor.append_volume(ctx, instance, volume.id)
_await_available(volume)
resp = nova.client().volumes.create_server_volume(
instance.instance_id, volume.id, None)
return resp.device
def _count_attached_devices(instance, devices):
code, part_info = instance.remote().execute_command('cat /proc/partitions')
count = 0
for line in part_info.split('\n')[1:]:
tokens = line.split()
if len(tokens) > 3:
dev = '/dev/' + tokens[3]
if dev in devices:
count += 1
return count
def mount_to_instances(instances):
if len(instances) == 0:
return
cpo.add_provisioning_step(
instances[0].cluster_id,
_("Mount volumes to instances"), _count_volumes_to_mount(instances))
with context.ThreadGroup() as tg:
for instance in instances:
with context.set_current_instance_id(instance.instance_id):
devices = _find_instance_volume_devices(instance)
                # Since formatting can take several minutes (for large disks)
# and can be done in parallel, launch one thread per disk.
for idx in range(0, instance.node_group.volumes_per_node):
tg.spawn(
'mount-volume-%d-to-node-%s' %
(idx, instance.instance_name),
_mount_volume_to_node, instance, idx, devices[idx])
def _find_instance_volume_devices(instance):
volumes = nova.client().volumes.get_server_volumes(instance.instance_id)
devices = [volume.device for volume in volumes]
return devices
@cpo.event_wrapper(mark_successful_on_exit=True)
def _mount_volume_to_node(instance, idx, device):
LOG.debug("Mounting volume {device} to instance".format(device=device))
mount_point = instance.node_group.storage_paths()[idx]
_mount_volume(instance, device, mount_point)
LOG.debug("Mounted volume to instance")
def _mount_volume(instance, device_path, mount_point):
with instance.remote() as r:
try:
# Mount volumes with better performance options:
# - reduce number of blocks reserved for root to 1%
# - use 'dir_index' for faster directory listings
# - use 'extents' to work faster with large files
# - disable journaling
# - enable write-back
# - do not store access time
fs_opts = '-m 1 -O dir_index,extents,^has_journal'
mount_opts = '-o data=writeback,noatime,nodiratime'
r.execute_command('sudo mkdir -p %s' % mount_point)
r.execute_command('sudo mkfs.ext4 %s %s' % (fs_opts, device_path))
r.execute_command('sudo mount %s %s %s' %
(mount_opts, device_path, mount_point))
except Exception:
LOG.error(_LE("Error mounting volume to instance"))
raise
def detach_from_instance(instance):
for volume_id in instance.volumes:
_detach_volume(instance, volume_id)
_delete_volume(volume_id)
@poll_utils.poll_status(
'detach_volume_timeout', _("Await for volume become detached"), sleep=2)
def _await_detach(volume_id):
volume = cinder.get_volume(volume_id)
if volume.status not in ['available', 'error']:
return False
return True
def _detach_volume(instance, volume_id):
volume = cinder.get_volume(volume_id)
try:
LOG.debug("Detaching volume {id} from instance".format(id=volume_id))
nova.client().volumes.delete_server_volume(instance.instance_id,
volume_id)
except Exception:
LOG.error(_LE("Can't detach volume {id}").format(id=volume.id))
detach_timeout = CONF.timeouts.detach_volume_timeout
LOG.debug("Waiting {timeout} seconds to detach {id} volume".format(
timeout=detach_timeout, id=volume_id))
_await_detach(volume_id)
def _delete_volume(volume_id):
LOG.debug("Deleting volume {volume}".format(volume=volume_id))
volume = cinder.get_volume(volume_id)
try:
volume.delete()
except Exception:
LOG.error(_LE("Can't delete volume {volume}").format(
volume=volume.id))
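# Hedged flow summary (not part of the original module): provisioning code elsewhere in
# sahara is expected to call, roughly,
#     attach_to_instances(instances)   # create and attach Cinder volumes per node group
#     mount_to_instances(instances)    # format and mount each attached device
# and detach_from_instance(instance) during teardown; the exact call sites are
# assumptions here.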
|
apache-2.0
| -8,491,081,074,740,166,000
| 34.465021
| 79
| 0.640752
| false
| 3.840463
| false
| false
| false
|
niboshi/chainer
|
chainerx/_docs/routines.py
|
1
|
127367
|
import chainerx
from chainerx import _docs
def set_docs():
_docs_creation()
_docs_evaluation()
_docs_indexing()
_docs_linalg()
_docs_logic()
_docs_loss()
_docs_manipulation()
_docs_math()
_docs_sorting()
_docs_statistics()
_docs_connection()
_docs_normalization()
_docs_pooling()
_docs_rnn()
def _docs_creation():
_docs.set_doc(
chainerx.empty,
"""empty(shape, dtype, device=None)
Returns an array without initializing the elements.
Args:
shape (tuple of ints): Shape of the array.
dtype: Data type of the array.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
:class:`~chainerx.ndarray`: New array with elements not initialized.
.. seealso:: :func:`numpy.empty`
""")
_docs.set_doc(
chainerx.empty_like,
"""empty_like(a, device=None)
Returns a new array with same shape and dtype of a given array.
Args:
a (~chainerx.ndarray): Prototype array.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
:class:`~chainerx.ndarray`: New array with same shape and dtype as ``a`` \
with elements not initialized.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the prototype array.
.. seealso:: :func:`numpy.empty_like`
""")
_docs.set_doc(
chainerx.eye,
"""eye(N, M=None, k=0, dtype=float64, device=None)
Returns a 2-D array with ones on the diagonals and zeros elsewhere.
Args:
N (int): Number of rows.
M (int): Number of columns. M == N by default.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: A 2-D array with given diagonals filled with ones and
zeros elsewhere.
.. seealso:: :func:`numpy.eye`
""")
_docs.set_doc(
chainerx.tri,
"""tri(N, M=None, k=0, dtype=float32, device=None)
Returns a 2-D array with ones at and below the given diagonal
and zeros elsewhere.
Args:
N (int): Number of rows.
M (int): Number of columns. M == N by default.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: A 2-D array with ones at and below the given diagonal
    and zeros elsewhere.
.. seealso:: :func:`numpy.tri`
""")
_docs.set_doc(
chainerx.tril,
"""tril(m, k=0)
Lower triangle of an array.
Returns a copy of an array with elements above the k-th diagonal zeroed.
Args:
m (~chainerx.ndarray): Input array.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
Returns:
~chainerx.ndarray: Lower triangle of ``m``.
.. seealso:: :func:`numpy.tril`
""")
_docs.set_doc(
chainerx.triu,
"""triu(m, k=0)
Upper triangle of an array.
Returns a copy of an array with elements below the k-th diagonal zeroed.
Args:
m (~chainerx.ndarray): Input array.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
Returns:
~chainerx.ndarray: Upper triangle of ``m``.
.. seealso:: :func:`numpy.triu`
""")
_docs.set_doc(
chainerx.identity,
"""identity(n, dtype=None, device=None)
Returns a 2-D identity array.
It is equivalent to ``eye(n, n, dtype)``.
Args:
n (int): Number of rows and columns.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: A 2-D identity array.
.. seealso:: :func:`numpy.identity`
""")
_docs.set_doc(
chainerx.ones,
"""ones(shape, dtype, device=None)
Returns a new array of given shape and dtype, filled with ones.
Args:
shape (tuple of ints): Shape of the array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
.. seealso:: :func:`numpy.ones`
""")
_docs.set_doc(
chainerx.ones_like,
"""ones_like(a, device=None)
Returns an array of ones with same shape and dtype as a given array.
Args:
a (~chainerx.ndarray): Prototype array.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the prototype array.
.. seealso:: :func:`numpy.ones_like`
""")
_docs.set_doc(
chainerx.zeros,
"""zeros(shape, dtype, device=None)
Returns a new array of given shape and dtype, filled with zeros.
Args:
shape (tuple of ints): Shape of the array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
.. seealso:: :func:`numpy.zeros`
""")
_docs.set_doc(
chainerx.zeros_like,
"""zeros_like(a, device=None)
Returns an array of zeros with same shape and dtype as a given array.
Args:
a (~chainerx.ndarray): Prototype array.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the prototype array.
.. seealso:: :func:`numpy.zeros_like`
""")
_docs.set_doc(
chainerx.full,
"""full(shape, fill_value, dtype, device=None)
Returns a new array of given shape and dtype, filled with a given value.
Args:
shape (tuple of ints): Shape of the array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
.. seealso:: :func:`numpy.full`
""")
_docs.set_doc(
chainerx.full_like,
"""full_like(a, fill_value, dtype=None, device=None)
Returns a full array with same shape and dtype as a given array.
Args:
a (~chainerx.ndarray): Prototype array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the prototype array.
.. seealso:: :func:`numpy.full_like`
""")
_docs.set_doc(
chainerx.array,
"""array(object, dtype=None, copy=True, device=None)
Creates an array.
Args:
object: A :class:`~chainerx.ndarray` object or any other object that can be
passed to :func:`numpy.array`.
dtype: Data type. If omitted, it's inferred from the input.
copy (bool): If ``True``, the object is always copied. Otherwise, a copy
will only be made if it is needed to satisfy any of the other
requirements (dtype, device, etc.).
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the input array.
.. seealso:: :func:`numpy.array`
""")
_docs.set_doc(
chainerx.asarray,
"""asarray(a, dtype=None, device=None)
Converts an object to an array.
Args:
a: The source object.
dtype: Data type. If omitted, it's inferred from the input.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: Array interpretation of ``a``. If ``a`` is already an \
ndarray on the given device with matching dtype, no copy is performed.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the input array.
.. seealso:: :func:`numpy.asarray`
""")
_docs.set_doc(
chainerx.ascontiguousarray,
"""ascontiguousarray(a, dtype=None, device=None)
Returns a C-contiguous array.
Args:
a (~chainerx.ndarray): Source array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: C-contiguous array. A copy will be made only if needed.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the input array.
.. seealso:: :func:`numpy.ascontiguousarray`
""")
_docs.set_doc(
chainerx.copy,
"""copy(a)
Creates a copy of a given array.
Args:
a (~chainerx.ndarray): Source array.
Returns:
~chainerx.ndarray: A copy array on the same device as ``a``.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.copy`
""")
_docs.set_doc(
chainerx.frombuffer,
"""frombuffer(buffer, dtype=float, count=-1, offset=0, device=None)
Returns a 1-D array interpretation of a buffer.
The given ``buffer`` memory must be usable on the given device, otherwise,
an error is raised.
Note:
The ``native`` backend requires a buffer of main memory, and
the ``cuda`` backend requires a buffer of CUDA memory.
No copy is performed.
Args:
buffer: An object that exposes the buffer interface.
dtype: Data type of the returned array.
count (int): Number of items to read. -1 means all data in the buffer.
offset (int): Start reading the buffer from this offset (in bytes).
device (~chainerx.Device): Device of the returned array.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: 1-D array interpretation of ``buffer``.
.. seealso:: :func:`numpy.frombuffer`
""")
_docs.set_doc(
chainerx.arange,
"""arange([start=0, ]stop, [step=1, ]dtype=None, device=None)
Returns an array with evenly spaced values within a given interval.
Values are generated within the half-open interval [``start``, ``stop``).
The first three arguments are mapped like the ``range`` built-in function,
i.e. ``start`` and ``step`` are optional.
Args:
start: Start of the interval.
stop: End of the interval.
step: Step width between each pair of consecutive values.
dtype: Data type specifier. It is inferred from other arguments by
default.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: The 1-D array of range values.
.. seealso:: :func:`numpy.arange`
""")
_docs.set_doc(
chainerx.linspace,
"""linspace(start, stop, num=50, endpoint=True, dtype=None, device=None)
Returns an array with evenly spaced numbers over a specified interval.
Instead of specifying the step width like :func:`chainerx.arange()`,
this function requires the total number of elements specified.
Args:
start: Start of the interval.
stop: End of the interval.
num: Number of elements.
endpoint (bool): If ``True``, the stop value is included as the last
element. Otherwise, the stop value is omitted.
dtype: Data type specifier. It is inferred from the start and stop
arguments by default.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: The 1-D array of ranged values.
.. seealso:: :func:`numpy.linspace`
""") # NOQA
_docs.set_doc(
chainerx.diag,
"""diag(v, k=0, device=None)
Returns a diagonal or a diagonal array.
Args:
v (~chainerx.ndarray): Array object.
k (int): Index of diagonals. Zero indicates the main diagonal, a
positive value an upper diagonal, and a negative value a lower
diagonal.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: If ``v`` is a 1-D array, then it returns a 2-D
array with the specified diagonal filled by ``v``. If ``v`` is a
    2-D array, then it returns the specified diagonal of ``v``. In the latter
case, if ``v`` is a :class:`chainerx.ndarray` object, then its view is
returned.
Note:
The argument ``v`` does not support array-like objects yet.
.. seealso:: :func:`numpy.diag`
""")
_docs.set_doc(
chainerx.diagflat,
"""diagflat(v, k=0, device=None)
Creates a diagonal array from the flattened input.
Args:
v (~chainerx.ndarray): Array object.
k (int): Index of diagonals. See :func:`chainerx.diag`.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: A 2-D diagonal array with the diagonal copied
from ``v``.
Note:
The argument ``v`` does not support array-like objects yet.
.. seealso:: :func:`numpy.diagflat`
""")
_docs.set_doc(
chainerx.meshgrid,
"""meshgrid(xi, indexing='xy')
Returns coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of N-D scalar/vector
fields over N-D grids, given one-dimensional coordinate arrays x1, x2,…, xn.
Args:
xi (sequence of :class:`~chainerx.ndarray`\\ s): 1-D arrays
representing the coordinates of a grid.
indexing (str): {‘xy’, ‘ij’}, optional
Cartesian (‘xy’, default) or matrix (‘ij’) indexing of output.
Returns:
list of :class:`~chainerx.ndarray`\\ s: For vectors x1, x2,…, ‘xn’ with
lengths Ni=len(xi), return (N1, N2, N3,...Nn) shaped arrays if
indexing=’ij’ or (N2, N1, N3,...Nn) shaped arrays if indexing=’xy’
with the elements of xi repeated to fill the matrix along the first
dimension for x1, the second for x2 and so on.
.. seealso:: :func:`numpy.meshgrid`
""")
def _docs_evaluation():
_docs.set_doc(
chainerx.accuracy,
"""accuracy(y, t, ignore_label=None)
Computes multiclass classification accuracy of the minibatch.
Args:
y (~chainerx.ndarray):
Array whose (i, j, k, ...)-th element indicates the score of
the class j at the (i, k, ...)-th sample.
The prediction label :math:`\\hat t` is calculated by the formula
:math:`\\hat t(i, k, ...) = \\operatorname{\\mathrm{argmax}}_j \
y(i, j, k, ...)`.
t (~chainerx.ndarray):
Array of ground truth labels.
ignore_label (int or None): Skip calculating accuracy
if the true label is ``ignore_label``.
Returns:
:func:`~chainerx.ndarray`: A variable holding a scalar \
array of the accuracy.
Note:
This function is non-differentiable.
.. seealso:: :func:`chainer.functions.accuracy`
.. admonition:: Example
    We show the most common case, when ``y`` is a two-dimensional array.
>>> y = chainerx.array([[0.1, 0.7, 0.2], # prediction label is 1
... [8.0, 1.0, 2.0], # prediction label is 0
... [-8.0, 1.0, 2.0], # prediction label is 2
... [-8.0, -1.0, -2.0]]) # prediction label is 1
>>> t = chainerx.array([1, 0, 2, 1], chainerx.int32)
>>> chainerx.accuracy(y, t) \
# 100% accuracy because all samples are correct
array(1., shape=(), dtype=float64, device='native:0')
>>> t = chainerx.array([1, 0, 0, 0], chainerx.int32)
>>> chainerx.accuracy(y, t) \
# 50% accuracy because 1st and 2nd samples are correct
array(0.5, shape=(), dtype=float64, device='native:0')
>>> chainerx.accuracy(y, t, ignore_label=0) \
# 100% accuracy because of ignoring the 2nd, 3rd and 4th samples.
array(1., shape=(), dtype=float64, device='native:0')
""")
def _docs_indexing():
_docs.set_doc(
chainerx.take,
"""take(a, indices, axis)
Takes elements from an array along an axis.
Args:
a (~chainerx.ndarray): Source array.
indices (~chainerx.ndarray):
The indices of the values to extract. When indices are out of bounds,
they are wrapped around.
axis (int): The axis over which to select values.
mode (str): Specifies how out-of-bounds indices will behave.
'raise' - raise an error
'wrap' - wrap around
'clip' - clip to the range
Returns:
:func:`~chainerx.ndarray`: Output array.
Note:
This function currently does not support ``axis=None``
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
Note:
The default mode for the native backend is 'raise', while for the cuda
backend is 'wrap' in order to prevent device synchronization.
'raise' mode is currently not supported in the CUDA backend.
.. seealso:: :func:`numpy.take`
""")
_docs.set_doc(
chainerx.where,
"""where(condition, x, y)
Return elements chosen from ``x`` or ``y`` depending on condition.
Args:
condition (~chainerx.ndarray): Where True, yield ``x``, otherwise
yield ``y``.
x (~chainerx.ndarray): Values from which to choose.
y (~chainerx.ndarray): Values from which to choose.
Returns:
:func:`~chainerx.ndarray`: An array with elements
from ``x`` where condition is True, and elements from ``y`` elsewhere.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x`` and ``y``.
.. seealso:: :func:`numpy.where`
""")
_docs.set_doc(
chainerx.nonzero,
"""nonzero(a)
Return the indices of the elements that are non-zero.
Args:
a (~chainerx.ndarray): Input array.
Returns:
tuple of :func:`~chainerx.ndarray`: Indices of elements that are non-zero.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :func:`numpy.nonzero`
""")
def _docs_linalg():
_docs.set_doc(
chainerx.dot,
"""dot(a, b)
Returns a dot product of two arrays.
For arrays with more than one axis, it computes the dot product along the last
axis of ``a`` and the second-to-last axis of ``b``. This is just a matrix
product if the both arrays are 2-D. For 1-D arrays, it uses their unique axis
as an axis to take dot product over.
Args:
a (~chainerx.ndarray): The left argument.
b (~chainerx.ndarray): The right argument.
Returns:
:class:`~chainerx.ndarray`: Output array.
Note:
This function currently does not support N > 2 dimensional arrays.
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``a`` and ``b``.
.. seealso:: :func:`numpy.dot`
""")
_docs.set_doc(
chainerx.linalg.solve,
"""solve(a, b)
Solves a linear matrix equation, or system of linear scalar equations.
It computes the exact solution of ``x`` in ``ax = b``,
where ``a`` is a square, full-rank matrix and
``b`` is either a vector or a rectangular matrix.
When ``b`` is a matrix, its columns are treated as separate vectors
representing multiple right-hand sides.
Args:
a (~chainerx.ndarray): Coefficient matrix.
b (~chainerx.ndarray): "dependent variable" values.
Returns:
:class:`~chainerx.ndarray`:
Solution to the system ``ax = b``.
Shape is identical to ``b``.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.solve`
""")
_docs.set_doc(
chainerx.linalg.inv,
"""inv(a)
Computes the inverse of a matrix.
This function computes matrix ``a_inv`` from square matrix
``a`` such that ``dot(a, a_inv) = dot(a_inv, a) = eye(a.shape[0])``.
Args:
a (~chainerx.ndarray): The matrix to be inverted.
Returns:
:class:`~chainerx.ndarray`: The inverse of a matrix.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.inv`
""")
_docs.set_doc(
chainerx.linalg.svd,
"""svd(a, full_matrices=True, compute_uv=True)
Singular Value Decomposition.
Factorizes the matrix ``a`` into two unitary matrices ``U`` and ``Vt``, and
a 1-D array ``s`` of singular values such that
``a == U * S * Vt``, where ``S`` is a suitably shaped matrix of zeros with
main diagonal ``s`` and ``*`` represents a dot product.
Args:
a (~chainerx.ndarray): The input matrix with dimension ``(M, N)``.
full_matrices (bool): If True, it returns u and v with dimensions
``(M, M)`` and ``(N, N)``. Otherwise, the dimensions of u and v
are respectively ``(M, K)`` and ``(K, N)``, where
``K = min(M, N)``.
compute_uv (bool): If False, only singular values are computed.
Returns:
tuple of :class:`chainerx.ndarray`:
A tuple of ``(U, s, Vt)`` such that ``a = U * diag(s) * Vt``.
When ``compute_uv`` is False only singular values ``s`` are returned.
Note:
* The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
* The SVD is commonly written as `a = U * diag(s) * V^T`.
The ``Vt`` returned by this function is `V^T`.
* During backpropagation, this function requires ``U`` and ``Vt`` computed,
therefore differentiation does not work for ``compute_uv=False``.
* Backpropagation is not implemented for ``full_matrices=True``.
.. seealso:: :func:`numpy.linalg.svd`
""")
_docs.set_doc(
chainerx.linalg.pinv,
"""pinv(a, rcond=1e-15)
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its singular-value
decomposition (SVD) and including all large singular values.
Args:
a (~chainerx.ndarray): The input matrix to be pseudo-inverted.
rcond (float): Cutoff for small singular values.
Returns:
:class:`~chainerx.ndarray`: The pseudo-inverse of ``a``.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.pinv`
""")
_docs.set_doc(
chainerx.linalg.qr,
"""qr(a, mode='reduced')
Compute the qr factorization of a matrix.
Factor the matrix ``a`` as *qr*, where ``q`` is orthonormal and ``r`` is
upper-triangular.
Args:
a (~chainerx.ndarray): Matrix to be factored.
mode (str): The mode of decomposition.
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,),
where ``(M, N)`` is the shape of the input matrix and ``K = min(M, N)``
Returns:
q (~chainerx.ndarray): A matrix with orthonormal columns.
r (~chainerx.ndarray): The upper-triangular matrix.
Note:
* The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
* Backpropagation is not implemented for non-square output matrix ``r``.
* Backpropagation is not implemented for 'r' or 'raw' modes.
.. seealso:: :func:`numpy.linalg.qr`
""")
_docs.set_doc(
chainerx.linalg.cholesky,
"""cholesky(a)
Computes the Cholesky decomposition of a matrix.
Returns the Cholesky decomposition, :math:`A = L L^T`,
for the square matrix ``a``.
Args:
a (~chainerx.ndarray): Symmetric positive-definite input matrix.
Returns:
:class:`~chainerx.ndarray`: Output array. Cholesky factor of ``a``.
Note:
The forward computation does not necessarily check if the input matrix is
symmetric (e.g. the native backend relying on LAPACK does not). However,
both the forward and the backward computations assume that it is and their
results are unspecified otherwise. The computed gradient is always a
symmetric matrix. More specifically, the gradient is computed as if the
function is restricted to a Riemannian submanifold of
    :math:`R_{n \\times n}` consisting just of positive-definite symmetric
matrices and is faithful to the mathematical definition of the Cholesky
decomposition.
Note:
* GPU implementation of the Cholesky decomposition routine is based on
cuSOLVER library. Older versions (<10.1) of it might not raise an error
for some non positive-definite matrices.
* The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.cholesky`
""")
_docs.set_doc(
chainerx.linalg.eigh,
"""eigh(a, UPLO='L')
Compute the eigenvalues and eigenvectors of a real symmetric matrix.
Args:
a (~chainerx.ndarray): Real symmetric matrix whose eigenvalues
and eigenvectors are to be computed.
UPLO (str): Specifies whether the calculation is done with the lower
triangular part of a ('L', default) or the upper triangular part ('U').
Returns:
tuple of :class:`~chainerx.ndarray`:
Returns a tuple ``(w, v)``. ``w`` contains eigenvalues and
``v`` contains eigenvectors. ``v[:, i]`` is an eigenvector
corresponding to an eigenvalue ``w[i]``.
Note:
Although ``UPLO`` can be specified to ignore either the strictly lower or
upper part of the input matrix, the backward computation assumes that the
inputs is symmetric and the computed gradient is always a symmetric matrix
with respect to ``UPLO``. More specifically, the gradient is computed as if
the function is restricted to a Riemannian submanifold of
    :math:`R_{n \\times n}` consisting just of symmetric matrices and is
faithful to the mathematical definition of the eigenvalue decomposition of
symmetric matrices.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.eigh`
""")
_docs.set_doc(
chainerx.linalg.eigvalsh,
"""eigvalsh(a, UPLO='L')
Compute the eigenvalues of a real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Args:
a (~chainerx.ndarray): Real symmetric matrix whose eigenvalues
and eigenvectors are to be computed.
UPLO (str): Specifies whether the calculation is done with the lower
        triangular part of a ('L', default) or the upper triangular part ('U').
(optional).
Returns:
:class:`~chainerx.ndarray`: Returns eigenvalues as a vector.
Note:
* The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
* Backpropagation requires eigenvectors and, therefore, is not implemented
for this function. ``linalg.eigh`` should be used instead.
.. seealso:: :func:`numpy.linalg.eigvalsh`
""")
def _docs_logic():
_docs.set_doc(
chainerx.all,
"""all(x)
Test whether all array elements along a given axis evaluate to True.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which AND reduction is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.all`
""")
_docs.set_doc(
chainerx.any,
"""any(x)
Test whether any array element along a given axis evaluates to True.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which OR reduction is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.any`
""")
_docs.set_doc(
chainerx.logical_not,
"""logical_not(x)
Returns an array of NOT x element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_not`
""")
_docs.set_doc(
chainerx.logical_and,
"""logical_and(x1, x2)
Returns an array of x1 AND x2 element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_and`
""")
_docs.set_doc(
chainerx.logical_or,
"""logical_or(x1, x2)
Returns an array of x1 OR x2 element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_or`
""")
_docs.set_doc(
chainerx.logical_xor,
"""logical_xor(x1, x2)
Returns an array of x1 XOR x2 element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_xor`
""")
_docs.set_doc(
chainerx.greater,
"""greater(x1, x2)
Returns an array of (x1 > x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.greater`
""")
_docs.set_doc(
chainerx.greater_equal,
"""greater_equal(x1, x2)
Returns an array of (x1 >= x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.greater_equal`
""")
_docs.set_doc(
chainerx.less,
"""less(x1, x2)
Returns an array of (x1 < x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.less`
""")
_docs.set_doc(
chainerx.less_equal,
"""less_equal(x1, x2)
Returns an array of (x1 <= x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.less_equal`
""")
_docs.set_doc(
chainerx.equal,
"""equal(x1, x2)
Returns an array of (x1 == x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.equal`
""")
_docs.set_doc(
chainerx.not_equal,
"""not_equal(x1, x2)
Returns an array of (x1 != x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.not_equal`
""")
def _docs_loss():
_docs.set_doc(
chainerx.absolute_error,
"""Element-wise absolute error function.
Computes the element-wise absolute error :math:`L` between two inputs
:math:`x_1` and :math:`x_2` defined as follows.
.. math::
L = |x_1 - x_2|
Args:
x1 (~chainerx.ndarray): Input variable.
x2 (~chainerx.ndarray): Input variable.
Returns:
:class:`~chainerx.ndarray`: A variable holding an array representing
the absolute error of two inputs.
.. seealso:: :func:`chainer.functions.absolute_error`
""")
_docs.set_doc(
chainerx.squared_error,
"""Element-wise squared error function.
Computes the element-wise squared error :math:`L` between two inputs
:math:`x_1` and :math:`x_2` defined as follows.
.. math::
L = (x_1 - x_2)^2
Can be used to compute the mean squared error by calling ``mean()``
on the output array.
Args:
x0 (~chainerx.ndarray): Input variable.
x1 (~chainerx.ndarray): Input variable.
Returns:
:class:`~chainerx.ndarray`: A variable holding an array representing
the squared error of two inputs.
.. seealso:: :func:`chainer.functions.squared_error`
""")
_docs.set_doc(
chainerx.huber_loss,
"""Element-wise Huber loss.
The Huber loss is similar to the squared error but is less sensitive to
outliers in the data. It is defined as
.. math::
L_{\\delta}(a) = \\left \\{ \\begin{array}{cc}
\\frac{1}{2} a^2 & {\\rm if~|a| \\leq \\delta} \\\\
\\delta (|a| - \\frac{1}{2} \\delta) & {\\rm otherwise,}
\\end{array} \\right.
where :math:`a = x - t` is the difference between the input :math:`x`
and the target :math:`t`.
See: `Huber loss - Wikipedia <https://en.wikipedia.org/wiki/Huber_loss>`_.
Args:
x (~chainerx.ndarray): Input variable.
t (~chainerx.ndarray): Target variable for regression.
delta (float): Constant variable for Huber loss function as used in
definition.
Returns:
:class:`~chainerx.ndarray`:
A variable object holding an array representing the Huber loss
:math:`L_{\\delta}` of the two inputs.
.. seealso:: :func:`chainer.functions.huber_loss`
""")
_docs.set_doc(
chainerx.gaussian_kl_divergence,
"""Element-wise KL-divergence of Gaussian variables from the standard one.
Given two variable ``mean`` representing :math:`\\mu` and ``ln_var``
representing :math:`\\log(\\sigma^2)`, this function calculates
the element-wise KL-divergence between the given multi-dimensional
Gaussian :math:`N(\\mu, S)` and the standard Gaussian :math:`N(0, I)`
.. math::
D_{\\mathbf{KL}}(N(\\mu, S) \\| N(0, I)),
where :math:`S` is a diagonal matrix such that :math:`S_{ii} = \\sigma_i^2`
and :math:`I` is an identity matrix.
Args:
mean (~chainerx.ndarray):
A variable representing mean of given
gaussian distribution, :math:`\\mu`.
ln_var (~chainerx.ndarray):
A variable representing logarithm of
variance of given gaussian distribution, :math:`\\log(\\sigma^2)`.
Returns:
:class:`~chainerx.ndarray`:
A variable representing KL-divergence between
given gaussian distribution and the standard gaussian.
.. seealso:: :func:`chainer.functions.gaussian_kl_divergence`
""")
_docs.set_doc(
chainerx.sigmoid_cross_entropy,
"""sigmoid_cross_entropy(x1, x2)
Element-wise cross entropy loss for pre-sigmoid activations.
Args:
x1 (~chainerx.ndarray): An array whose (i, j)-th element indicates the
unnormalized log probability of the j-th unit at the i-th example.
x2 (~chainerx.ndarray): An array whose (i, j)-th element indicates a signed
integer vector of ground truth labels 0 or 1. If ``x2[i, j] == -1``,
corresponding ``x1[i, j]`` is ignored. Loss is zero if all ground truth
labels are -1.
Returns:
:class:`~chainerx.ndarray`: An array of the cross entropy.
Note:
During backpropagation, this function propagates the gradient of the output
array to the input array ``x1`` only.
""")
_docs.set_doc(
chainerx.softmax_cross_entropy,
"""softmax_cross_entropy(x1, x2)
Element-wise cross entropy loss for pre-softmax activations.
Args:
x1 (~chainerx.ndarray): An array whose element indicates unnormalized log
probability: the first axis of the array represents the number of
samples, and the second axis represents the number of classes.
x2 (~chainerx.ndarray): A signed integer vector of ground truth labels. If
``x2[i] == -1``, corresponding ``x1[i]`` is ignored.
Returns:
:class:`~chainerx.ndarray`: An array of the cross entropy.
Note:
During backpropagation, this function propagates the gradient of the output
array to the input array ``x1`` only.
""")
def _docs_manipulation():
_docs.set_doc(
chainerx.reshape,
"""reshape(a, newshape)
Returns a reshaped array.
Args:
a (~chainerx.ndarray): Array to be reshaped.
newshape (int or tuple of ints): The new shape of the array to return.
If it is an integer, then it is treated as a tuple of length one.
It should be compatible with ``a.size``. One of the elements can be
-1, which is automatically replaced with the appropriate value to
make the shape compatible with ``a.size``.
Returns:
:class:`~chainerx.ndarray`: A reshaped view of ``a`` if possible,
otherwise a copy.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.reshape`
""")
_docs.set_doc(
chainerx.ravel,
"""ravel(a)
Returns a flattened array.
Args:
a (~chainerx.ndarray): Array to be flattened.
Returns:
:class:`~chainerx.ndarray`: A flattened view of ``a`` if possible,
otherwise a copy.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.ravel`
""")
_docs.set_doc(
chainerx.transpose,
"""transpose(a, axes=None)
Permutes the dimensions of an array.
Args:
a (~chainerx.ndarray): Array to permute the dimensions.
axes (tuple of ints): Permutation of the dimensions. This function reverses
the shape by default.
Returns:
~chainerx.ndarray: A view of ``a`` with the dimensions permuted.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.transpose`
""")
_docs.set_doc(
chainerx.broadcast_to,
"""broadcast_to(array, shape)
Broadcasts an array to a given shape.
Args:
array (~chainerx.ndarray): Array to broadcast.
shape (tuple of ints): The shape of the desired array.
Returns:
~chainerx.ndarray: Broadcasted view.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``array``.
.. seealso:: :func:`numpy.broadcast_to`
""")
_docs.set_doc(
chainerx.squeeze,
"""squeeze(a, axis=None)
Removes size-one axes from the shape of an array.
Args:
a (~chainerx.ndarray): Array to be reshaped.
axis (int or tuple of ints): Axes to be removed. This function removes all
size-one axes by default. If one of the specified axes is not of size
one, an exception is raised.
Returns:
~chainerx.ndarray: An array without (specified) size-one axes.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.squeeze`
""")
_docs.set_doc(
chainerx.concatenate,
"""concatenate(arrays, axis=0)
Joins arrays along an axis.
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be joined.
All of these should have the same dimensionalities except the specified
axis.
axis (int): The axis to join arrays along.
Returns:
~chainerx.ndarray: Joined array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.concatenate`
""")
_docs.set_doc(
chainerx.stack,
"""stack(arrays, axis=0)
Stacks arrays along a new axis.
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
axis (int): Axis along which the arrays are stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.stack`
""")
_docs.set_doc(
chainerx.hstack,
"""hstack(arrays)
Stack arrays in sequence horizontally (column wise).
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.hstack`
""")
_docs.set_doc(
chainerx.vstack,
"""vstack(arrays)
Stack arrays in sequence vertically (row wise).
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.vstack`
""")
_docs.set_doc(
chainerx.dstack,
"""dstack(arrays)
Stack arrays in sequence depth wise (along third axis).
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.dstack`
""")
_docs.set_doc(
chainerx.atleast_2d,
"""atleast_2d(a)
View inputs as arrays with at least two dimensions.
Args:
a (~chainerx.ndarray): Array.
Returns:
~chainerx.ndarray: An array with a.ndim >= 2.
Copies are avoided where possible, and views with
two or more dimensions are returned.
Note:
* Arrays that already have two or more dimensions are preserved.
* During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``a``.
.. seealso:: :func:`numpy.atleast_2d`
""")
_docs.set_doc(
chainerx.atleast_3d,
"""atleast_3d(a)
View inputs as arrays with at least three dimensions.
Args:
a (~chainerx.ndarray): Array.
Returns:
~chainerx.ndarray: An array with a.ndim >= 3.
Copies are avoided where possible, and views with
three or more dimensions are returned.
Note:
* Arrays that already have three or more dimensions are preserved.
* During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``a``.
.. seealso:: :func:`numpy.atleast_3d`
""")
_docs.set_doc(
chainerx.split,
"""split(ary, indices_or_sections, axis=0)
Splits an array into multiple sub arrays along a given axis.
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
        divide the axis. If it is an integer, then it is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
axis (int): Axis along which the array is split.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.split`
""")
_docs.set_doc(
chainerx.dsplit,
"""dsplit(ary, indices_or_sections)
Split array into multiple sub-arrays along the 3rd axis (depth).
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
        divide the axis. If it is an integer, then it is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.dsplit`
""")
_docs.set_doc(
chainerx.vsplit,
"""vsplit(ary, indices_or_sections)
Splits an array into multiple sub-arrays vertically (row-wise).
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
        divide the axis. If it is an integer, then it is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.vsplit`
""")
_docs.set_doc(
chainerx.hsplit,
"""hsplit(ary, indices_or_sections)
Split an array into multiple sub-arrays horizontally (column-wise).
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
        divide the axis. If it is an integer, then it is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.hsplit`
""")
_docs.set_doc(
chainerx.swapaxes,
"""swapaxes(a, axis1, axis2)
Interchange two axes of an array.
Args:
    a (~chainerx.ndarray): Input array whose axes are swapped.
axis1 (int): First Axis
axis2 (int): Second Axis
Returns:
    ~chainerx.ndarray: Swapped array.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``a``.
.. seealso:: :func:`numpy.swapaxes`
""")
_docs.set_doc(
chainerx.repeat,
"""repeat(a, repeats, axis=None)
Constructs an array by repeating a given array.
Args:
a (~chainerx.ndarray): Array to repeat.
    repeats (int or tuple of ints): The number of times each
        element of ``a`` is repeated.
axis (int): The axis along which to repeat values.
Returns:
~chainerx.ndarray: The repeated output array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.repeat`
""")
_docs.set_doc(
chainerx.expand_dims,
"""expand_dims(a, axis)
Expand the shape of an array.
Args:
a (~chainerx.ndarray): Input Array.
axis (int): Position in the expanded axes where the new axis is placed.
Returns:
~chainerx.ndarray: Output array.
Note:
* Output array may or may not be a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``a``.
.. seealso:: :func:`numpy.expand_dims`
""")
_docs.set_doc(
chainerx.flip,
"""flip(m, axis)
Reverse the order of elements in an array along the given axis.
Args:
m (~chainerx.ndarray): Input Array.
axis (int or tuple of ints): Axis or axes along which to flip over.
The default, axis=None, will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the
axes specified in the tuple.
Returns:
~chainerx.ndarray: A view of m with the entries of axis reversed.
Since a view is returned, this operation is done in constant time.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``m``.
.. seealso:: :func:`numpy.flip`
""")
_docs.set_doc(
chainerx.fliplr,
"""fliplr(m)
Flip array in the left/right direction.
Args:
m (~chainerx.ndarray): Input Array.
Returns:
~chainerx.ndarray: A view of m with the columns reversed.
Since a view is returned, this operation is done in constant time.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``m``.
.. seealso:: :func:`numpy.fliplr`
""")
_docs.set_doc(
chainerx.flipud,
"""flipud(m)
Flip array in the up/down direction.
Args:
m (~chainerx.ndarray): Input Array.
Returns:
~chainerx.ndarray: A view of m with the rows reversed.
Since a view is returned, this operation is done in constant time.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``m``.
.. seealso:: :func:`numpy.flipud`
""")
_docs.set_doc(
chainerx.moveaxis,
"""moveaxis(a, source, destination)
Move axes of an array to new positions.
Other axes remain in their original order.
Args:
a (~chainerx.ndarray): Input Array.
source (int or tuple of ints): Original positions of the axes to move.
These must be unique.
    destination (int or tuple of ints): Destination positions for each of
the original axes. These must also be unique.
Returns:
~chainerx.ndarray: Array with moved axes. This array is a view of the
input array.
Note:
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``a``.
.. seealso:: :func:`numpy.moveaxis`
""")
def _docs_math():
_docs.set_doc(
chainerx.negative,
"""negative(x)
Numerical negative, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = -x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.negative`
""")
_docs.set_doc(
chainerx.add,
"""add(x1, x2)
Add arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 + x_2`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.add`
""")
_docs.set_doc(
chainerx.subtract,
"""subtract(x1, x2)
Subtract arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 - x_2`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.subtract`
""")
_docs.set_doc(
chainerx.multiply,
"""multiply(x1, x2)
Multiply arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 \\times x_2`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.multiply`
""")
_docs.set_doc(
chainerx.divide,
"""divide(x1, x2)
Divide arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\frac{x_1}{x_2}`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.divide`
""")
_docs.set_doc(
chainerx.sum,
"""sum(a, axis=None, keepdims=False)
Sum of array elements over a given axis.
Args:
a (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which a sum is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: The sum of input elements over a given axis.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.sum`
""")
_docs.set_doc(
chainerx.maximum,
"""maximum(x1, x2)
Maximum arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`:
Returned array: :math:`y = max(\\{x_1, x_2\\})`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.maximum`
""")
_docs.set_doc(
chainerx.minimum,
"""minimum(x1, x2)
Minimum arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`:
Returned array: :math:`y = min(\\{x_1, x_2\\})`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.minimum`
""")
_docs.set_doc(
chainerx.remainder,
"""remainder(x1, x2)
Return element-wise remainder of division.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`:
Returned array: The element-wise remainder of
the quotient ``floor_divide(x1, x2)``.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.remainder`
""")
_docs.set_doc(
chainerx.exp,
"""exp(x)
Numerical exponential, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\exp x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.exp`
""")
_docs.set_doc(
chainerx.log,
"""log(x)
Natural logarithm, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\ln x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log`
""")
_docs.set_doc(
chainerx.log10,
"""log10(x)
Base 10 logarithm, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\log_{10} x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log10`
""")
_docs.set_doc(
chainerx.log2,
"""log2(x)
Base 2 logarithm, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\log_{2} x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log2`
""")
_docs.set_doc(
chainerx.log1p,
"""log1p(x)
Natural logarithm of one plus the input, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\log(1 + x)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log1p`
""")
_docs.set_doc(
chainerx.logsumexp,
"""logsumexp(x, axis=None, keepdims=False)
The log of the sum of exponentials of input array.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which a sum is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: The log of the sum of exponentials of
input elements over a given axis.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
""")
_docs.set_doc(
chainerx.log_softmax,
"""log_softmax(x, axis=None)
The log of the softmax of input array.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which a sum is performed.
The flattened array is used by default.
Returns:
:class:`~chainerx.ndarray`: The log of the softmax of input elements
over a given axis.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
""")
_docs.set_doc(
chainerx.square,
"""square(x)
Returns the element-wise square of the input.
Args:
x (~chainerx.ndarray or scalar): Input data
Returns:
~chainerx.ndarray: Returned array: :math:`y = x * x`.
A scalar is returned if ``x`` is a scalar.
Note:
During backpropagation, this function propagates the gradient
of the output array to the input array ``x``.
.. seealso:: :data:`numpy.square`
""")
_docs.set_doc(
chainerx.sqrt,
"""sqrt(x)
Non-negative square-root, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\sqrt x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.sqrt`
""")
_docs.set_doc(
chainerx.sinh,
"""sinh(x)
Hyperbolic Sine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\sinh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.sinh`
""")
_docs.set_doc(
chainerx.cosh,
"""cosh(x)
Hyperbolic Cosine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\cosh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.cosh`
""")
_docs.set_doc(
chainerx.tanh,
"""tanh(x)
Element-wise hyperbolic tangent function.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\tanh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.tanh`
""")
_docs.set_doc(
chainerx.sigmoid,
"""sigmoid(x)
Element-wise sigmoid logistic function.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array:
:math:`f(x) = (1 + \\exp(-x))^{-1}`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :func:`chainer.functions.sigmoid`
""")
_docs.set_doc(
chainerx.sin,
"""sin(x)
Sine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\sin x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.sin`
""")
_docs.set_doc(
chainerx.cos,
"""cos(x)
Cosine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\cos x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.cos`
""")
_docs.set_doc(
chainerx.ceil,
"""ceil(x)
Return the ceiling of the input, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The ceiling of each element in array.
.. seealso:: :data:`numpy.ceil`
""")
_docs.set_doc(
chainerx.tan,
"""tan(x)
Tangent, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\tan x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.tan`
""")
_docs.set_doc(
chainerx.relu,
"""Rectified Linear Unit function.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\max (0, x)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
""")
_docs.set_doc(
chainerx.tree_lstm,
"""tree_lstm(*inputs)
TreeLSTM unit as an activation function.
This function implements TreeLSTM units both for
N-ary TreeLSTM and Child-Sum TreeLSTM.
Let the children cell states
:math:`c_{\\text{1}}, c_{\\text{2}}, \\dots, c_{\\text{N}}`,
and the incoming signal :math:`x`.
First, the incoming signal :math:`x` is split into (3 + N) arrays
:math:`a, i, o, f_{\\text{1}}, f_{\\text{2}}, ..., f_{\\text{N}}`
of the same shapes along the second axis.
It means that the second axis of :math:`x` must be (3 + N) times
the length of each :math:`c_{n}`.
The split input signals correspond to
- :math:`a` : sources of cell input
- :math:`i` : sources of input gate
- :math:`o` : sources of output gate
- :math:`f_{n}` : sources of forget gate for n-th ary
Second, it computes outputs as
.. math::
c &= \\tanh(a) \\text{sigmoid}(i) \\\\
& + c_{\\text{1}} \\text{sigmoid}(f_{\\text{1}}), \\\\
& + c_{\\text{2}} \\text{sigmoid}(f_{\\text{2}}), \\\\
& + ..., \\\\
& + c_{\\text{N}} \\text{sigmoid}(f_{\\text{N}}), \\\\
h &= \\tanh(c) \\text{sigmoid}(o).
These are returned as a tuple of (N + 1) variables.
Args:
inputs (list of :class:`~chainerx.array`): Variable arguments which
include all cell vectors from child-nodes, and an input vector.
Each of the cell vectors and the input vector is
:class:`~chainerx.array`.
The input vector must have the second dimension whose size
is (N + 3) times of that of each cell,
where N denotes the total number of cells.
Returns:
tuple: Two :class:`~chainerx.array` objects ``c`` and ``h``. ``c`` is
the updated cell state. ``h`` indicates the outgoing signal.
See the papers for details: `Improved Semantic Representations From
Tree-Structured Long Short-Term Memory Networks
<https://www.aclweb.org/anthology/P15-1150>`_ and
`A Fast Unified Model for Parsing and Sentence Understanding
<https://arxiv.org/pdf/1603.06021.pdf>`_.
Tai et al.'s N-ary TreeLSTM is slightly extended in
Bowman et al., and this function is based on
the variant by Bowman et al.
Specifically, eq. 10 in Tai et al. only has one :math:`W` matrix
to be applied to :math:`x`, consistently for all children.
On the other hand, Bowman et al.'s model has multiple matrices,
each of which affects the forget gate for each child's cell individually.
.. admonition:: Example
Assuming ``y`` is the current input signal, ``c`` is the previous cell
state, and ``h`` is the previous output signal from an
:meth:`~chainerx.tree_lstm` function.
Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
Using 2-ary (binary) TreeLSTM,
most typical preparation of ``x`` is
>>> c1 = chainerx.ones((4, 10), dtype = chainerx.float32)
>>> c2 = chainerx.ones((4, 10), dtype = chainerx.float32)
>>> x = chainerx.ones((4, 50), dtype = chainerx.float32)
>>> c, h = chainerx.tree_lstm(c1, c2, x)
""")
_docs.set_doc(
chainerx.slstm,
"""slstm(c_prev1, c_prev2, x1, x2)
S-LSTM units as an activation function.
This function implements S-LSTM unit. It is an extension of LSTM unit
applied to tree structures.
The function is applied to binary trees. Each node has two child nodes.
It gets four arguments, previous cell states ``c_prev1`` and ``c_prev2``,
and input arrays ``x1`` and ``x2``.
First both input arrays ``x1`` and ``x2`` are split into eight arrays
:math:`a_1, i_1, f_1, o_1`, and :math:`a_2, i_2, f_2, o_2`. They have the
same shape along the second axis.
It means that ``x1`` and ``x2`` 's second axis must have 4 times
the length of ``c_prev1`` and ``c_prev2``.
The split input arrays are corresponding to
- :math:`a_i` : sources of cell input
- :math:`i_i` : sources of input gate
- :math:`f_i` : sources of forget gate
- :math:`o_i` : sources of output gate
It computes the updated cell state ``c`` and the outgoing signal
``h`` as
.. math::
c &= \\tanh(a_1 + a_2) \\sigma(i_1 + i_2)
+ c_{\\text{prev}1} \\sigma(f_1)
+ c_{\\text{prev}2} \\sigma(f_2), \\\\
h &= \\tanh(c) \\sigma(o_1 + o_2),
where :math:`\\sigma` is the elementwise sigmoid function.
The function returns ``c`` and ``h`` as a tuple.
Args:
c_prev1 (:class:`~chainerx.array`):
Variable that holds the previous cell state of the first child
node. The cell state should be a zero array or the output of
the previous call of LSTM.
c_prev2 (:class:`~chainerx.array`):
Variable that holds the previous cell state of the second child
node.
x1 (:class:`~chainerx.array`):
Variable that holds the sources of cell input, input gate, forget
gate and output gate from the first child node. It must have the
second dimension whose size is four times of that of the cell
state.
x2 (:class:`~chainerx.array`):
Variable that holds the input sources from the second child node.
Returns:
tuple: Two :class:`~chainerx.array` objects ``c`` and ``h``. ``c`` is
the cell state. ``h`` indicates the outgoing signal.
See detail in paper: `Long Short-Term Memory Over Tree Structures
<https://arxiv.org/abs/1503.04881>`_.
.. admonition:: Example
Assuming ``c1``, ``c2`` is the previous cell state of children,
and ``h1``, ``h2`` is the previous outgoing signal from children.
Each of ``c1``, ``c2``, ``h1`` and ``h2`` has ``n_units`` channels.
Most typical preparation of ``x1``, ``x2`` is:
>>> n_units = 100
>>> c1 = chainerx.ones((1, n_units), np.float32)
>>> c2 = chainerx.ones((1, n_units), np.float32)
>>> x1 = chainerx.ones((1, 4 * n_units), chainerx.float32)
>>> x2 = chainerx.ones((1, 4 * n_units), chainerx.float32)
>>> c, h = chainerx.slstm(c1, c2, x1, x2)
""")
_docs.set_doc(
chainerx.arcsin,
"""arcsin(x)
Inverse sine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arcsin x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arcsin`
""")
_docs.set_doc(
chainerx.arccos,
"""arccos(x)
Trigonometric inverse cosine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arccos x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arccos`
""")
_docs.set_doc(
chainerx.arctan,
"""arctan(x)
Trigonometric inverse tangent, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arctan x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arctan`
""")
_docs.set_doc(
chainerx.arctan2,
"""arctan2(x1, x2)
Element-wise arc tangent of :math:`\\frac{x_1}{x_2}` choosing the quadrant
correctly.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returns an array where each element
represents :math:`\\theta` in the range :math:`[-\\pi, \\pi]`, such
that :math:`x_1 = r \\sin(\\theta)` and :math:`x_2 = r \\cos(\\theta)`
for some :math:`r > 0`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x1`` and/or ``x2``.
.. seealso:: :data:`numpy.arctan2`
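.. admonition:: Example
A minimal usage sketch; the coordinates below are illustrative assumptions:
>>> x1 = chainerx.array([1.0, -1.0], dtype=chainerx.float32)
>>> x2 = chainerx.array([1.0, 1.0], dtype=chainerx.float32)
>>> theta = chainerx.arctan2(x1, x2)  # roughly [pi / 4, -pi / 4]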
""")
_docs.set_doc(
chainerx.arcsinh,
"""arcsinh(x)
Inverse hyperbolic sine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arcsinh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arcsinh`
""")
_docs.set_doc(
chainerx.arccosh,
"""arccosh(x)
Inverse hyperbolic cosine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arccosh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arccosh`
""")
_docs.set_doc(
chainerx.fabs,
"""fabs(x)
Compute the absolute values element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The absolute values of x, the returned values
are always floats.
.. seealso:: :data:`numpy.fabs`
""")
_docs.set_doc(
chainerx.sign,
"""sign(x)
Returns an element-wise indication of the sign of a number.
The sign function returns :math:`-1` if :math:`x < 0`, :math:`0` if :math:`x = 0`, and :math:`1` if :math:`x > 0`.
``nan`` is returned for ``nan`` inputs.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The sign of x.
.. seealso:: :data:`numpy.sign`
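.. admonition:: Example
A minimal usage sketch; the concrete input values below are illustrative assumptions:
>>> x = chainerx.array([-5.0, 0.0, 3.0], dtype=chainerx.float32)
>>> s = chainerx.sign(x)  # s holds [-1., 0., 1.]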
""")
_docs.set_doc(
chainerx.floor,
"""floor(x)
Return the floor of the input, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The floor of each element in array.
.. seealso:: :data:`numpy.floor`
""")
_docs.set_doc(
chainerx.isnan,
"""isnan(x)
Test element-wise for NaN and return result as a boolean array.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: True where ``x`` is NaN, false otherwise
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.isnan`
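.. admonition:: Example
A minimal usage sketch; the concrete input values below are illustrative assumptions:
>>> x = chainerx.array([1.0, float('nan'), 3.0], dtype=chainerx.float32)
>>> mask = chainerx.isnan(x)  # mask holds [False, True, False]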
""")
_docs.set_doc(
chainerx.isfinite,
"""isfinite(x)
Test element-wise for finiteness (not infinity and not Not a Number).
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: True where x is not positive infinity,
negative infinity, or NaN; false otherwise.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.isfinite`
""")
_docs.set_doc(
chainerx.isinf,
"""isinf(x)
Test element-wise for positive or negative infinity.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: True where ``x`` is positive or negative
infinity, false otherwise.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.isinf`
""")
_docs.set_doc(
chainerx.bitwise_and,
"""bitwise_and(x1, x2)
Compute the bit-wise AND of two arrays element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 \\& x_2`
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.bitwise_and`
""")
_docs.set_doc(
chainerx.bitwise_or,
"""bitwise_or(x1, x2)
Compute the bit-wise OR of two arrays element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 | x_2`
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.bitwise_or`
""")
_docs.set_doc(
chainerx.bitwise_xor,
"""bitwise_xor(x1, x2)
Compute the bit-wise XOR of two arrays element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 \\oplus x_2`
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.bitwise_xor`
""")
_docs.set_doc(
chainerx.left_shift,
"""left_shift(x1, x2)
Shift the bits of an integer to the left.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Return `x1` with bits shifted `x2` times to the left.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.left_shift`
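.. admonition:: Example
A minimal usage sketch; the concrete values below are illustrative assumptions:
>>> x = chainerx.array([1, 2, 3], dtype=chainerx.int32)
>>> y = chainerx.left_shift(x, 2)  # y holds [4, 8, 12]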
""") # NOQA
_docs.set_doc(
chainerx.right_shift,
"""right_shift(x1, x2)
Shift the bits of an integer to the right.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Return `x1` with bits shifted `x2` times to the right.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.right_shift`
""") # NOQA
def _docs_sorting():
_docs.set_doc(
chainerx.argmax,
"""argmax(a, axis=None)
Returns the indices of the maximum along an axis.
Args:
a (~chainerx.ndarray): Array to take the indices of the maximum of.
axis (None or int): Along which axis to compute the maximum. The flattened
array is used by default.
Returns:
:class:`~chainerx.ndarray`: The indices of the maximum of ``a``, along the
axis if specified.
.. seealso:: :func:`numpy.argmax`
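.. admonition:: Example
A minimal usage sketch; the array contents below are illustrative assumptions:
>>> a = chainerx.array([[1.0, 9.0, 3.0], [4.0, 0.0, 8.0]], dtype=chainerx.float32)
>>> i_flat = chainerx.argmax(a)            # index 1 in the flattened array
>>> i_cols = chainerx.argmax(a, axis=0)    # [1, 0, 1], row index of each column maximum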
""")
_docs.set_doc(
chainerx.argmin,
"""argmin(a, axis=None)
Returns the indices of the minimum along an axis.
Args:
a (~chainerx.ndarray): Array to take the indices of the minimum of.
axis (None or int): Along which axis to compute the minimum. The flattened
array is used by default.
Returns:
:class:`~chainerx.ndarray`: The indices of the minimum of ``a``, along the
axis if specified.
.. seealso:: :func:`numpy.argmin`
""")
def _docs_statistics():
_docs.set_doc(
chainerx.amax,
"""amax(a, axis=None, keepdims=False)
Returns the maximum of an array or the maximum along an axis.
Note:
When at least one element is NaN, the corresponding max value will be NaN.
Args:
a (~chainerx.ndarray): Array to take the maximum.
axis (None or int or tuple of ints): Along which axis to take the maximum.
The flattened array is used by default.
If this is a tuple of ints, the maximum is selected over multiple
axes, instead of a single axis or all the axes.
keepdims (bool): If ``True``, the reduced axis is retained in the result as an axis of size one.
Returns:
:class:`~chainerx.ndarray`: The maximum of ``a``, along the axis if
specified.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.amax`
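.. admonition:: Example
A minimal usage sketch; the array contents below are illustrative assumptions:
>>> a = chainerx.array([[1.0, 9.0, 3.0], [4.0, 0.0, 8.0]], dtype=chainerx.float32)
>>> m_all = chainerx.amax(a)           # 9.0, maximum over the whole array
>>> m_rows = chainerx.amax(a, axis=1)  # [9., 8.], maximum of each row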
""")
_docs.set_doc(
chainerx.amin,
"""amin(a, axis=None, keepdims=False)
Returns the minimum of an array or the minimum along an axis.
Note:
When at least one element is NaN, the corresponding min value will be NaN.
Args:
a (~chainerx.ndarray): Array to take the minimum.
axis (None or int or tuple of ints): Along which axis to take the minimum.
The flattened array is used by default.
If this is a tuple of ints, the minimum is selected over multiple
axes, instead of a single axis or all the axes.
keepdims (bool): If ``True``, the reduced axis is retained in the result as an axis of size one.
Returns:
:class:`~chainerx.ndarray`: The minimum of ``a``, along the axis if
specified.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.amin`
""")
_docs.set_doc(
chainerx.mean,
"""mean(a, axis=None, keepdims=False)
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over the
flattened array by default, otherwise over the specified axis.
Args:
a (~chainerx.ndarray): Array to take the mean of.
axis (None or int or tuple of ints): Along which axis or axes to compute
the mean. The flattened array is used by default.
keepdims (bool): If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
Returns:
:class:`~chainerx.ndarray`: The mean of ``a``, along the axis or axes if
specified.
.. seealso:: :func:`numpy.mean`
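.. admonition:: Example
A minimal usage sketch; the array contents below are illustrative assumptions:
>>> a = chainerx.array([[1.0, 2.0], [3.0, 4.0]], dtype=chainerx.float32)
>>> m = chainerx.mean(a)            # 2.5, mean over the flattened array
>>> m0 = chainerx.mean(a, axis=0)   # [2., 3.], column-wise means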
""")
_docs.set_doc(
chainerx.var,
"""var(a, axis=None, keepdims=False)
Compute the arithmetic variance along the specified axis.
Returns the variance of the array elements. The variance is taken over the
flattened array by default, otherwise over the specified axis.
Args:
a (~chainerx.ndarray): Array to take the var of.
axis (None or int or tuple of ints): Along which axis or axes to compute
the var. The flattened array is used by default.
keepdims (bool): If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
Returns:
:class:`~chainerx.ndarray`: The var of ``a``, along the axis or axes if
specified.
.. seealso:: :func:`numpy.var`
""")
def _docs_connection():
_docs.set_doc(
chainerx.conv,
"""conv(x, w, b=None, stride=1, pad=0, cover_all=False)
N-dimensional convolution.
This is an implementation of N-dimensional convolution, which generalizes
the two-dimensional convolution used in ConvNets. It takes three arrays: the
input ``x``, the filter weight ``w`` and the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`N` is the number of spatial dimensions.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`d_1, d_2, ..., d_N` are the size of each axis of the input's
spatial dimensions, respectively.
- :math:`k_1, k_2, ..., k_N` are the size of each axis of the filters,
respectively.
- :math:`l_1, l_2, ..., l_N` are the size of each axis of the output's
spatial dimensions, respectively.
- :math:`p_1, p_2, ..., p_N` are the size of each axis of the spatial
padding size, respectively.
Then the ``conv`` function computes correlations between filters
and patches of size :math:`(k_1, k_2, ..., k_N)` in ``x``.
Note that correlation here is equivalent to the inner product between
expanded tensors.
Patches are extracted at positions shifted by multiples of ``stride`` from
the first position ``(-p_1, -p_2, ..., -p_N)`` for each spatial axis.
Let :math:`(s_1, s_2, ..., s_N)` be the stride of filter application.
Then, the output size :math:`(l_1, l_2, ..., l_N)` is determined by the
following equations:
.. math::
l_n = (d_n + 2p_n - k_n) / s_n + 1 \\ \\ (n = 1, ..., N)
If the ``cover_all`` option is ``True``, the filter will cover all the
spatial locations. So, if the last stride of the filter does not cover the
end of spatial locations, an additional stride will be applied to the end
part of spatial locations. In this case, the output size is determined by
the following equations:
.. math::
l_n = (d_n + 2p_n - k_n + s_n - 1) / s_n + 1 \\ \\ (n = 1, ..., N)
Args:
x (:class:`~chainerx.ndarray`):
Input array of shape :math:`(n, c_I, d_1, d_2, ..., d_N)`.
w (:class:`~chainerx.ndarray`):
Weight array of shape :math:`(c_O, c_I, k_1, k_2, ..., k_N)`.
b (None or :class:`~chainerx.ndarray`):
One-dimensional bias array with length :math:`c_O` (optional).
stride (:class:`int` or :class:`tuple` of :class:`int` s):
Stride of filter applications :math:`(s_1, s_2, ..., s_N)`.
``stride=s`` is equivalent to ``(s, s, ..., s)``.
pad (:class:`int` or :class:`tuple` of :class:`int` s):
Spatial padding width for input arrays
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
cover_all (bool): If ``True``, all spatial locations are convoluted
into some output pixels. It may make the output size larger.
`cover_all` needs to be ``False`` if you want to use ``cuda`` backend.
Returns:
~chainerx.ndarray:
Output array of shape :math:`(n, c_O, l_1, l_2, ..., l_N)`.
Note:
In ``cuda`` backend, this function uses cuDNN implementation for its
forward and backward computation.
Note:
In ``cuda`` backend, this function has following limitations yet:
- The ``cover_all=True`` option is not supported yet.
- The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``x``, ``w``, and ``b``.
.. seealso:: :func:`chainer.functions.convolution_nd`
.. admonition:: Example
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 30, 40, 50
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = chainerx.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 30, 40, 50)
>>> w = chainerx.random.uniform(0, 1, (c_o, c_i, k1, k2, k3)).\
astype(np.float32)
>>> w.shape
(1, 3, 10, 10, 10)
>>> b = chainerx.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> y = chainerx.conv(x, w, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3))
>>> y.shape
(10, 1, 16, 11, 9)
>>> l1 = int((d1 + 2 * p1 - k1) / s1 + 1)
>>> l2 = int((d2 + 2 * p2 - k2) / s2 + 1)
>>> l3 = int((d3 + 2 * p3 - k3) / s3 + 1)
>>> y.shape == (n, c_o, l1, l2, l3)
True
>>> y = chainerx.conv(x, w, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3), cover_all=True)
>>> y.shape == (n, c_o, l1, l2, l3 + 1)
True
""")
_docs.set_doc(
chainerx.conv_transpose,
"""conv_transpose(x, w, b=None, stride=1, pad=0, outsize=None)
N-dimensional transposed convolution.
This is an implementation of N-dimensional transposed convolution, which was
previously known as **deconvolution** in Chainer.
.. _Deconvolutional Networks: \
http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf
It takes three arrays: the input ``x``, the filter weight ``w``, and the
bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`N` is the number of spatial dimensions.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`d_1, d_2, ..., d_N` are the size of each axis of the input's
spatial dimensions, respectively.
- :math:`k_1, k_2, ..., k_N` are the size of each axis of the filters,
respectively.
- :math:`p_1, p_2, ..., p_N` are the size of each axis of the spatial
padding size, respectively.
- :math:`s_1, s_2, ..., s_N` are the stride of each axis of filter
application, respectively.
If ``outsize`` option is ``None``, the output size
:math:`(l_1, l_2, ..., l_N)` is determined by the following equations with
the items in the above list:
.. math::
l_n = s_n (d_n - 1) + k_n - 2 p_n \\ \\ (n = 1, ..., N)
If ``outsize`` option is given, the output size is determined by
``outsize``. In this case, the ``outsize`` :math:`(l_1, l_2, ..., l_N)`
must satisfy the following equations:
.. math::
d_n = \\lfloor (l_n + 2p_n - k_n) / s_n \\rfloor + 1 \\ \\ \
(n = 1, ..., N)
Args:
x (:class:`~chainerx.ndarray`):
Input array of shape :math:`(n, c_I, d_1, d_2, ..., d_N)`.
w (:class:`~chainerx.ndarray`):
Weight array of shape :math:`(c_I, c_O, k_1, k_2, ..., k_N)`.
b (None or :class:`~chainerx.ndarray`):
One-dimensional bias array with length :math:`c_O` (optional).
stride (:class:`int` or :class:`tuple` of :class:`int` s):
Stride of filter applications :math:`(s_1, s_2, ..., s_N)`.
``stride=s`` is equivalent to ``(s, s, ..., s)``.
pad (:class:`int` or :class:`tuple` of :class:`int` s):
Spatial padding width for input arrays
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
outsize (None or :class:`tuple` of :class:`int` s):
Expected output size of deconvolutional operation. It should be a
tuple of ints :math:`(l_1, l_2, ..., l_N)`. Default value is
``None`` and the outsize is estimated by input size, stride and
pad.
Returns:
~chainerx.ndarray:
Output array of shape :math:`(n, c_O, l_1, l_2, ..., l_N)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``x``, ``w``, and ``b``.
.. seealso:: :func:`chainer.functions.deconvolution_nd`
.. admonition:: Example
**Example1**: the case when ``outsize`` is not given.
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 5, 10, 15
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = chainerx.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 5, 10, 15)
>>> w = chainerx.random.uniform(0, 1, (c_i, c_o, k1, k2, k3)).\
astype(np.float32)
>>> w.shape
(3, 1, 10, 10, 10)
>>> b = chainerx.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> y = chainerx.conv_transpose(x, w, b, stride=(s1, s2, s3), \
pad=(p1, p2, p3))
>>> y.shape
(10, 1, 8, 36, 84)
>>> l1 = s1 * (d1 - 1) + k1 - 2 * p1
>>> l2 = s2 * (d2 - 1) + k2 - 2 * p2
>>> l3 = s3 * (d3 - 1) + k3 - 2 * p3
>>> y.shape == (n, c_o, l1, l2, l3)
True
**Example2**: the case when ``outsize`` is given.
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 5, 10, 15
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = chainerx.array(np.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32))
>>> x.shape
(10, 3, 5, 10, 15)
>>> w = chainerx.array(np.random.uniform(0, 1, (c_i, c_o, k1, k2, k3)).\
astype(np.float32))
>>> w.shape
(3, 1, 10, 10, 10)
>>> b = chainerx.array(np.random.uniform(0, 1, (c_o)).astype(np.float32))
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> l1, l2, l3 = 9, 38, 87
>>> d1 == int((l1 + 2 * p1 - k1) / s1) + 1
True
>>> d2 == int((l2 + 2 * p2 - k2) / s2) + 1
True
>>> d3 == int((l3 + 2 * p3 - k3) / s3) + 1
True
>>> y = chainerx.conv_transpose(x, w, b, stride=(s1, s2, s3), \
pad=(p1, p2, p3), outsize=(l1, l2, l3))
>>> y.shape
(10, 1, 9, 38, 87)
>>> y.shape == (n, c_o, l1, l2, l3)
True
""")
_docs.set_doc(
chainerx.linear,
"""linear(x, W, b=None, n_batch_axis=1)
Linear function, or affine transformation.
It accepts two or three arguments: an input minibatch ``x``, a weight
matrix ``W``, and optionally a bias vector ``b``. It computes
.. math:: Y = xW^\\top + b.
Args:
x (~chainerx.ndarray):
Input array, which is a :math:`(s_1, s_2, ..., s_n)`-shaped array.
W (~chainerx.ndarray):
Weight variable of shape :math:`(M, N)`,
where :math:`(N = s_{\\rm n\\_batch\\_axes} * ... * s_n)`.
b (~chainerx.ndarray):
Bias variable (optional) of shape :math:`(M,)`.
n_batch_axes (int):
The number of batch axes. The default is 1. The input variable is
reshaped into (:math:`{\\rm n\\_batch\\_axes} + 1`)-dimensional
tensor. This should be greater than 0.
Returns:
:class:`~chainerx.ndarray`:
Output array with shape of
:math:`(s_1, ..., s_{\\rm n\\_batch\\_axes}, M)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``x``, ``W`` and ``b``.
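.. admonition:: Example
A minimal shape-only sketch; the sizes below are illustrative assumptions:
>>> x = chainerx.ones((4, 10), dtype=chainerx.float32)  # batch of 4, 10 features
>>> W = chainerx.ones((3, 10), dtype=chainerx.float32)  # 3 output units
>>> b = chainerx.ones((3,), dtype=chainerx.float32)
>>> y = chainerx.linear(x, W, b)  # y.shape == (4, 3)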
""")
_docs.set_doc(
chainerx.lstm,
"""lstm(c_prev, x)
Long Short-Term Memory units as an activation function.
This function implements LSTM units with forget gates. Let the previous
cell state ``c_prev`` and the input array ``x``.
First, the input array ``x`` is split into four arrays
:math:`a, i, f, o` of the same shapes along the second axis. It means that
``x`` 's second axis must have 4 times the length of ``c_prev`` 's second axis.
The split input arrays correspond to:
- :math:`a` : sources of cell input
- :math:`i` : sources of input gate
- :math:`f` : sources of forget gate
- :math:`o` : sources of output gate
Second, it computes the updated cell state ``c`` and the outgoing signal
``h`` as
.. math::
c &= \\tanh(a) \\sigma(i)
+ c_{\\text{prev}} \\sigma(f), \\\\
h &= \\tanh(c) \\sigma(o),
where :math:`\\sigma` is the elementwise sigmoid function.
These are returned as a tuple of two variables.
This function supports variable length inputs. The mini-batch size of
the current input must be equal to or smaller than that of the previous
one. When mini-batch size of ``x`` is smaller than that of ``c``, this
function only updates ``c[0:len(x)]`` and doesn't change the rest of ``c``,
``c[len(x):]``. So,
please sort input sequences in descending order of lengths before
applying the function.
Args:
c_prev (:class:`~chainerx.array`):
Variable that holds the previous cell state. The cell state
should be a zero array or the output of the previous call of LSTM.
x (:class:`~chainerx.array`):
Variable that holds the sources of cell input, input gate, forget
gate and output gate. It must have the second dimension whose size
is four times of that of the cell state.
Returns:
tuple: Two :class:`~chainerx.array` objects ``c`` and ``h``.
``c`` is the updated cell state. ``h`` indicates the outgoing signal.
See the original paper proposing LSTM with forget gates:
`Long Short-Term Memory in Recurrent Neural Networks
<http://www.felixgers.de/papers/phd.pdf>`_.
.. admonition:: Example
Assuming ``y`` is the current incoming signal, ``c`` is the previous
cell state, and ``h`` is the previous outgoing signal from an ``lstm``
function. Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
Most typical preparation of ``x`` is
>>> n_units = 100
>>> c_prev = chainerx.zeros((1, n_units), chainerx.float32)
>>> x = chainerx.zeros((1, 4 * n_units), chainerx.float32)
>>> c, h = chainerx.lstm(c_prev, x)
This corresponds to calculating the input array ``x``, or the input
sources :math:`a, i, f, o`, from the current incoming signal ``y`` and
the previous outgoing signal ``h``. Different parameters are used for
different kinds of input sources.
""")
def _docs_normalization():
_docs.set_doc(
chainerx.batch_norm,
"""batch_norm(x, gamma, beta, running_mean, running_var, eps=2e-5, \
decay=0.9, axis=None)
Batch normalization function.
It takes the input array ``x`` and two parameter arrays ``gamma`` and
``beta``. The parameter arrays must both have the same size.
Args:
x (~chainerx.ndarray): Input array.
gamma (~chainerx.ndarray): Scaling parameter of normalized data.
beta (~chainerx.ndarray): Shifting parameter of scaled normalized data.
running_mean (~chainerx.ndarray):
Running average of the mean. This is a running average of
the mean over several mini-batches using the decay parameter.
The function takes a previous running average, and updates
the array in-place by the new running average.
running_var (~chainerx.ndarray):
Running average of the variance. This is a running average of
the variance over several mini-batches using the decay parameter.
The function takes a previous running average, and updates
the array in-place by the new running average.
eps (float): Epsilon value for numerical stability.
decay (float): Decay rate of moving average. It is used during training.
axis (int, tuple of int or None):
Axis over which normalization is performed. When axis is ``None``,
the first axis is treated as the batch axis and will be reduced
during normalization.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x``, ``gamma`` and ``beta``.
See: `Batch Normalization: Accelerating Deep Network Training by Reducing\
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_
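.. admonition:: Example
A minimal shape-only sketch, assuming a 2-D ``(batch, features)`` input so that the
parameter arrays have shape ``(features,)`` with the default ``axis``:
>>> x = chainerx.ones((5, 3), dtype=chainerx.float32)
>>> gamma = chainerx.ones((3,), dtype=chainerx.float32)
>>> beta = chainerx.zeros((3,), dtype=chainerx.float32)
>>> running_mean = chainerx.zeros((3,), dtype=chainerx.float32)
>>> running_var = chainerx.ones((3,), dtype=chainerx.float32)
>>> y = chainerx.batch_norm(x, gamma, beta, running_mean, running_var)  # y.shape == (5, 3)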
""")
_docs.set_doc(
chainerx.fixed_batch_norm,
"""fixed_batch_norm(x, gamma, beta, mean, var, eps=2e-5, axis=None)
Batch normalization function with fixed statistics.
This is a variant of :func:`~chainerx.batch_norm`, where the mean
and variance statistics are given by the caller as fixed variables.
Args:
x (~chainerx.ndarray): Input array.
gamma (~chainerx.ndarray): Scaling parameter of normalized data.
beta (~chainerx.ndarray): Shifting parameter of scaled normalized data.
mean (~chainerx.ndarray): Shifting parameter of input.
var (~chainerx.ndarray): Square of scaling parameter of input.
eps (float): Epsilon value for numerical stability.
axis (int, tuple of int or None):
Axis over which normalization is performed. When axis is ``None``,
the first axis is treated as the batch axis and will be reduced
during normalization.
Note:
During backpropagation, this function does not propagate gradients.
""")
def _docs_pooling():
_docs.set_doc(
chainerx.max_pool,
"""max_pool(x, ksize, stride=None, pad=0, cover_all=False)
Spatial max pooling function.
This acts similarly to :func:`~chainerx.conv`, but it computes the maximum
of input spatial patch for each channel without any parameter instead of
computing the inner products.
Args:
x (~chainerx.ndarray): Input array.
ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
``None`` is specified, then it uses same stride as the pooling
window size.
pad (int or tuple of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
cover_all (bool): If ``True``, all spatial locations are pooled into
some output pixels. It may make the output size larger.
Returns:
:class:`~chainerx.ndarray`: Output array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``. This function is only
differentiable up to the second order.
.. note::
In ``cuda`` backend, only 2 and 3 dim arrays are supported as ``x``
because cuDNN pooling supports 2 and 3 spatial dimensions.
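.. admonition:: Example
A minimal shape-only sketch; the sizes below are illustrative assumptions:
>>> x = chainerx.ones((1, 3, 8, 8), dtype=chainerx.float32)  # (batch, channel, H, W)
>>> y = chainerx.max_pool(x, ksize=2, stride=2)  # y.shape == (1, 3, 4, 4)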
""")
_docs.set_doc(
chainerx.average_pool,
"""average_pool(x, ksize, stride=None, pad=0, pad_mode='ignore')
Spatial average pooling function.
This acts similarly to :func:`~chainerx.conv`, but it computes the average
of input spatial patch for each channel without any parameter instead of
computing the inner products.
Args:
x (~chainerx.ndarray): Input array.
ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
``None`` is specified, then it uses same stride as the pooling
window size.
pad (int or tuple of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
pad_mode ({'zero', 'ignore'}): Specifies how padded region is treated.
* 'zero' -- the values in the padded region are treated as 0
* 'ignore' -- padded region is ignored (default)
Returns:
:class:`~chainerx.ndarray`: Output array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. note::
In ``cuda`` backend, only 2 and 3 dim arrays are supported as ``x``
because cuDNN pooling supports 2 and 3 spatial dimensions.
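.. admonition:: Example
A minimal shape-only sketch; the sizes below are illustrative assumptions:
>>> x = chainerx.ones((1, 3, 8, 8), dtype=chainerx.float32)  # (batch, channel, H, W)
>>> y = chainerx.average_pool(x, ksize=4)  # stride defaults to ksize, so y.shape == (1, 3, 2, 2)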
""")
def _docs_rnn():
_docs.set_doc(
chainerx.n_step_lstm,
"""n_step_lstm(n_layers, hx, cx, ws, bs, xs)
Stacked Uni-directional Long Short-Term Memory function.
This function calculates stacked Uni-directional LSTM with sequences.
This function gets an initial hidden state :math:`h_0`, an initial cell
state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` and :math:`c_t` for each
time :math:`t` from input :math:`x_t`.
.. math::
i_t &= \\sigma(W_0 x_t + W_4 h_{t-1} + b_0 + b_4) \\\\
f_t &= \\sigma(W_1 x_t + W_5 h_{t-1} + b_1 + b_5) \\\\
o_t &= \\sigma(W_2 x_t + W_6 h_{t-1} + b_2 + b_6) \\\\
a_t &= \\tanh(W_3 x_t + W_7 h_{t-1} + b_3 + b_7) \\\\
c_t &= f_t \\cdot c_{t-1} + i_t \\cdot a_t \\\\
h_t &= o_t \\cdot \\tanh(c_t)
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Eight weight matrices and eight bias vectors are
required for each layer. So, when :math:`S` layers exist, you need to
prepare :math:`8S` weight matrices and :math:`8S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except the first layer may have different
shape from the first layer.
Args:
n_layers(int): The number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is the number of layers and
is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
is the dimension of the hidden units.
cx (:class:`~chainerx.array`): Variable holding stacked cell states.
It has the same shape as ``hx``.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i]`` represents the weights for the i-th layer.
Each ``ws[i]`` is a list containing eight matrices.
``ws[i][j]`` corresponds to :math:`W_j` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 4`` are ``(N, I)``-shaped as
they are multiplied with input variables, where ``I`` is the size
of the input and ``N`` is the dimension of the hidden units. All
other matrices are ``(N, N)``-shaped.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[i]`` represents the biases for the i-th layer.
Each ``bs[i]`` is a list containing eight vectors.
``bs[i][j]`` corresponds to :math:`b_j` in the equation.
The shape of each matrix is ``(N,)`` where ``N`` is the dimension
of the hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
mini-batch size for time ``t``.
When sequences has different lengths, they must be
sorted in descending order of their lengths.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing three elements,
``hy``, ``cy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is the same as
``hx``.
- ``cy`` is an updated cell states whose shape is the same as
``cx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
the mini-batch size for time ``t``, and ``N`` is size of hidden
units. Note that ``B_t`` is the same value as ``xs[t]``.
.. note::
The dimension of hidden units is limited to only one size ``N``. If you
want to use variable dimension of hidden units, please use
:class:`chainerx.lstm`.
.. seealso::
:func:`chainerx.lstm`
.. admonition:: Example
>>> import chainerx as chx
>>> batchs = [3, 2, 1] # support variable length sequences
>>> in_size, out_size, n_layers = 3, 2, 2
>>> xs = [chx.ones((b, in_size)).astype(chx.float32) for b in batchs]
>>> [x.shape for x in xs]
[(3, 3), (2, 3), (1, 3)]
>>> h_shape = (n_layers, batchs[0], out_size)
>>> hx = chx.ones(h_shape).astype(chx.float32)
>>> cx = chx.ones(h_shape).astype(chx.float32)
>>> w_in = lambda i, j: in_size if i == 0 and j < 4 else out_size
>>> ws = []
>>> bs = []
>>> for n in range(n_layers):
... ws.append([chx.ones((out_size, w_in(n, i))).\
astype(np.float32) for i in range(8)])
... bs.append([chx.ones((out_size,)).astype(chx.float32) \
for _ in range(8)])
...
>>> ws[0][0].shape # ws[0][:4].shape are (out_size, in_size)
(2, 3)
>>> ws[1][0].shape # others are (out_size, out_size)
(2, 2)
>>> bs[0][0].shape
(2,)
>>> hy, cy, ys = chx.n_step_lstm(
... n_layers, hx, cx, ws, bs, xs)
>>> hy.shape
(2, 3, 2)
>>> cy.shape
(2, 3, 2)
>>> [y.shape for y in ys]
[(3, 2), (2, 2), (1, 2)]
""")
_docs.set_doc(
chainerx.n_step_bilstm,
"""n_step_bilstm(n_layers, hx, cx, ws, bs, xs)
Stacked Bi-directional Long Short-Term Memory function.
This function calculates stacked Bi-directional LSTM with sequences.
This function gets an initial hidden state :math:`h_0`, an initial cell
state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` and :math:`c_t` for each
time :math:`t` from input :math:`x_t`.
.. math::
i^{f}_t &=& \\sigma(W^{f}_0 x_t + W^{f}_4 h_{t-1} + b^{f}_0 + b^{f}_4),
\\\\
f^{f}_t &=& \\sigma(W^{f}_1 x_t + W^{f}_5 h_{t-1} + b^{f}_1 + b^{f}_5),
\\\\
o^{f}_t &=& \\sigma(W^{f}_2 x_t + W^{f}_6 h_{t-1} + b^{f}_2 + b^{f}_6),
\\\\
a^{f}_t &=& \\tanh(W^{f}_3 x_t + W^{f}_7 h_{t-1} + b^{f}_3 + b^{f}_7),
\\\\
c^{f}_t &=& f^{f}_t \\cdot c^{f}_{t-1} + i^{f}_t \\cdot a^{f}_t,
\\\\
h^{f}_t &=& o^{f}_t \\cdot \\tanh(c^{f}_t),
\\\\
i^{b}_t &=& \\sigma(W^{b}_0 x_t + W^{b}_4 h_{t-1} + b^{b}_0 + b^{b}_4),
\\\\
f^{b}_t &=& \\sigma(W^{b}_1 x_t + W^{b}_5 h_{t-1} + b^{b}_1 + b^{b}_5),
\\\\
o^{b}_t &=& \\sigma(W^{b}_2 x_t + W^{b}_6 h_{t-1} + b^{b}_2 + b^{b}_6),
\\\\
a^{b}_t &=& \\tanh(W^{b}_3 x_t + W^{b}_7 h_{t-1} + b^{b}_3 + b^{b}_7),
\\\\
c^{b}_t &=& f^{b}_t \\cdot c^{b}_{t-1} + i^{b}_t \\cdot a^{b}_t, \\\\
h^{b}_t &=& o^{b}_t \\cdot \\tanh(c^{b}_t), \\\\
h_t &=& [h^{f}_t; h^{b}_t]
where :math:`W^{f}` is the weight matrices for forward-LSTM, :math:`W^{b}`
is weight matrices for backward-LSTM.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Eight weight matrices and eight bias vectors are
required for each layer of each direction. So, when :math:`S` layers
exist, you need to prepare :math:`16S` weight matrices and :math:`16S`
bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except the first layer may have different
shape from the first layer.
Args:
n_layers(int): The number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(2S, B, N)`` where ``S`` is the number of layers and
is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
is the dimension of the hidden units. Because of bi-direction, the
first dimension length is ``2S``.
cx (:class:`~chainerx.array`): Variable holding stacked cell states.
It has the same shape as ``hx``.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[2 * l + m]`` represents the weights for the l-th layer of
the m-th direction. (``m == 0`` means the forward direction and
``m == 1`` means the backward direction.) Each ``ws[i]`` is a
list containing eight matrices. ``ws[i][j]`` corresponds to
:math:`W_j` in the equation. ``ws[0][j]`` and ``ws[1][j]`` where
``0 <= j < 4`` are ``(N, I)``-shaped because they are multiplied
with input variables, where ``I`` is the size of the input.
``ws[i][j]`` where ``2 <= i`` and ``0 <= j < 4`` are
``(N, 2N)``-shaped because they are multiplied with two hidden
layers :math:`h_t = [h^{f}_t; h^{b}_t]`. All other matrices are
``(N, N)``-shaped.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[2 * l + m]`` represents the biases for the l-th layer of
the m-th direction. (``m == 0`` means the forward direction and
``m == 1`` means the backward direction.)
Each ``bs[i]`` is a list containing eight vectors.
``bs[i][j]`` corresponds to :math:`b_j` in the equation.
The shape of each matrix is ``(N,)``.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
mini-batch size for time ``t``.
When sequences has different lengths, they must be
sorted in descending order of their lengths.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing three elements,
``hy``, ``cy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is the same as
``hx``.
- ``cy`` is an updated cell states whose shape is the same as
``cx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, 2N)`` where ``B_t``
is the mini-batch size for time ``t``, and ``N`` is size of
hidden units. Note that ``B_t`` is the same value as ``xs[t]``.
.. admonition:: Example
>>> import chainerx as chx
>>> batchs = [3, 2, 1] # support variable length sequences
>>> in_size, out_size, n_layers = 3, 2, 2
>>> dropout_ratio = 0.0
>>> xs = [chx.ones((b, in_size)).astype(chx.float32) for b in batchs]
>>> [x.shape for x in xs]
[(3, 3), (2, 3), (1, 3)]
>>> h_shape = (n_layers * 2, batchs[0], out_size)
>>> hx = chx.ones(h_shape).astype(chx.float32)
>>> cx = chx.ones(h_shape).astype(chx.float32)
>>> def w_in(i, j):
... if i == 0 and j < 4:
... return in_size
... elif i > 0 and j < 4:
... return out_size * 2
... else:
... return out_size
...
>>> ws = []
>>> bs = []
>>> for n in range(n_layers):
... for direction in (0, 1):
... ws.append([chx.ones((out_size, w_in(n, i))).\
astype(np.float32) for i in range(8)])
... bs.append([chx.ones((out_size,)).astype(chx.float32) \
for _ in range(8)])
...
>>> ws[0][0].shape # ws[0:2][:4].shape are (out_size, in_size)
(2, 3)
>>> ws[2][0].shape # ws[2:][:4].shape are (out_size, 2 * out_size)
(2, 4)
>>> ws[0][4].shape # others are (out_size, out_size)
(2, 2)
>>> bs[0][0].shape
(2,)
>>> hy, cy, ys = chx.n_step_bilstm(
... n_layers, hx, cx, ws, bs, xs)
>>> hy.shape
(4, 3, 2)
>>> cy.shape
(4, 3, 2)
>>> [y.shape for y in ys]
[(3, 4), (2, 4), (1, 4)]
""")
_docs.set_doc(
chainerx.n_step_gru,
"""n_step_gru(n_layers, hx, ws, bs, xs)
Stacked Uni-directional Gated Recurrent Unit function.
This function calculates stacked Uni-directional GRU with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each time :math:`t`
from input :math:`x_t`.
.. math::
r_t &= \\sigma(W_0 x_t + W_3 h_{t-1} + b_0 + b_3) \\\\
z_t &= \\sigma(W_1 x_t + W_4 h_{t-1} + b_1 + b_4) \\\\
h'_t &= \\tanh(W_2 x_t + b_2 + r_t \\cdot (W_5 h_{t-1} + b_5)) \\\\
h_t &= (1 - z_t) \\cdot h'_t + z_t \\cdot h_{t-1}
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Six weight matrices and six bias vectors are
required for each layers. So, when :math:`S` layers exists, you need to
prepare :math:`6S` weight matrices and :math:`6S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
hx (~chainerx.array):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i]`` represents weights for i-th layer.
Each ``ws[i]`` is a list containing six matrices.
``ws[i][j]`` corresponds to ``W_j`` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 3`` are ``(N, I)``-shaped as they
are multiplied with input variables. All other matrices have
``(N, N)`` shape.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[i]`` represents biases for the i-th layer.
Each ``bs[i]`` is a list containing six vectors.
``bs[i][j]`` corresponds to ``b_j`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
When sequences have different lengths, sort them in descending
order by length.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
units. Note that ``B_t`` is the same value as ``xs[t]``
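.. admonition:: Example
A minimal shape-only sketch mirroring the :func:`chainerx.n_step_lstm` example; the
sizes below are illustrative assumptions (GRU needs six weight matrices and six bias
vectors per layer):
>>> import chainerx as chx
>>> batchs = [3, 2, 1]
>>> in_size, out_size, n_layers = 3, 2, 2
>>> xs = [chx.ones((b, in_size), dtype=chx.float32) for b in batchs]
>>> hx = chx.ones((n_layers, batchs[0], out_size), dtype=chx.float32)
>>> w_in = lambda i, j: in_size if i == 0 and j < 3 else out_size
>>> ws = [[chx.ones((out_size, w_in(n, i)), dtype=chx.float32) for i in range(6)]
...       for n in range(n_layers)]
>>> bs = [[chx.ones((out_size,), dtype=chx.float32) for _ in range(6)]
...       for n in range(n_layers)]
>>> hy, ys = chx.n_step_gru(n_layers, hx, ws, bs, xs)  # hy.shape == (2, 3, 2)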
""")
_docs.set_doc(
chainerx.n_step_bigru,
"""n_step_bigru(n_layers, hx, ws, bs, xs)
Stacked Bi-directional Gated Recurrent Unit function.
This function calculates stacked Bi-directional GRU with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each time :math:`t`
from input :math:`x_t`.
.. math::
r^{f}_t &= \\sigma(W^{f}_0 x_t + W^{f}_3 h_{t-1} + b^{f}_0 + b^{f}_3)
\\\\
z^{f}_t &= \\sigma(W^{f}_1 x_t + W^{f}_4 h_{t-1} + b^{f}_1 + b^{f}_4)
\\\\
h^{f'}_t &= \\tanh(W^{f}_2 x_t + b^{f}_2 + r^{f}_t \\cdot (W^{f}_5
h_{t-1} + b^{f}_5)) \\\\
h^{f}_t &= (1 - z^{f}_t) \\cdot h^{f'}_t + z^{f}_t \\cdot h_{t-1}
\\\\
r^{b}_t &= \\sigma(W^{b}_0 x_t + W^{b}_3 h_{t-1} + b^{b}_0 + b^{b}_3)
\\\\
z^{b}_t &= \\sigma(W^{b}_1 x_t + W^{b}_4 h_{t-1} + b^{b}_1 + b^{b}_4)
\\\\
h^{b'}_t &= \\tanh(W^{b}_2 x_t + b^{b}_2 + r^{b}_t \\cdot (W^{b}_5
h_{t-1} + b^{b}_5)) \\\\
h^{b}_t &= (1 - z^{b}_t) \\cdot h^{b'}_t + z^{b}_t \\cdot h_{t-1}
\\\\
h_t &= [h^{f}_t; h^{b}_t] \\\\
where :math:`W^{f}` is weight matrices for forward-GRU, :math:`W^{b}` is
weight matrices for backward-GRU.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Six weight matrices and six bias vectors are
required for each layers. So, when :math:`S` layers exists, you need to
prepare :math:`6S` weight matrices and :math:`6S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(2S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i]`` represents weights for i-th layer.
Each ``ws[i]`` is a list containing six matrices.
``ws[i][j]`` corresponds to ``W_j`` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 3`` are ``(N, I)``-shaped as they
are multiplied with input variables. All other matrices have
``(N, N)`` shape.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[i]`` represents biases for the i-th layer.
Each ``bs[i]`` is a list containing six vectors.
``bs[i][j]`` corresponds to ``b_j`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array` holding input values.
Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
When sequences have different lengths, sort them in descending
order by length.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
units. Note that ``B_t`` is the same value as ``xs[t]``.
""")
_docs.set_doc(
chainerx.n_step_rnn,
"""n_step_rnn(n_layers, hx, ws, bs, xs, activation='tanh')
Stacked Uni-directional RNN function for sequence inputs.
This function calculates stacked Uni-directional RNN with sequences.
This function gets an initial hidden state :math:`h_0`,
an initial cell state :math:`c_0`, an input sequence :math:`x`,
weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` and :math:`c_t` for each
time :math:`t` from input :math:`x_t`.
.. math::
h_t = f(W_0 x_t + W_1 h_{t-1} + b_0 + b_1)
where :math:`f` is an activation function.
Weight matrices :math:`W` contains two matrices :math:`W_0` and
:math:`W_1`. :math:`W_0` is a parameter for an input sequence.
:math:`W_1` is a parameter for a hidden state.
Bias matrices :math:`b` contains two matrices :math:`b_0` and :math:`b_1`.
:math:`b_0` is a parameter for an input sequence.
:math:`b_1` is a parameter for a hidden state.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Two weight matrices and two bias vectors are
required for each layer. So, when :math:`S` layers exist, you need to
prepare :math:`2S` weight matrices and :math:`2S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i]`` represents weights for i-th layer.
Each ``ws[i]`` is a list containing two matrices.
``ws[i][j]`` corresponds to ``W_j`` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 1`` are ``(N, I)``-shaped as they
are multiplied with input variables. All other matrices have
``(N, N)`` shape.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[i]`` represents biases for the i-th layer.
Each ``bs[i]`` is a list containing two vectors.
``bs[i][j]`` corresponds to ``b_j`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array` holding input values.
Each element ``xs[t]`` holds input value for time ``t``.
Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
When sequences have different lengths, sort them in descending
order by length.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
activation (str): Activation function name.
Please select ``tanh`` or ``relu``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
units. Note that ``B_t`` is the same value as ``xs[t]``.
""")
_docs.set_doc(
chainerx.n_step_birnn,
"""n_step_birnn(n_layers, hx, ws, bs, xs, activation='tanh')
Stacked Bi-directional RNN function for sequence inputs.
This function calculates stacked Bi-directional RNN with sequences.
This function gets an initial hidden state :math:`h_0`, an initial
cell state :math:`c_0`, an input sequence :math:`x`,
weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` and :math:`c_t` for each
time :math:`t` from input :math:`x_t`.
.. math::
h^{f}_t &=& f(W^{f}_0 x_t + W^{f}_1 h_{t-1} + b^{f}_0 + b^{f}_1), \\\\
h^{b}_t &=& f(W^{b}_0 x_t + W^{b}_1 h_{t-1} + b^{b}_0 + b^{b}_1), \\\\
h_t &=& [h^{f}_t; h^{b}_t], \\\\
where :math:`f` is an activation function.
Weight matrices :math:`W` contains two matrices :math:`W^{f}` and
:math:`W^{b}`. :math:`W^{f}` is weight matrices for forward directional
RNN. :math:`W^{b}` is weight matrices for backward directional RNN.
:math:`W^{f}` contains :math:`W^{f}_0` for an input sequence and
:math:`W^{f}_1` for a hidden state.
:math:`W^{b}` contains :math:`W^{b}_0` for an input sequence and
:math:`W^{b}_1` for a hidden state.
Bias matrices :math:`b` contains two matrices :math:`b^{f}` and
:math:`b^{b}`. :math:`b^{f}` contains :math:`b^{f}_0` for an input sequence
and :math:`b^{f}_1` for a hidden state.
:math:`b^{b}` contains :math:`b^{b}_0` for an input sequence and
:math:`b^{b}_1` for a hidden state.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Two weight matrices and two bias vectors are
required for each layer. So, when :math:`S` layers exist, you need to
prepare :math:`2S` weight matrices and :math:`2S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(2S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units. Because of bi-direction, the
first dimension length is ``2S``.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i + di]`` represents weights for i-th layer.
Note that ``di = 0`` for forward-RNN and ``di = 1`` for
backward-RNN.
Each ``ws[i + di]`` is a list containing two matrices.
``ws[i + di][j]`` corresponds to ``W^{f}_j`` if ``di = 0``
and to ``W^{b}_j`` if ``di = 1`` in the equation.
Only ``ws[0][j]`` and ``ws[1][j]`` where ``0 <= j < 1`` are
``(N, I)``-shaped as they are multiplied with input variables.
All other matrices have ``(N, N)`` shape.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[i + di]`` represents biases for the i-th layer.
Note that ``di = 0`` for forward-RNN and ``di = 1`` for
backward-RNN.
Each ``bs[i + di]`` is a list containing two vectors.
``bs[i + di][j]`` corresponds to ``b^{f}_j`` if ``di = 0``
and to ``b^{b}_j`` if ``di = 1`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array` holding input values.
Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
When sequences have different lengths, sort them in descending
order by length.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
activation (str): Activation function name.
Please select ``tanh`` or ``relu``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t``
is mini-batch size for time ``t``, and ``N`` is size of hidden
units. Note that ``B_t`` is the same value as ``xs[t]``.
""")
|
mit
| 5,753,944,707,664,617,000
| 31.275539
| 86
| 0.637194
| false
| 3.433753
| false
| false
| false
|
jeremiedecock/snippets
|
python/pygtk/python_gtk3_pygobject/tree_view_cellrender_text_ellipsize.py
|
1
|
2818
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
This is a simple Python GTK+3 TreeView CellRenderText snippet.
See: http://python-gtk-3-tutorial.readthedocs.org/en/latest/cellrenderers.html#cellrenderertext
"""
from gi.repository import Gtk as gtk
from gi.repository import Pango as pango
# Countries, population (as in 2015) and continent.
DATA_LIST = [("China", 1370130000, "Asia"),
("India", 1271980000, "Asia"),
("United States", 321107000, "North America"),
("Indonesia", 255461700, "Asia"),
("Brazil", 204388000, "South America"),
("Pakistan", 189936000, "Asia"),
("Nigeria", 183523000, "Africa"),
("Bangladesh", 158425000, "Asia"),
("Russia", 146267288, "Eurasia"),
("Japan", 126880000, "Asia")]
def main():
window = gtk.Window()
window.set_default_size(300, 450)
window.set_border_width(18)
# Creating the ListStore model
liststore = gtk.ListStore(str, int, str)
for item in DATA_LIST:
liststore.append(list(item))
# Creating the treeview and add the columns
treeview = gtk.TreeView(liststore)
for column_index, column_title in enumerate(["Country", "Population", "Continent"]):
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn(column_title, renderer, text=column_index)
column.set_resizable(True) # Let the column be resizable
# Use ellipsize for the "Population" and "Continent" columns
if column_title in ("Population", "Continent"):
renderer.set_property("ellipsize", pango.EllipsizeMode.END)
renderer.set_property("ellipsize-set", True)
if column_title == "Population":
column.set_expand(True) # This column will use all the space left
treeview.append_column(column)
# Scrolled window
scrolled_window = gtk.ScrolledWindow()
scrolled_window.set_border_width(0)
scrolled_window.set_shadow_type(gtk.ShadowType.IN) # should be gtk.ShadowType.IN, gtk.ShadowType.OUT, gtk.ShadowType.ETCHED_IN or gtk.ShadowType.ETCHED_OUT
scrolled_window.set_policy(gtk.PolicyType.AUTOMATIC, gtk.PolicyType.ALWAYS) # should be gtk.PolicyType.AUTOMATIC, gtk.PolicyType.ALWAYS or gtk.PolicyType.NEVER
scrolled_window.add(treeview)
window.add(scrolled_window)
window.connect("delete-event", gtk.main_quit) # ask to quit the application when the close button is clicked
window.show_all() # display the window
gtk.main() # GTK+ main loop
if __name__ == '__main__':
main()
|
mit
| -8,133,782,412,095,077,000
| 39.228571
| 188
| 0.620384
| false
| 3.537688
| false
| false
| false
|
ikosenn/sms-log-handler
|
sms_log_handler/sms_handler.py
|
1
|
2049
|
import datetime
import logging
from typing import Dict
from .utils import import_from_string
class SMSHandler(logging.Handler):
def __init__(self, provider_config: Dict) -> None:
"""
Initializes the SMSHandler
params:
provider_config: The provider configurations.
{
provider_key: <key_id>
provider_secret: <secret_key>
provider_send_to: [<an array of phone numbers>]
}
"""
        super().__init__()
self.provider_class_str = provider_config.get(
'provider_class',
'sms_log_handler.providers.africastalking.AfricasTalkingProvider')
self.provider_class = import_from_string(self.provider_class_str)
self.key = provider_config.get('provider_key', '')
self.secret = provider_config.get('provider_secret', '')
self.phone_numbers = provider_config.get('provider_send_to', [])
def emit(self, record) -> None:
"""
Sends the message
"""
to_send = self._construct_message(record)
sms_provider = self.provider_class(self.key, self.secret)
sms_provider.send(self.phone_numbers, to_send)
def _construct_message(self, record) -> str:
"""
        Construct and format the message to be sent.
i.e
MODULE: sms_log_handler.sms_handler
LEVEL: ERROR
TIME: 21, May 2017 10:54
MESSAGE: Duplicate records found in the user model
"""
msg = (
'MODULE: {module_path}\n\nLEVEL: {level}\n\nTIME: {time}\n\n'
'MESSAGE: {msg}')
date_time = datetime.datetime.fromtimestamp(record.created)
date_time = date_time.strftime('%d, %b %Y %H:%M')
formatted_msg = msg.format(
level=record.levelname, time=date_time, msg=record.getMessage(),
module_path=record.name, line_no=record.lineno)
return formatted_msg
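# Usage sketch (illustrative; the key, secret and phone number are placeholders
# and the default provider_class is used):
if __name__ == '__main__':
    config = {
        'provider_key': 'my-key',
        'provider_secret': 'my-secret',
        'provider_send_to': ['+254700000000'],
    }
    logger = logging.getLogger(__name__)
    logger.addHandler(SMSHandler(config))
    logger.error('Duplicate records found in the user model')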
|
mit
| 6,605,223,552,672,128,000
| 33.728814
| 79
| 0.564178
| false
| 4.04142
| true
| false
| false
|
QLRace/minqlx-plugins
|
spec_delay.py
|
1
|
2290
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
"""
Stops people spectating then quickly joining the 'free' team.
This is to stop people firing a rocket, then spectating and joining then
using the knockback from the rocket which would count as a strafe time.
"""
import minqlx
class spec_delay(minqlx.Plugin):
def __init__(self):
super().__init__()
self.add_hook("player_disconnect", self.handle_player_disconnect)
self.add_hook("team_switch_attempt", self.handle_team_switch_attempt)
self.add_hook("team_switch", self.handle_team_switch)
self.spec_delays = set()
def handle_player_disconnect(self, player, reason):
"""Sets spec delay when a player disconnects."""
self.spec_delays.add(player.steam_id)
self.allow_join(player)
def handle_team_switch_attempt(self, player, old_team, new_team):
"""Stops the player joining if spec delay is true."""
if new_team != "spectator" and old_team == "spectator" and player.steam_id in self.spec_delays:
player.tell("^6You must wait 15 seconds before joining after spectating")
return minqlx.RET_STOP_EVENT
def handle_team_switch(self, player, old_team, new_team):
"""Sets a delay on joining when the player joins spectator"""
if new_team == "spectator" and old_team == "free":
# Set spec delay
self.spec_delays.add(player.steam_id)
self.allow_join(player)
# This is only needed to stop \team s; team f
elif new_team == "free" and old_team == "spectator" and player.steam_id in self.spec_delays:
player.tell("^6You must wait 15 seconds before joining after spectating")
return minqlx.RET_STOP_EVENT
@minqlx.delay(15.1)
def allow_join(self, player):
"""Allows the player to join after 15.1 seconds."""
try:
self.spec_delays.remove(player.steam_id)
player.center_print("^6You can join now")
except KeyError:
return
except AttributeError:
return
|
gpl-3.0
| 6,870,827,062,680,719,000
| 41.407407
| 103
| 0.653275
| false
| 3.760263
| false
| false
| false
|
cmancone/mygrations
|
tests/formats/mysql/definitions/test_database.py
|
1
|
3304
|
import unittest
from mygrations.formats.mysql.file_reader.database import database as database_reader
from mygrations.formats.mysql.file_reader.create_parser import create_parser
class test_database(unittest.TestCase):
def _get_sample_db(self):
strings = [
"""
CREATE TABLE `logs` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`message` TEXT NOT NULL,
`traceback` text,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
""", """
CREATE TABLE `more_logs` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`more_messages` TEXT NOT NULL,
`traceback` text,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
]
return database_reader(strings)
def test_simple(self):
db1 = self._get_sample_db()
strings = [
"""
CREATE TABLE `logs` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`message` TEXT NOT NULL,
`traceback` text,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
""", """
CREATE TABLE `less_logs` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`more_messages` TEXT NOT NULL,
`traceback` text,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
]
db2 = database_reader(strings)
#differences = db2 - db1
#self.assertEquals( [], differences )
def test_add_table(self):
db = self._get_sample_db()
new_table = create_parser()
new_table.parse(
"""CREATE TABLE `log_changes` (
`id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT,
`log_id` INT(10) UNSIGNED NOT NULL,
`type_id` INT(10) UNSIGNED NOT NULL,
`change` VARCHAR(255),
PRIMARY KEY (id),
KEY `log_changes_log_id` (`log_id`),
KEY `log_changes_type_id` (`type_id`)
);
"""
)
db.add_table(new_table)
self.assertEquals(3, len(db.tables))
self.assertTrue('log_changes' in db.tables)
self.assertEquals(new_table, db.tables['log_changes'])
def test_remove_table(self):
db1 = self._get_sample_db()
db1.remove_table(db1.tables['more_logs'])
self.assertEquals(1, len(db1.tables))
self.assertTrue('logs' in db1.tables)
self.assertFalse('more_logs' in db1.tables)
def test_exception_on_remove_invalid_table(self):
db1 = self._get_sample_db()
new_table = create_parser()
new_table.parse(
"""CREATE TABLE `log_changes` (
`id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT,
`log_id` INT(10) UNSIGNED NOT NULL,
`type_id` INT(10) UNSIGNED NOT NULL,
`change` VARCHAR(255),
PRIMARY KEY (id),
KEY `log_changes_log_id` (`log_id`),
KEY `log_changes_type_id` (`type_id`)
);
"""
)
with self.assertRaises(ValueError):
db1.remove_table(new_table)
|
mit
| 6,008,796,254,061,185,000
| 31.07767
| 85
| 0.521186
| false
| 3.882491
| true
| false
| false
|
Tsumiki-Chan/Neko-Chan
|
commands/purge.py
|
1
|
1524
|
from functions import search, logger
DESC = "Delete x messages"
USAGE="purge [*amount*] [*user* `optional`]"
async def init(bot):
chat=bot.message.channel
try:
if len(bot.args) == 0:
await bot.sendMessage( "Didn't receive any arguments! Usage: {}".format(USAGE))
return False
try:
bot.args[0] = int(bot.args[0])
except:
await bot.sendMessage( "`{}` is not a valid number.".format(bot.args[0]))
return False
if len(bot.args) > 1:
if len(bot.message.raw_mentions)>0:
user = await search.user(chat, bot.message.raw_mentions[0])
else:
user = list(bot.args)
user.pop(0)
user = await search.user(chat, " ".join(user))
if user is not None:
def is_me(m):
check = (m.author == user)
if check:
bot.args[0] = bot.args[0]-1
return (bot.args[0]>=0)
await bot.client.purge_from(chat, limit=500, check=is_me)
#await bot.sendMessage( user.display_name))
else:
await bot.sendMessage( "Could not find any user with \"`{}`\"".format(user))
return False
else:
await bot.client.purge_from(chat, limit=bot.args[0]+1, check=None)
return False
except Exception:
logger.PrintException(bot.message)
return False
|
gpl-3.0
| -421,751,238,183,873,100
| 33.636364
| 92
| 0.509186
| false
| 3.917738
| false
| false
| false
|
mSOHU/http2
|
test/benchmark2.py
|
1
|
1422
|
# -*- coding: utf-8 -*-
"""
copied from https://github.com/bdarnell/tornado_http2/blob/master/tornado_http2/test/benchmark.py
"""
import time
import logging
from tornado.ioloop import IOLoop
from tornado.options import define, options, parse_command_line, enable_pretty_logging
from http2 import SimpleAsyncHTTP2Client
logging.getLogger('http2').setLevel(logging.INFO)
enable_pretty_logging()
define('n', help='number of queries', default=1000)
define('h', help='host', default='http2.akamai.com')
define('p', help='port', default=None, type=int)
define('s', help='use https, [1|0]', default=True)
define('c', help='max streams concurrency', default=30)
done_count = [0]
io_loop = IOLoop.instance()
def callback(value):
done_count[0] += 1
if done_count[0] == options.n:
io_loop.stop()
elapsed = time.time() - start_time
print 'HTTP/2: %d requests in %0.3fs: %f QPS' % (options.n, elapsed,
options.n / elapsed)
if __name__ == '__main__':
options.logging = "info"
parse_command_line()
client = SimpleAsyncHTTP2Client(
host=options.h, port=options.p,
secure=options.s, max_streams=options.c,
connect_timeout=5, enable_push=False,
initial_window_size=2**24-1,
)
start_time = time.time()
for i in range(options.n):
io_loop.add_callback(lambda: client.fetch('/', callback=callback))
io_loop.start()
|
apache-2.0
| -6,296,171,402,591,293,000
| 25.830189
| 97
| 0.658228
| false
| 3.22449
| false
| false
| false
|
masasin/advent_of_code_2015
|
day_11.py
|
1
|
3790
|
"""
http://adventofcode.com/day/11
--- Day 11: Corporate Policy ---
Santa's previous password expired, and he needs help choosing a new one.
To help him remember his new password after the old one expires, Santa has
devised a method of coming up with a password based on the previous one.
Corporate policy dictates that passwords must be exactly eight lowercase letters
(for security reasons), so he finds his new password by incrementing his old
password string repeatedly until it is valid.
Incrementing is just like counting with numbers: xx, xy, xz, ya, yb, and so on.
Increase the rightmost letter one step; if it was z, it wraps around to a, and
repeat with the next letter to the left until one doesn't wrap around.
Unfortunately for Santa, a new Security-Elf recently started, and he has imposed
some additional password requirements:
- Passwords must include one increasing straight of at least three letters,
like abc, bcd, cde, and so on, up to xyz. They cannot skip letters; abd
doesn't count.
- Passwords may not contain the letters i, o, or l, as these letters can be
mistaken for other characters and are therefore confusing.
- Passwords must contain at least two different, non-overlapping pairs of
letters, like aa, bb, or zz.
For example:
- hijklmmn meets the first requirement (because it contains the straight
hij) but fails the second requirement (because it contains i and l).
- abbceffg meets the third requirement (because it repeats bb and ff) but
fails the first requirement.
- abbcegjk fails the third requirement, because it only has one double
letter (bb).
- The next password after abcdefgh is abcdffaa.
- The next password after ghijklmn is ghjaabcc, because you eventually skip
all the passwords that start with ghi..., since i is not allowed.
Given Santa's current password (your puzzle input), what should his next
password be?
--- Part Two ---
Santa's password expired again. What's the next one?
"""
import re
from string import ascii_lowercase
def find_next_password(password, n=1):
for i in range(n):
password = increment_password(password)
while not validate(password):
password = increment_password(password)
return password
def validate(password):
# Requirement 2
if re.search(r"[iol]", password):
return False
# Requirement 1
for i in range(len(password) - 2):
if password[i:i+3] in ascii_lowercase:
break
else:
return False
# Requirement 3
return True if re.search(r"(\w)\1.*(\w)\2", password) else False
def increment_password(password):
if password.endswith("z"):
i_z = password.index("z")
n_z = len(password) - i_z
boundary_letter = password[i_z - 1]
return password[:i_z - 1] + next_letter(boundary_letter) + "a" * n_z
else:
return password[:-1] + next_letter(password[-1])
def next_letter(c):
try:
return ascii_lowercase[ascii_lowercase.index(c) + 1]
except IndexError: # z
return "a"
def part_one():
with open("inputs/day_11_input.txt") as fin:
password = fin.readline().strip()
print("Next password: {}".format(find_next_password(password)))
def part_two():
with open("inputs/day_11_input.txt") as fin:
password = fin.readline().strip()
print("Next password: {}".format(find_next_password(password, 2)))
def main():
with open("inputs/day_11_input.txt") as fin:
password = fin.readline().strip()
next_password = find_next_password(password)
print("Next password: {}".format(next_password))
print("Next next password: {}".format(find_next_password(next_password)))
if __name__ == "__main__":
main()
|
mit
| 9,128,645,441,959,390,000
| 31.956522
| 80
| 0.683113
| false
| 3.79
| false
| false
| false
|
usc-isi/extra-specs
|
nova/api/openstack/compute/contrib/quotas.py
|
1
|
3875
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
from nova import quota
authorize = extensions.extension_authorizer('compute', 'quotas')
class QuotaTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('quota_set', selector='quota_set')
root.set('id')
for resource in quota.quota_resources:
elem = xmlutil.SubTemplateElement(root, resource)
elem.text = resource
return xmlutil.MasterTemplate(root, 1)
class QuotaSetsController(object):
def _format_quota_set(self, project_id, quota_set):
"""Convert the quota object to a result dict"""
result = dict(id=str(project_id))
for resource in quota.quota_resources:
result[resource] = quota_set[resource]
return dict(quota_set=result)
def _validate_quota_limit(self, limit):
# NOTE: -1 is a flag value for unlimited
if limit < -1:
msg = _("Quota limit must be -1 or greater.")
raise webob.exc.HTTPBadRequest(explanation=msg)
@wsgi.serializers(xml=QuotaTemplate)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
sqlalchemy_api.authorize_project_context(context, id)
return self._format_quota_set(id,
quota.get_project_quotas(context, id))
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
@wsgi.serializers(xml=QuotaTemplate)
def update(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
project_id = id
for key in body['quota_set'].keys():
if key in quota.quota_resources:
value = int(body['quota_set'][key])
self._validate_quota_limit(value)
try:
db.quota_update(context, project_id, key, value)
except exception.ProjectQuotaNotFound:
db.quota_create(context, project_id, key, value)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
return {'quota_set': quota.get_project_quotas(context, project_id)}
@wsgi.serializers(xml=QuotaTemplate)
def defaults(self, req, id):
authorize(req.environ['nova.context'])
return self._format_quota_set(id, quota._get_default_quotas())
class Quotas(extensions.ExtensionDescriptor):
"""Quotas management support"""
name = "Quotas"
alias = "os-quota-sets"
namespace = "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1"
updated = "2011-08-08T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-quota-sets',
QuotaSetsController(),
member_actions={'defaults': 'GET'})
resources.append(res)
return resources
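# Illustrative update request body handled by QuotaSetsController.update()
# (resource names are examples only; the accepted keys are whatever
# quota.quota_resources contains at runtime):
#
#   {"quota_set": {"instances": 10, "cores": 20}}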
|
apache-2.0
| -5,099,529,885,917,966,000
| 33.598214
| 79
| 0.634065
| false
| 4.162191
| false
| false
| false
|
isotoma/alm.solrindex
|
alm/solrindex/schema.py
|
1
|
2814
|
"""Parser of a Solr schema.xml"""
from alm.solrindex.interfaces import ISolrField
from alm.solrindex.interfaces import ISolrFieldHandler
from alm.solrindex.interfaces import ISolrSchema
from elementtree.ElementTree import parse
from zope.component import getUtility
from zope.component import queryUtility
from zope.interface import implements
import logging
import urllib2
log = logging.getLogger(__name__)
class SolrSchema(object):
implements(ISolrSchema)
uniqueKey = None
defaultSearchField = None
def __init__(self, solr_uri=None):
self.fields = []
if solr_uri:
f = self.download_from(solr_uri)
try:
self.xml_init(f)
finally:
f.close()
def download_from(self, solr_uri):
"""Get schema.xml from a running Solr instance"""
schema_uris = ('%s/admin/file/?file=schema.xml', # solr 1.3
'%s/admin/get-file.jsp?file=schema.xml') # solr 1.2
for i, uri in enumerate(schema_uris):
uri = uri % solr_uri
log.debug('getting schema from %s', uri)
try:
f = urllib2.urlopen(uri)
except urllib2.URLError:
if i < len(schema_uris) - 1:
# try the next URI
continue
raise
return f
def xml_init(self, f):
"""Initialize this instance from a Solr schema.xml"""
tree = parse(f)
e = tree.find('uniqueKey')
if e is not None:
self.uniqueKey = e.text.strip()
e = tree.find('defaultSearchField')
if e is not None:
self.defaultSearchField = e.text.strip()
types = {}
for e in tree.findall('types/fieldType'):
types[e.attrib['name']] = e
for e in tree.findall('fields/field'):
t = types[e.attrib['type']]
self.fields.append(SolrField(e, t))
class SolrField(object):
implements(ISolrField)
_boolean_attrs = (
'indexed', 'stored', 'required', 'multiValued',
)
def __init__(self, elem, fieldType):
self.name = elem.attrib['name']
self.type = elem.attrib['type']
self.java_class = fieldType.attrib['class']
for attr in self._boolean_attrs:
value = elem.get(attr)
if value is not None:
value = {'true': True, 'false': False}[value.lower()]
setattr(self, attr, value)
handler = queryUtility(ISolrFieldHandler, name=self.name)
if handler is None:
handler = queryUtility(
ISolrFieldHandler, name=self.java_class)
if handler is None:
handler = getUtility(ISolrFieldHandler)
self.handler = handler
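# Usage sketch (illustrative; the URI below is a placeholder for a running
# Solr instance):
if __name__ == '__main__':
    schema = SolrSchema('http://localhost:8983/solr')
    print 'uniqueKey:', schema.uniqueKey
    for field in schema.fields:
        print field.name, field.type, field.java_class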
|
bsd-3-clause
| 2,881,713,634,405,479,000
| 29.586957
| 75
| 0.569652
| false
| 3.930168
| false
| false
| false
|
yaukwankiu/armor
|
tests/modifiedMexicanHatTest5a.py
|
1
|
2438
|
# supplementing modifiedMexicanHatTest5.py
# outputing the charts, given the results
import numpy as np
import matplotlib.pyplot as plt
from armor import pattern
from armor import defaultParameters as dp
dbz = pattern.DBZ
DS = pattern.DBZstream
dataFolder = dp.root + "labLogs/2014-5-2-modifiedMexicanHatTest5/"
outputFolder= dataFolder
WRFnames = [ "WRF"+("0"+str(v))[-2:] for v in range(1,21)]
sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64, 80, 128, 160, 256, 320,]
allWRFsStreamMean = 0.
dbzCount = 0
for WRFname in WRFnames:
ds = DS(dataFolder=dataFolder,
name="kongrey" + WRFname,
outputFolder="",
imageFolder="",
key1=WRFname, # keywords to pick out specific files
key2="LOGspec.dat",
key3="kongreywrf", #safety check
preload=True,
imageExtension = '.png', #added 2013-09-27
dataExtension = '.dat',
)
print "\n==================\nSaving histograms for ", ds.name
for dbzpattern in ds:
dbzCount += 1
streamMeanUpdate = np.array([(dbzpattern.matrix==v).sum() for v in sigmas])
allWRFsStreamMean = 1.* ((allWRFsStreamMean*(dbzCount -1)) + streamMeanUpdate ) / dbzCount
histogramName = "kongreywrf" + dbzpattern.dataTime + WRFname + "_LOGspec_histogram"+ ds.imageExtension
print dbzpattern.name, "->", histogramName
plt.clf()
dbzpattern.histogram(display=False, outputPath=outputFolder+histogramName)
plt.close()
plt.plot(sigmas, allWRFsStreamMean)
plt.title(ds.name + '- average laplacian-of-gaussian max-response spectrum for ' +str(dbzCount) + 'WRF patterns')
plt.savefig(outputFolder + ds.name + "_all_wrfs_average_LoG_max_response spectrum.png")
plt.close()
"""
# run modifiedMexicanHatTest6a.py and then:
allWRFsStreamMean = array([ 2562.4375, 655.5625, 526.15 , 741.51 , 858.6425,
1457.79 , 1710.095 , 2971.355 , 3561.9125, 4406.915 ,
1488.0375, 59.5925, 0. , 0. , 0. , 0. ])
streamMeanCOMPREF = streamMean
sigmas = np.array(sigmas)
plt.close()
plt.plot(sigmas, streamMeanCOMPREF)
plt.plot(sigmas[:-4]*4, allWRFsStreamMean[:-4]*16)
plt.title("COMPREF and WRFs mean max-response LOG spectra from Kong-Rey data")
plt.show()
"""
|
cc0-1.0
| -1,526,503,526,948,186,400
| 38.967213
| 113
| 0.609516
| false
| 3.101781
| false
| false
| false
|
matthiaskrgr/cppcheck
|
addons/naming.py
|
1
|
2383
|
#!/usr/bin/env python
#
# cppcheck addon for naming conventions
#
# Example usage (variable name must start with lowercase, function name must start with uppercase):
# $ cppcheck --dump path-to-src/
# $ python addons/naming.py --var='[a-z].*' --function='[A-Z].*' path-to-src/*.dump
#
import cppcheckdata
import sys
import re
RE_VARNAME = None
RE_PRIVATE_MEMBER_VARIABLE = None
RE_FUNCTIONNAME = None
for arg in sys.argv[1:]:
if arg[:6] == '--var=':
RE_VARNAME = arg[6:]
elif arg.startswith('--private-member-variable='):
RE_PRIVATE_MEMBER_VARIABLE = arg[arg.find('=')+1:]
elif arg[:11] == '--function=':
RE_FUNCTIONNAME = arg[11:]
FoundError = False
def reportError(token, severity, msg):
global FoundError
FoundError = True
sys.stderr.write(
'[' + token.file + ':' + str(token.linenr) + '] (' + severity + ') naming.py: ' + msg + '\n')
for arg in sys.argv[1:]:
if not arg[-5:] == '.dump':
continue
print('Checking ' + arg + '...')
data = cppcheckdata.parsedump(arg)
for cfg in data.configurations:
if len(data.configurations) > 1:
print('Checking ' + arg + ', config "' + cfg.name + '"...')
if RE_VARNAME:
for var in cfg.variables:
res = re.match(RE_VARNAME, var.nameToken.str)
if not res:
reportError(var.typeStartToken, 'style', 'Variable ' +
var.nameToken.str + ' violates naming convention')
if RE_PRIVATE_MEMBER_VARIABLE:
for var in cfg.variables:
if (var.access is None) or var.access != 'Private':
continue
res = re.match(RE_PRIVATE_MEMBER_VARIABLE, var.nameToken.str)
if not res:
reportError(var.typeStartToken, 'style', 'Private member variable ' +
var.nameToken.str + ' violates naming convention')
if RE_FUNCTIONNAME:
for scope in cfg.scopes:
if scope.type == 'Function':
res = re.match(RE_FUNCTIONNAME, scope.className)
if not res:
reportError(
scope.bodyStart, 'style', 'Function ' + scope.className + ' violates naming convention')
if FoundError:
print('FoundError')
sys.exit(1)
|
gpl-3.0
| -6,974,778,811,751,254,000
| 34.567164
| 116
| 0.5577
| false
| 3.849758
| false
| false
| false
|
enthought/traitsgui
|
enthought/pyface/action/action_item.py
|
1
|
4849
|
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" An action manager item that represents an actual action. """
# Enthought library imports.
from enthought.traits.api import Any, Instance, List, Property, Str
# Local imports.
from action import Action
from action_manager_item import ActionManagerItem
# Import the toolkit specific versions of the internal classes.
from enthought.pyface.toolkit import toolkit_object
_MenuItem = toolkit_object('action.action_item:_MenuItem')
_Tool = toolkit_object('action.action_item:_Tool')
_PaletteTool = toolkit_object('action.action_item:_PaletteTool')
class ActionItem(ActionManagerItem):
""" An action manager item that represents an actual action. """
#### 'ActionManagerItem' interface ########################################
# The item's unique identifier ('unique' in this case means unique within
# its group).
id = Property(Str)
#### 'ActionItem' interface ###############################################
# The action!
action = Instance(Action)
# The toolkit specific control created for this item.
control = Any
# The toolkit specific Id of the control created for this item.
#
# We have to keep the Id as well as the control because wx tool bar tools
# are created as 'wxObjectPtr's which do not have Ids, and the Id is
# required to manipulate the state of a tool via the tool bar 8^(
# FIXME v3: Why is this part of the public interface?
control_id = Any
#### Private interface ####################################################
# All of the internal instances that wrap this item.
_wrappers = List(Any)
###########################################################################
# 'ActionManagerItem' interface.
###########################################################################
#### Trait properties #####################################################
def _get_id(self):
""" Return's the item's Id. """
return self.action.id
#### Trait change handlers ################################################
def _enabled_changed(self, trait_name, old, new):
""" Static trait change handler. """
self.action.enabled = new
return
def _visible_changed(self, trait_name, old, new):
""" Static trait change handler. """
        self.action.visible = new
return
###########################################################################
# 'ActionItem' interface.
###########################################################################
def add_to_menu(self, parent, menu, controller):
""" Adds the item to a menu. """
if (controller is None) or controller.can_add_to_menu(self.action):
wrapper = _MenuItem(parent, menu, self, controller)
# fixme: Martin, who uses this information?
if controller is None:
self.control = wrapper.control
self.control_id = wrapper.control_id
self._wrappers.append(wrapper)
return
def add_to_toolbar(self, parent, tool_bar, image_cache, controller,
show_labels=True):
""" Adds the item to a tool bar. """
if (controller is None) or controller.can_add_to_toolbar(self.action):
wrapper = _Tool(
parent, tool_bar, image_cache, self, controller, show_labels
)
# fixme: Martin, who uses this information?
if controller is None:
self.control = wrapper.control
self.control_id = wrapper.control_id
self._wrappers.append(wrapper)
return
def add_to_palette(self, tool_palette, image_cache, show_labels=True):
""" Adds the item to a tool palette. """
wrapper = _PaletteTool(tool_palette, image_cache, self, show_labels)
self._wrappers.append(wrapper)
return
def destroy(self):
""" Called when the action is no longer required.
By default this method calls 'destroy' on the action itself.
"""
self.action.destroy()
return
#### EOF ######################################################################
|
bsd-3-clause
| 2,791,425,329,490,104,300
| 32.673611
| 79
| 0.541761
| false
| 4.849
| false
| false
| false
|
simleo/pydoop-features
|
pyfeatures/app/deserialize.py
|
1
|
2362
|
# BEGIN_COPYRIGHT
#
# Copyright (C) 2014-2017 Open Microscopy Environment:
# - University of Dundee
# - CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""\
Deserialize BioImgPlane records.
"""
import sys
import os
import warnings
from contextlib import closing
import errno
try:
from pyavroc import AvroFileReader
except ImportError:
from pyfeatures.pyavroc_emu import AvroFileReader
warnings.warn("pyavroc not found, using standard avro lib\n")
import numpy as np
from libtiff import TIFF
from pyfeatures.bioimg import BioImgPlane
# no schema needed for deserialization
def iterplanes(avro_file):
with open(avro_file, 'rb') as f:
reader = AvroFileReader(f)
for r in reader:
yield BioImgPlane(r)
def run(logger, args, extra_argv=None):
try:
os.makedirs(args.out_dir)
except OSError as e:
if e.errno != errno.EEXIST:
sys.exit('Cannot create output dir: %s' % e)
for p in iterplanes(args.avro_file):
pixels = p.get_xy()
out_tag = '%s-z%04d-c%04d-t%04d' % (p.name, p.z, p.c, p.t)
logger.info("writing plane %s", out_tag)
if args.img:
out_fn = os.path.join(args.out_dir, '%s.tif' % out_tag)
with closing(TIFF.open(out_fn, mode="w")) as fo:
fo.write_image(pixels)
else:
out_fn = os.path.join(args.out_dir, '%s.npy' % out_tag)
np.save(out_fn, pixels)
return 0
def add_parser(subparsers):
parser = subparsers.add_parser("deserialize", description=__doc__)
parser.add_argument('avro_file', metavar='AVRO_FILE')
parser.add_argument('out_dir', metavar='OUT_DIR')
parser.add_argument('--img', action='store_true',
help='write images instead of .npy dumps')
parser.set_defaults(func=run)
return parser
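# Typical invocation (illustrative; assumes the "deserialize" subcommand has
# been registered with the package's command-line front end via add_parser):
#
#   <cli> deserialize planes.avro out_dir --img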
|
apache-2.0
| 2,729,805,358,433,821,700
| 29.675325
| 77
| 0.664691
| false
| 3.413295
| false
| false
| false
|
lakshmi-kannan/st2
|
st2common/st2common/models/api/action.py
|
1
|
24297
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from st2common.util import isotime
from st2common.util import schema as util_schema
from st2common import log as logging
from st2common.constants.pack import DEFAULT_PACK_NAME
from st2common.models.api.base import BaseAPI
from st2common.models.api.base import APIUIDMixin
from st2common.models.api.tag import TagsHelper
from st2common.models.api.notification import (NotificationSubSchemaAPI, NotificationsHelper)
from st2common.models.db.action import ActionDB
from st2common.models.db.actionalias import ActionAliasDB
from st2common.models.db.executionstate import ActionExecutionStateDB
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.db.runner import RunnerTypeDB
from st2common.constants.action import LIVEACTION_STATUSES
from st2common.models.system.common import ResourceReference
__all__ = [
'ActionAPI',
'ActionCreateAPI',
'LiveActionAPI',
'LiveActionCreateAPI',
'RunnerTypeAPI',
'AliasExecutionAPI',
'ActionAliasAPI',
'ActionAliasMatchAPI'
]
LOG = logging.getLogger(__name__)
class RunnerTypeAPI(BaseAPI):
"""
    The representation of a RunnerType in the system. A RunnerType
has a one-to-one mapping to a particular ActionRunner implementation.
"""
model = RunnerTypeDB
schema = {
"title": "Runner",
"description": "A handler for a specific type of actions.",
"type": "object",
"properties": {
"id": {
"description": "The unique identifier for the action runner.",
"type": "string",
"default": None
},
"uid": {
"type": "string"
},
"name": {
"description": "The name of the action runner.",
"type": "string",
"required": True
},
"description": {
"description": "The description of the action runner.",
"type": "string"
},
"enabled": {
"description": "Enable or disable the action runner.",
"type": "boolean",
"default": True
},
"runner_module": {
"description": "The python module that implements the "
"action runner for this type.",
"type": "string",
"required": True
},
"query_module": {
"description": "The python module that implements the "
"results tracker (querier) for the runner.",
"type": "string",
"required": False
},
"runner_parameters": {
"description": "Input parameters for the action runner.",
"type": "object",
"patternProperties": {
"^\w+$": util_schema.get_action_parameters_schema()
},
'additionalProperties': False
}
},
"additionalProperties": False
}
def __init__(self, **kw):
# Ideally, you should not do that. You should not redefine __init__ to validate and then set
# default values, instead you should define defaults in schema and use BaseAPI __init__
# validator to unwrap them. The problem here is that draft schema also contains default
# values and we don't want them to be unwrapped at the same time. I've tried to remove the
# default values from draft schema, but, either because of a bug or some weird intention, it
# has continued to resolve $ref'erenced properties against the initial draft schema, not the
# modified one
for key, value in kw.items():
setattr(self, key, value)
if not hasattr(self, 'runner_parameters'):
setattr(self, 'runner_parameters', dict())
@classmethod
def to_model(cls, runner_type):
name = runner_type.name
description = runner_type.description
enabled = getattr(runner_type, 'enabled', True)
runner_module = str(runner_type.runner_module)
runner_parameters = getattr(runner_type, 'runner_parameters', dict())
query_module = getattr(runner_type, 'query_module', None)
model = cls.model(name=name, description=description, enabled=enabled,
runner_module=runner_module, runner_parameters=runner_parameters,
query_module=query_module)
return model
class ActionAPI(BaseAPI, APIUIDMixin):
"""
The system entity that represents a Stack Action/Automation in the system.
"""
model = ActionDB
schema = {
"title": "Action",
"description": "An activity that happens as a response to the external event.",
"type": "object",
"properties": {
"id": {
"description": "The unique identifier for the action.",
"type": "string"
},
"ref": {
"description": "System computed user friendly reference for the action. \
Provided value will be overridden by computed value.",
"type": "string"
},
"uid": {
"type": "string"
},
"name": {
"description": "The name of the action.",
"type": "string",
"required": True
},
"description": {
"description": "The description of the action.",
"type": "string"
},
"enabled": {
"description": "Enable or disable the action from invocation.",
"type": "boolean",
"default": True
},
"runner_type": {
"description": "The type of runner that executes the action.",
"type": "string",
"required": True
},
"entry_point": {
"description": "The entry point for the action.",
"type": "string",
"default": ""
},
"pack": {
"description": "The content pack this action belongs to.",
"type": "string",
"default": DEFAULT_PACK_NAME
},
"parameters": {
"description": "Input parameters for the action.",
"type": "object",
"patternProperties": {
"^\w+$": util_schema.get_action_parameters_schema()
},
'additionalProperties': False,
"default": {}
},
"tags": {
"description": "User associated metadata assigned to this object.",
"type": "array",
"items": {"type": "object"}
},
"notify": {
"description": "Notification settings for action.",
"type": "object",
"properties": {
"on-complete": NotificationSubSchemaAPI,
"on-failure": NotificationSubSchemaAPI,
"on-success": NotificationSubSchemaAPI
},
"additionalProperties": False
}
},
"additionalProperties": False
}
def __init__(self, **kw):
for key, value in kw.items():
setattr(self, key, value)
if not hasattr(self, 'parameters'):
setattr(self, 'parameters', dict())
if not hasattr(self, 'entry_point'):
setattr(self, 'entry_point', '')
@classmethod
def from_model(cls, model, mask_secrets=False):
action = cls._from_model(model)
action['runner_type'] = action['runner_type']['name']
action['tags'] = TagsHelper.from_model(model.tags)
if getattr(model, 'notify', None):
action['notify'] = NotificationsHelper.from_model(model.notify)
return cls(**action)
@classmethod
def to_model(cls, action):
name = getattr(action, 'name', None)
description = getattr(action, 'description', None)
enabled = bool(getattr(action, 'enabled', True))
entry_point = str(action.entry_point)
pack = str(action.pack)
runner_type = {'name': str(action.runner_type)}
parameters = getattr(action, 'parameters', dict())
tags = TagsHelper.to_model(getattr(action, 'tags', []))
ref = ResourceReference.to_string_reference(pack=pack, name=name)
if getattr(action, 'notify', None):
notify = NotificationsHelper.to_model(action.notify)
else:
            # We use an embedded document model for ``notify`` in the action
            # model. If ``notify`` is set to None, Mongoengine interprets ``None``
            # as an unmodified field and therefore doesn't delete the embedded
            # document, so we need to use an empty document instead.
notify = NotificationsHelper.to_model({})
model = cls.model(name=name, description=description, enabled=enabled,
entry_point=entry_point, pack=pack, runner_type=runner_type,
tags=tags, parameters=parameters, notify=notify,
ref=ref)
return model
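# Illustrative minimal document accepted by ActionAPI.schema (all values are
# placeholders, not taken from a real pack):
#
# {
#     "name": "greet",
#     "pack": "examples",
#     "runner_type": "local-shell-cmd",
#     "entry_point": "",
#     "enabled": true,
#     "parameters": {}
# }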
class ActionCreateAPI(ActionAPI, APIUIDMixin):
"""
API model for create action operation.
"""
schema = copy.deepcopy(ActionAPI.schema)
schema['properties']['data_files'] = {
'description': 'Optional action script and data files which are written to the filesystem.',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'file_path': {
'type': 'string',
'required': True
},
'content': {
'type': 'string',
'required': True
},
},
'additionalProperties': False
},
'default': []
}
class ActionUpdateAPI(ActionAPI, APIUIDMixin):
"""
API model for update action operation.
"""
schema = copy.deepcopy(ActionCreateAPI.schema)
del schema['properties']['pack']['default']
class LiveActionAPI(BaseAPI):
"""The system entity that represents the execution of a Stack Action/Automation
in the system.
"""
model = LiveActionDB
schema = {
"title": "liveaction",
"description": "An execution of an action.",
"type": "object",
"properties": {
"id": {
"description": "The unique identifier for the action execution.",
"type": "string"
},
"status": {
"description": "The current status of the action execution.",
"type": "string",
"enum": LIVEACTION_STATUSES
},
"start_timestamp": {
"description": "The start time when the action is executed.",
"type": "string",
"pattern": isotime.ISO8601_UTC_REGEX
},
"end_timestamp": {
"description": "The timestamp when the action has finished.",
"type": "string",
"pattern": isotime.ISO8601_UTC_REGEX
},
"action": {
"description": "Reference to the action to be executed.",
"type": "string",
"required": True
},
"parameters": {
"description": "Input parameters for the action.",
"type": "object",
"patternProperties": {
"^\w+$": {
"anyOf": [
{"type": "array"},
{"type": "boolean"},
{"type": "integer"},
{"type": "number"},
{"type": "object"},
{"type": "string"},
{"type": "null"}
]
}
},
'additionalProperties': False
},
"result": {
"anyOf": [{"type": "array"},
{"type": "boolean"},
{"type": "integer"},
{"type": "number"},
{"type": "object"},
{"type": "string"}]
},
"context": {
"type": "object"
},
"callback": {
"type": "object"
},
"runner_info": {
"type": "object"
},
"notify": {
"description": "Notification settings for liveaction.",
"type": "object",
"properties": {
"on-complete": NotificationSubSchemaAPI,
"on-failure": NotificationSubSchemaAPI,
"on-success": NotificationSubSchemaAPI
},
"additionalProperties": False
}
},
"additionalProperties": False
}
@classmethod
def from_model(cls, model, mask_secrets=False):
doc = super(cls, cls)._from_model(model, mask_secrets=mask_secrets)
if model.start_timestamp:
doc['start_timestamp'] = isotime.format(model.start_timestamp, offset=False)
if model.end_timestamp:
doc['end_timestamp'] = isotime.format(model.end_timestamp, offset=False)
if getattr(model, 'notify', None):
doc['notify'] = NotificationsHelper.from_model(model.notify)
return cls(**doc)
@classmethod
def to_model(cls, live_action):
action = live_action.action
if getattr(live_action, 'start_timestamp', None):
start_timestamp = isotime.parse(live_action.start_timestamp)
else:
start_timestamp = None
if getattr(live_action, 'end_timestamp', None):
end_timestamp = isotime.parse(live_action.end_timestamp)
else:
end_timestamp = None
status = getattr(live_action, 'status', None)
parameters = getattr(live_action, 'parameters', dict())
context = getattr(live_action, 'context', dict())
callback = getattr(live_action, 'callback', dict())
result = getattr(live_action, 'result', None)
if getattr(live_action, 'notify', None):
notify = NotificationsHelper.to_model(live_action.notify)
else:
notify = None
model = cls.model(action=action,
start_timestamp=start_timestamp, end_timestamp=end_timestamp,
status=status, parameters=parameters, context=context,
callback=callback, result=result, notify=notify)
return model
class LiveActionCreateAPI(LiveActionAPI):
"""
API model for action execution create (run action) operations.
"""
schema = copy.deepcopy(LiveActionAPI.schema)
schema['properties']['user'] = {
'description': 'User context under which action should run (admins only)',
'type': 'string',
'default': None
}
class ActionExecutionStateAPI(BaseAPI):
"""
System entity that represents state of an action in the system.
This is used only in tests for now.
"""
model = ActionExecutionStateDB
schema = {
"title": "ActionExecutionState",
"description": "Execution state of an action.",
"type": "object",
"properties": {
"id": {
"description": "The unique identifier for the action execution state.",
"type": "string"
},
"execution_id": {
"type": "string",
"description": "ID of the action execution.",
"required": True
},
"query_context": {
"type": "object",
"description": "query context to be used by querier.",
"required": True
},
"query_module": {
"type": "string",
"description": "Name of the query module.",
"required": True
}
},
"additionalProperties": False
}
@classmethod
def to_model(cls, state):
execution_id = state.execution_id
query_module = state.query_module
query_context = state.query_context
model = cls.model(execution_id=execution_id, query_module=query_module,
query_context=query_context)
return model
class ActionAliasAPI(BaseAPI, APIUIDMixin):
"""
Alias for an action in the system.
"""
model = ActionAliasDB
schema = {
"title": "ActionAlias",
"description": "Alias for an action.",
"type": "object",
"properties": {
"id": {
"description": "The unique identifier for the action alias.",
"type": "string"
},
"ref": {
"description": "System computed user friendly reference for the alias. \
Provided value will be overridden by computed value.",
"type": "string"
},
"uid": {
"type": "string"
},
"name": {
"type": "string",
"description": "Name of the action alias.",
"required": True
},
"pack": {
"description": "The content pack this actionalias belongs to.",
"type": "string",
"required": True
},
"description": {
"type": "string",
"description": "Description of the action alias.",
"default": None
},
"enabled": {
"description": "Flag indicating of action alias is enabled.",
"type": "boolean",
"default": True
},
"action_ref": {
"type": "string",
"description": "Reference to the aliased action.",
"required": True
},
"formats": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"display": {"type": "string"},
"representation": {
"type": "array",
"items": {"type": "string"}
}
}
}
]
},
"description": "Possible parameter format."
},
"ack": {
"type": "object",
"properties": {
"enabled": {"type": "boolean"},
"format": {"type": "string"},
"extra": {"type": "object"},
"append_url": {"type": "boolean"}
},
"description": "Acknowledgement message format."
},
"result": {
"type": "object",
"properties": {
"enabled": {"type": "boolean"},
"format": {"type": "string"},
"extra": {"type": "object"}
},
"description": "Execution message format."
},
"extra": {
"type": "object",
"description": "Extra parameters, usually adapter-specific."
}
},
"additionalProperties": False
}
@classmethod
def to_model(cls, alias):
name = alias.name
description = getattr(alias, 'description', None)
pack = alias.pack
ref = ResourceReference.to_string_reference(pack=pack, name=name)
enabled = getattr(alias, 'enabled', True)
action_ref = alias.action_ref
formats = alias.formats
ack = getattr(alias, 'ack', None)
result = getattr(alias, 'result', None)
extra = getattr(alias, 'extra', None)
model = cls.model(name=name, description=description, pack=pack, ref=ref,
enabled=enabled, action_ref=action_ref, formats=formats,
ack=ack, result=result, extra=extra)
return model
class AliasExecutionAPI(BaseAPI):
"""
Alias for an action in the system.
"""
model = None
schema = {
"title": "AliasExecution",
"description": "Execution of an ActionAlias.",
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Name of the action alias which matched.",
"required": True
},
"format": {
"type": "string",
"description": "Format string which matched.",
"required": True
},
"command": {
"type": "string",
"description": "Command used in chat.",
"required": True
},
"user": {
"type": "string",
"description": "User that requested the execution.",
"default": "channel" # TODO: This value doesnt get set
},
"source_channel": {
"type": "string",
"description": "Channel from which the execution was requested. This is not the \
channel as defined by the notification system.",
"required": True
},
"notification_channel": {
"type": "string",
"description": "StackStorm notification channel to use to respond.",
"required": False
},
"notification_route": {
"type": "string",
"description": "StackStorm notification route to use to respond.",
"required": False
}
},
"additionalProperties": False
}
@classmethod
def to_model(cls, aliasexecution):
# probably should be unsupported
raise NotImplementedError()
@classmethod
def from_model(cls, aliasexecution):
raise NotImplementedError()
class ActionAliasMatchAPI(BaseAPI):
"""
API model used for alias match API endpoint.
"""
model = None
schema = {
"title": "ActionAliasMatchAPI",
"description": "ActionAliasMatchAPI.",
"type": "object",
"properties": {
"command": {
"type": "string",
"description": "Command string to try to match the aliases against.",
"required": True
}
},
"additionalProperties": False
}
@classmethod
def to_model(cls, aliasexecution):
raise NotImplementedError()
@classmethod
def from_model(cls, aliasexecution):
raise NotImplementedError()
|
apache-2.0
| 578,311,950,099,190,000
| 34.52193
| 100
| 0.503519
| false
| 4.886766
| false
| false
| false
|
cagriulas/algorithm-analysis-17
|
w3/complexity_graphic.py
|
1
|
3297
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import random
import time
def maxsubsumOn(vector):
max_ending_here = max_so_far = vector[0]
for x in vector[1:]:
max_ending_here = max(x, max_ending_here + x)
max_so_far = max(max_so_far, max_ending_here)
return max_so_far
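# Example (illustrative): maxsubsumOn([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6,
# the sum of the contiguous slice [4, -1, 2, 1].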
def maxsubsumOn3(vector):
maxsum = 0
vectorlen = len(vector)
for i in range(vectorlen):
for j in range(i,vectorlen):
thissum=0
for k in range (i,j):
thissum=thissum+vector[k]
if(thissum>maxsum):
maxsum=thissum
return maxsum
def find_max_triple(a,b,c):
if a>b:
if b>c:
return a
elif a>c:
return a
else:
return c
elif b>c:
return b
else:
return c
def find_middle(list):
middle=int(len(list)/2)
sum_left_max=0
sum_left=0
for i in range(middle-1,-1,-1):
sum_left=sum_left+list[i]
if sum_left>sum_left_max:
sum_left_max=sum_left
sum_right_max=0
sum_right=0
for i in range(middle,len(list)):
sum_right=sum_right+list[i]
if sum_right>sum_right_max:
sum_right_max=sum_right
return sum_left_max+sum_right_max
def maxsubsumOnlogn(array):
if(len(array)<2):
return sum(array)
else:
middle=int(len(array)/2)
sum_left=maxsubsumOnlogn(array[0:middle - 1])
sum_right=maxsubsumOnlogn(array[middle:])
sum_middle=find_middle(array)
return find_max_triple(sum_left,sum_right,sum_middle)
if __name__ == '__main__':
nib = random.sample(range(-500, 500), k=100)
nonib = random.sample(range(-5000, 5000), k=500)
zuybin = random.sample(range(-50000, 50000), k=1000)
noylim = random.sample(range(-500000, 500000), k=2000)
circle = {'nib': nib,
'nonib': nonib,
'zuybin': zuybin,
'noylim': noylim}
times = {}
for key in circle:
print(key)
print(circle[key], times, time.time())
print(key)
start = time.time()
maxsubsumOnlogn(circle[key])
times['nlogn' + key] = time.time() - start
# start = time.time()
# maxsubsumOn3(circle[key])
# times['n3' + key] = time.time() - start
start = time.time()
maxsubsumOn(circle[key])
times['n' + key] = time.time() - start
x = np.array([100, 500, 1000, 2000])
# n3 = np.array([times['n3nib'],
# times['n3nonib'],
# times['n3zuybin'],
# times['n3noylim']])
nlogn = np.array([times['nlognnib'],
times['nlognnonib'],
times['nlognzuybin'],
times['nlognnoylim']])
n = np.array([times['nnib'],
times['nnonib'],
times['nzuybin'],
times['nnoylim']])
# plt.plot(x, n3*100)
plt.plot(x, nlogn*100)
plt.plot(x, n * 100)
plt.xticks(x)
    plt.xlabel('Array length')
    plt.ylabel('Time (milliseconds)')
    plt.legend(['nlogn', 'n'], loc='upper left')
plt.savefig('foo.png', dpi=1000)
|
unlicense
| -8,398,150,691,713,920,000
| 25.376
| 58
| 0.533374
| false
| 3.187621
| false
| false
| false
|
maas/maas
|
src/maasserver/models/tests/test_filesystemgroup.py
|
1
|
104094
|
# Copyright 2015-2019 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for `FilesystemGroup`."""
import random
import re
from unittest import skip
from uuid import uuid4
from django.core.exceptions import PermissionDenied, ValidationError
from django.http import Http404
from testtools import ExpectedException
from testtools.matchers import Equals, Is, MatchesStructure, Not
from maasserver.enum import (
CACHE_MODE_TYPE,
FILESYSTEM_GROUP_RAID_TYPES,
FILESYSTEM_GROUP_TYPE,
FILESYSTEM_TYPE,
PARTITION_TABLE_TYPE,
)
from maasserver.models.blockdevice import MIN_BLOCK_DEVICE_SIZE
from maasserver.models.filesystem import Filesystem
from maasserver.models.filesystemgroup import (
Bcache,
BcacheManager,
FilesystemGroup,
LVM_PE_SIZE,
RAID,
RAID_SUPERBLOCK_OVERHEAD,
RAIDManager,
VMFS,
VolumeGroup,
VolumeGroupManager,
)
from maasserver.models.partition import PARTITION_ALIGNMENT_SIZE
from maasserver.models.partitiontable import PARTITION_TABLE_EXTRA_SPACE
from maasserver.models.physicalblockdevice import PhysicalBlockDevice
from maasserver.models.virtualblockdevice import VirtualBlockDevice
from maasserver.permissions import NodePermission
from maasserver.testing.factory import factory
from maasserver.testing.orm import reload_objects
from maasserver.testing.testcase import MAASServerTestCase
from maasserver.utils.converters import (
machine_readable_bytes,
round_size_to_nearest_block,
)
from maasserver.utils.orm import reload_object
from maastesting.matchers import MockCalledOnceWith, MockNotCalled
class TestManagersGetObjectOr404(MAASServerTestCase):
"""Tests for the `get_object_or_404` on the managers."""
scenarios = (
("FilesystemGroup", {"model": FilesystemGroup, "type": None}),
(
"VolumeGroup",
{"model": VolumeGroup, "type": FILESYSTEM_GROUP_TYPE.LVM_VG},
),
("RAID", {"model": RAID, "type": FILESYSTEM_GROUP_TYPE.RAID_0}),
("Bcache", {"model": Bcache, "type": FILESYSTEM_GROUP_TYPE.BCACHE}),
)
def test_raises_Http404_when_invalid_node(self):
user = factory.make_admin()
filesystem_group = factory.make_FilesystemGroup(group_type=self.type)
self.assertRaises(
Http404,
self.model.objects.get_object_or_404,
factory.make_name("system_id"),
filesystem_group.id,
user,
NodePermission.view,
)
def test_raises_Http404_when_invalid_device(self):
user = factory.make_admin()
node = factory.make_Node()
self.assertRaises(
Http404,
self.model.objects.get_object_or_404,
node.system_id,
random.randint(0, 100),
user,
NodePermission.view,
)
def test_view_raises_PermissionDenied_when_user_not_owner(self):
user = factory.make_User()
node = factory.make_Node(owner=factory.make_User())
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertRaises(
PermissionDenied,
self.model.objects.get_object_or_404,
node.system_id,
filesystem_group.id,
user,
NodePermission.view,
)
def test_view_returns_device_by_name(self):
user = factory.make_User()
node = factory.make_Node()
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertEqual(
filesystem_group.id,
self.model.objects.get_object_or_404(
node.system_id,
filesystem_group.name,
user,
NodePermission.view,
).id,
)
def test_view_returns_device_when_no_owner(self):
user = factory.make_User()
node = factory.make_Node()
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertEqual(
filesystem_group.id,
self.model.objects.get_object_or_404(
node.system_id, filesystem_group.id, user, NodePermission.view
).id,
)
def test_view_returns_device_when_owner(self):
user = factory.make_User()
node = factory.make_Node(owner=user)
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertEqual(
filesystem_group.id,
self.model.objects.get_object_or_404(
node.system_id, filesystem_group.id, user, NodePermission.view
).id,
)
def test_edit_raises_PermissionDenied_when_user_not_owner(self):
user = factory.make_User()
node = factory.make_Node(owner=factory.make_User())
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertRaises(
PermissionDenied,
self.model.objects.get_object_or_404,
node.system_id,
filesystem_group.id,
user,
NodePermission.edit,
)
def test_edit_returns_device_when_user_is_owner(self):
user = factory.make_User()
node = factory.make_Node(owner=user)
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertEqual(
filesystem_group.id,
self.model.objects.get_object_or_404(
node.system_id, filesystem_group.id, user, NodePermission.edit
).id,
)
def test_admin_raises_PermissionDenied_when_user_requests_admin(self):
user = factory.make_User()
node = factory.make_Node()
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertRaises(
PermissionDenied,
self.model.objects.get_object_or_404,
node.system_id,
filesystem_group.id,
user,
NodePermission.admin,
)
def test_admin_returns_device_when_admin(self):
user = factory.make_admin()
node = factory.make_Node()
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertEqual(
filesystem_group.id,
self.model.objects.get_object_or_404(
node.system_id, filesystem_group.id, user, NodePermission.admin
).id,
)
class TestManagersFilterByBlockDevice(MAASServerTestCase):
"""Tests for the managers `filter_by_block_device`."""
def test_volume_group_on_block_device(self):
block_device = factory.make_PhysicalBlockDevice()
filesystem = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, block_device=block_device
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=[filesystem]
)
filesystem_groups = VolumeGroup.objects.filter_by_block_device(
block_device
)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_volume_group_on_partition(self):
block_device = factory.make_PhysicalBlockDevice(size=10 * 1024 ** 3)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition = factory.make_Partition(
size=5 * 1024 ** 3, partition_table=partition_table
)
filesystem = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=[filesystem]
)
filesystem_groups = VolumeGroup.objects.filter_by_block_device(
block_device
)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_volume_group_on_two_partitions(self):
block_device = factory.make_PhysicalBlockDevice()
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition_one = factory.make_Partition(partition_table=partition_table)
partition_two = factory.make_Partition(partition_table=partition_table)
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition_one
)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
filesystems=[filesystem_one, filesystem_two],
)
filesystem_groups = VolumeGroup.objects.filter_by_block_device(
block_device
)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_raid_on_block_devices(self):
node = factory.make_Node()
block_device_one = factory.make_PhysicalBlockDevice(node=node)
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, block_device=block_device_one
)
block_device_two = factory.make_PhysicalBlockDevice(node=node)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, block_device=block_device_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0,
filesystems=[filesystem_one, filesystem_two],
)
filesystem_groups = RAID.objects.filter_by_block_device(
block_device_one
)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_raid_on_partitions(self):
block_device = factory.make_PhysicalBlockDevice()
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition_one = factory.make_Partition(partition_table=partition_table)
partition_two = factory.make_Partition(partition_table=partition_table)
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, partition=partition_one
)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, partition=partition_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0,
filesystems=[filesystem_one, filesystem_two],
)
filesystem_groups = RAID.objects.filter_by_block_device(block_device)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_bcache_on_block_devices(self):
node = factory.make_Node()
block_device_one = factory.make_PhysicalBlockDevice(node=node)
cache_set = factory.make_CacheSet(block_device=block_device_one)
block_device_two = factory.make_PhysicalBlockDevice(node=node)
filesystem_backing = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=block_device_two,
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
cache_set=cache_set,
filesystems=[filesystem_backing],
)
filesystem_groups = Bcache.objects.filter_by_block_device(
block_device_one
)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_bcache_on_partitions(self):
device_size = random.randint(
MIN_BLOCK_DEVICE_SIZE * 4, MIN_BLOCK_DEVICE_SIZE * 1024
)
block_device = factory.make_PhysicalBlockDevice(
size=device_size + PARTITION_TABLE_EXTRA_SPACE
)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition_one = factory.make_Partition(
partition_table=partition_table, size=device_size // 2
)
partition_two = factory.make_Partition(
partition_table=partition_table, size=device_size // 2
)
cache_set = factory.make_CacheSet(partition=partition_one)
filesystem_backing = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING, partition=partition_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
cache_set=cache_set,
filesystems=[filesystem_backing],
)
filesystem_groups = Bcache.objects.filter_by_block_device(block_device)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
class TestManagersFilterByNode(MAASServerTestCase):
"""Tests for the managers `filter_by_node`."""
def test_volume_group_on_block_device(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
filesystem = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, block_device=block_device
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=[filesystem]
)
filesystem_groups = VolumeGroup.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_volume_group_on_partition(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition = factory.make_Partition(partition_table=partition_table)
filesystem = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=[filesystem]
)
filesystem_groups = VolumeGroup.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_volume_group_on_two_partitions(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition_one = factory.make_Partition(partition_table=partition_table)
partition_two = factory.make_Partition(partition_table=partition_table)
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition_one
)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
filesystems=[filesystem_one, filesystem_two],
)
filesystem_groups = VolumeGroup.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_raid_on_block_devices(self):
node = factory.make_Node()
block_device_one = factory.make_PhysicalBlockDevice(node=node)
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, block_device=block_device_one
)
block_device_two = factory.make_PhysicalBlockDevice(node=node)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, block_device=block_device_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0,
filesystems=[filesystem_one, filesystem_two],
)
filesystem_groups = RAID.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_raid_on_partitions(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition_one = factory.make_Partition(partition_table=partition_table)
partition_two = factory.make_Partition(partition_table=partition_table)
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, partition=partition_one
)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, partition=partition_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0,
filesystems=[filesystem_one, filesystem_two],
)
filesystem_groups = RAID.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_bcache_on_block_devices(self):
node = factory.make_Node()
block_device_one = factory.make_PhysicalBlockDevice(node=node)
cache_set = factory.make_CacheSet(block_device=block_device_one)
block_device_two = factory.make_PhysicalBlockDevice(node=node)
filesystem_backing = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=block_device_two,
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
cache_set=cache_set,
filesystems=[filesystem_backing],
)
filesystem_groups = Bcache.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_bcache_on_partitions(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition_one = factory.make_Partition(partition_table=partition_table)
partition_two = factory.make_Partition(partition_table=partition_table)
cache_set = factory.make_CacheSet(partition=partition_one)
filesystem_backing = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING, partition=partition_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
cache_set=cache_set,
filesystems=[filesystem_backing],
)
filesystem_groups = Bcache.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
class TestFilesystemGroupManager(MAASServerTestCase):
"""Tests for the `FilesystemGroupManager`."""
def test_get_available_name_for_returns_next_idx(self):
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE
)
filesystem_group.save()
prefix = filesystem_group.get_name_prefix()
current_idx = int(filesystem_group.name.replace(prefix, ""))
self.assertEqual(
"%s%s" % (prefix, current_idx + 1),
FilesystemGroup.objects.get_available_name_for(filesystem_group),
)
def test_get_available_name_for_ignores_bad_int(self):
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE
)
filesystem_group.save()
prefix = filesystem_group.get_name_prefix()
filesystem_group.name = "%s%s" % (prefix, factory.make_name("bad"))
filesystem_group.save()
self.assertEqual(
"%s0" % prefix,
FilesystemGroup.objects.get_available_name_for(filesystem_group),
)
class TestVolumeGroupManager(MAASServerTestCase):
"""Tests for the `VolumeGroupManager`."""
def test_create_volume_group_with_name_and_uuid(self):
block_device = factory.make_PhysicalBlockDevice()
name = factory.make_name("vg")
vguuid = "%s" % uuid4()
volume_group = VolumeGroup.objects.create_volume_group(
name, [block_device], [], uuid=vguuid
)
self.assertEqual(name, volume_group.name)
self.assertEqual(vguuid, volume_group.uuid)
def test_create_volume_group_with_block_devices(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node) for _ in range(3)
]
name = factory.make_name("vg")
volume_group = VolumeGroup.objects.create_volume_group(
name, block_devices, []
)
block_devices_in_vg = [
filesystem.block_device.actual_instance
for filesystem in volume_group.filesystems.all()
]
self.assertItemsEqual(block_devices, block_devices_in_vg)
def test_create_volume_group_with_partitions(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(
node=node,
size=(MIN_BLOCK_DEVICE_SIZE * 3) + PARTITION_TABLE_EXTRA_SPACE,
)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partitions = [
partition_table.add_partition(size=MIN_BLOCK_DEVICE_SIZE)
for _ in range(2)
]
name = factory.make_name("vg")
volume_group = VolumeGroup.objects.create_volume_group(
name, [], partitions
)
partitions_in_vg = [
filesystem.partition
for filesystem in volume_group.filesystems.all()
]
self.assertItemsEqual(partitions, partitions_in_vg)
def test_create_volume_group_with_block_devices_and_partitions(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node) for _ in range(3)
]
block_device = factory.make_PhysicalBlockDevice(
node=node,
size=(MIN_BLOCK_DEVICE_SIZE * 3) + PARTITION_TABLE_EXTRA_SPACE,
)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partitions = [
partition_table.add_partition(size=MIN_BLOCK_DEVICE_SIZE)
for _ in range(2)
]
name = factory.make_name("vg")
volume_group = VolumeGroup.objects.create_volume_group(
name, block_devices, partitions
)
block_devices_in_vg = [
filesystem.block_device.actual_instance
for filesystem in volume_group.filesystems.all()
if filesystem.block_device is not None
]
partitions_in_vg = [
filesystem.partition
for filesystem in volume_group.filesystems.all()
if filesystem.partition is not None
]
self.assertItemsEqual(block_devices, block_devices_in_vg)
self.assertItemsEqual(partitions, partitions_in_vg)
class TestFilesystemGroup(MAASServerTestCase):
"""Tests for the `FilesystemGroup` model."""
def test_virtual_device_raises_AttributeError_for_lvm(self):
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG
)
with ExpectedException(AttributeError):
fsgroup.virtual_device
def test_virtual_device_returns_VirtualBlockDevice_for_group(self):
fsgroup = factory.make_FilesystemGroup(
group_type=factory.pick_enum(
FILESYSTEM_GROUP_TYPE, but_not=FILESYSTEM_GROUP_TYPE.LVM_VG
)
)
self.assertEqual(
VirtualBlockDevice.objects.get(filesystem_group=fsgroup),
fsgroup.virtual_device,
)
def test_get_numa_node_indexes_all_same(self):
fsgroup = factory.make_FilesystemGroup(
group_type=factory.pick_enum(
FILESYSTEM_GROUP_TYPE, but_not=FILESYSTEM_GROUP_TYPE.VMFS6
)
)
self.assertEqual(fsgroup.get_numa_node_indexes(), [0])
def test_get_numa_node_indexes_multiple(self):
node = factory.make_Node()
numa_nodes = [
node.default_numanode,
factory.make_NUMANode(node=node),
factory.make_NUMANode(node=node),
]
block_devices = [
factory.make_PhysicalBlockDevice(numa_node=numa_node)
for numa_node in numa_nodes
]
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, block_device=block_device
)
for block_device in block_devices
]
fsgroup = factory.make_FilesystemGroup(
node=node,
filesystems=filesystems,
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
)
self.assertEqual(fsgroup.get_numa_node_indexes(), [0, 1, 2])
def test_get_numa_node_indexes_nested(self):
node = factory.make_Node()
numa_nodes = [
node.default_numanode,
factory.make_NUMANode(node=node),
factory.make_NUMANode(node=node),
factory.make_NUMANode(node=node),
factory.make_NUMANode(node=node),
]
# 2 physical disks have filesystems on them directly
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(
numa_node=numa_node
),
)
for numa_node in numa_nodes[:2]
]
# the 3 remaining disks are part of another filesystem group which gets
# added to the first
nested_filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(
numa_node=numa_node
),
)
for numa_node in numa_nodes[2:]
]
nested_group = factory.make_FilesystemGroup(
node=node,
filesystems=nested_filesystems,
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
)
virtual_block_device = factory.make_VirtualBlockDevice(
filesystem_group=nested_group
)
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=virtual_block_device,
)
)
fsgroup = factory.make_FilesystemGroup(
node=node,
filesystems=filesystems,
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
)
self.assertEqual(fsgroup.get_numa_node_indexes(), [0, 1, 2, 3, 4])
def test_get_node_returns_first_filesystem_node(self):
fsgroup = factory.make_FilesystemGroup()
self.assertEqual(
fsgroup.filesystems.first().get_node(), fsgroup.get_node()
)
def test_get_node_returns_None_if_no_filesystems(self):
fsgroup = FilesystemGroup()
self.assertIsNone(fsgroup.get_node())
def test_get_size_returns_0_if_lvm_without_filesystems(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.LVM_VG)
self.assertEqual(0, fsgroup.get_size())
def test_get_size_returns_sum_of_all_filesystem_sizes_for_lvm(self):
node = factory.make_Node()
block_size = 4096
total_size = 0
filesystems = []
for _ in range(3):
size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
total_size += size
block_device = factory.make_PhysicalBlockDevice(
node=node, size=size, block_size=block_size
)
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, block_device=block_device
)
)
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=filesystems
)
# Reserve one extent per filesystem for LVM headers.
extents = (total_size // LVM_PE_SIZE) - 3
self.assertEqual(extents * LVM_PE_SIZE, fsgroup.get_size())
def test_get_size_returns_0_if_raid_without_filesystems(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.RAID_0)
self.assertEqual(0, fsgroup.get_size())
def test_get_size_returns_smallest_disk_size_for_raid_0(self):
node = factory.make_Node()
small_size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
large_size = random.randint(small_size + 1, small_size + (10 ** 5))
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=small_size
),
),
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=large_size
),
),
]
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0, filesystems=filesystems
)
        # Size should be twice the smallest device (the rest of the larger
        # device remains unused).
self.assertEqual(
(small_size * 2) - RAID_SUPERBLOCK_OVERHEAD, fsgroup.get_size()
)
def test_get_size_returns_smallest_disk_size_for_raid_1(self):
node = factory.make_Node()
small_size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
large_size = random.randint(small_size + 1, small_size + (10 ** 5))
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=small_size
),
),
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=large_size
),
),
]
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_1, filesystems=filesystems
)
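        # RAID 1 mirrors its members, so the usable size is the smallest
        # member minus the RAID superblock overhead.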
self.assertEqual(
small_size - RAID_SUPERBLOCK_OVERHEAD, fsgroup.get_size()
)
def test_get_size_returns_correct_disk_size_for_raid_5(self):
node = factory.make_Node()
small_size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
other_size = random.randint(small_size + 1, small_size + (10 ** 5))
number_of_raid_devices = random.randint(2, 9)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=small_size
),
)
]
for _ in range(number_of_raid_devices):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=other_size
),
)
)
# Spares are ignored and not taken into calculation.
for _ in range(3):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=other_size
),
)
)
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_5, filesystems=filesystems
)
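        # RAID 5 reserves one member's worth of space for parity, so the
        # usable size is (active members - 1) times the smallest member,
        # minus the RAID superblock overhead.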
self.assertEqual(
(small_size * number_of_raid_devices) - RAID_SUPERBLOCK_OVERHEAD,
fsgroup.get_size(),
)
def test_get_size_returns_correct_disk_size_for_raid_6(self):
node = factory.make_Node()
small_size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
other_size = random.randint(small_size + 1, small_size + (10 ** 5))
number_of_raid_devices = random.randint(3, 9)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=small_size
),
)
]
for _ in range(number_of_raid_devices):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=other_size
),
)
)
# Spares are ignored and not taken into calculation.
for _ in range(3):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=other_size
),
)
)
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_6, filesystems=filesystems
)
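        # RAID 6 reserves two members' worth of space for parity, so the
        # usable size is (active members - 2) times the smallest member,
        # minus the RAID superblock overhead.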
self.assertEqual(
(small_size * (number_of_raid_devices - 1))
- RAID_SUPERBLOCK_OVERHEAD,
fsgroup.get_size(),
)
@skip("XXX: GavinPanella 2015-12-04 bug=1522965: Fails spuriously.")
def test_get_size_returns_correct_disk_size_for_raid_10(self):
node = factory.make_Node()
small_size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
other_size = random.randint(small_size + 1, small_size + (10 ** 5))
number_of_raid_devices = random.randint(3, 9)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=small_size
),
)
]
for _ in range(number_of_raid_devices):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=other_size
),
)
)
# Spares are ignored and not taken into calculation.
for _ in range(3):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=other_size
),
)
)
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_10, filesystems=filesystems
)
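        # RAID 10 stripes over mirrored pairs, so the usable size is roughly
        # half of (active members times the smallest member), minus the RAID
        # superblock overhead.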
self.assertEqual(
(small_size * (number_of_raid_devices + 1) // 2)
- RAID_SUPERBLOCK_OVERHEAD,
fsgroup.get_size(),
)
def test_get_size_returns_0_if_bcache_without_backing(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.BCACHE)
self.assertEqual(0, fsgroup.get_size())
def test_get_size_returns_size_of_backing_device_with_bcache(self):
node = factory.make_Node()
backing_size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
cache_set = factory.make_CacheSet(node=node)
backing_block_device = factory.make_PhysicalBlockDevice(
node=node, size=backing_size
)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=backing_block_device,
)
]
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
cache_set=cache_set,
filesystems=filesystems,
)
self.assertEqual(backing_size, fsgroup.get_size())
def test_get_size_returns_total_size_with_vmfs(self):
vmfs = factory.make_VMFS()
self.assertEqual(vmfs.get_total_size(), vmfs.get_size())
def test_get_total_size(self):
vmfs = factory.make_VMFS()
size = 0
for fs in vmfs.filesystems.all():
size += fs.get_size()
self.assertEqual(size, vmfs.get_total_size())
def test_is_lvm_returns_true_when_LVM_VG(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.LVM_VG)
self.assertTrue(fsgroup.is_lvm())
def test_is_lvm_returns_false_when_not_LVM_VG(self):
fsgroup = FilesystemGroup(
group_type=factory.pick_enum(
FILESYSTEM_GROUP_TYPE, but_not=FILESYSTEM_GROUP_TYPE.LVM_VG
)
)
self.assertFalse(fsgroup.is_lvm())
def test_is_raid_returns_true_for_all_raid_types(self):
fsgroup = FilesystemGroup()
for raid_type in FILESYSTEM_GROUP_RAID_TYPES:
fsgroup.group_type = raid_type
self.assertTrue(
fsgroup.is_raid(),
"is_raid should return true for %s" % raid_type,
)
def test_is_raid_returns_false_for_LVM_VG(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.LVM_VG)
self.assertFalse(fsgroup.is_raid())
def test_is_raid_returns_false_for_BCACHE(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.BCACHE)
self.assertFalse(fsgroup.is_raid())
def test_is_bcache_returns_true_when_BCACHE(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.BCACHE)
self.assertTrue(fsgroup.is_bcache())
def test_is_bcache_returns_false_when_not_BCACHE(self):
fsgroup = FilesystemGroup(
group_type=factory.pick_enum(
FILESYSTEM_GROUP_TYPE, but_not=FILESYSTEM_GROUP_TYPE.BCACHE
)
)
self.assertFalse(fsgroup.is_bcache())
def test_is_vmfs(self):
vmfs = factory.make_VMFS()
self.assertTrue(vmfs.is_vmfs())
def test_creating_vmfs_automatically_creates_mounted_fs(self):
part = factory.make_Partition()
name = factory.make_name("datastore")
vmfs = VMFS.objects.create_vmfs(name, [part])
self.assertEqual(
"/vmfs/volumes/%s" % name,
vmfs.virtual_device.get_effective_filesystem().mount_point,
)
def test_can_save_new_filesystem_group_without_filesystems(self):
fsgroup = FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
name=factory.make_name("vg"),
)
fsgroup.save()
self.expectThat(fsgroup.id, Not(Is(None)))
self.expectThat(fsgroup.filesystems.count(), Equals(0))
def test_cannot_save_without_filesystems(self):
fsgroup = FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
name=factory.make_name("vg"),
)
fsgroup.save()
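        # The initial save succeeds because a brand new group may be created
        # empty; forcing an update re-runs validation, which requires at
        # least one filesystem.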
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['At least one filesystem must have "
"been added.']}"
),
):
fsgroup.save(force_update=True)
    def test_cannot_save_with_filesystems_from_different_nodes(self):
filesystems = [factory.make_Filesystem(), factory.make_Filesystem()]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['All added filesystems must belong to "
"the same node.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
filesystems=filesystems,
)
def test_cannot_save_volume_group_if_invalid_filesystem(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(node=node),
),
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
),
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['Volume group can only contain lvm "
"physical volumes.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
filesystems=filesystems,
)
def test_can_save_volume_group_if_valid_filesystems(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(node=node),
),
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(node=node),
),
]
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=filesystems
)
def test_cannot_save_volume_group_if_logical_volumes_larger(self):
node = factory.make_Node()
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
filesystems = [filesystem_one, filesystem_two]
volume_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=filesystems
)
factory.make_VirtualBlockDevice(
size=volume_group.get_size(), filesystem_group=volume_group
)
filesystem_two.delete()
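        # Deleting one physical volume shrinks the volume group below the
        # size of its logical volume, so the following save must fail.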
with ExpectedException(
ValidationError,
re.escape(
"['Volume group cannot be smaller than its "
"logical volumes.']"
),
):
volume_group.save()
def test_cannot_save_raid_0_with_less_than_2_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 0 must have at least 2 raid "
"devices and no spares.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0,
filesystems=filesystems,
)
def test_cannot_save_raid_0_with_spare_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(2)
]
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
)
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 0 must have at least 2 raid "
"devices and no spares.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0,
filesystems=filesystems,
)
def test_can_save_raid_0_with_exactly_2_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(2)
]
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0, filesystems=filesystems
)
    def test_can_save_raid_0_with_more_than_2_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(10)
]
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0, filesystems=filesystems
)
def test_cannot_save_raid_1_with_less_than_2_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 1 must have at least 2 raid "
"devices and any number of spares.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_1,
filesystems=filesystems,
)
def test_can_save_raid_1_with_spare_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(2)
]
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
)
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_1, filesystems=filesystems
)
def test_can_save_raid_1_with_2_or_more_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(2, 10))
]
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_1, filesystems=filesystems
)
def test_cannot_save_raid_5_with_less_than_3_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(1, 2))
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 5 must have at least 3 raid "
"devices and any number of spares.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_5,
filesystems=filesystems,
)
def test_can_save_raid_5_with_3_or_more_raid_devices_and_spares(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(3, 10))
]
for _ in range(random.randint(1, 5)):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
)
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_5, filesystems=filesystems
)
def test_cannot_save_raid_6_with_less_than_4_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(1, 3))
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 6 must have at least 4 raid "
"devices and any number of spares.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_6,
filesystems=filesystems,
)
def test_can_save_raid_6_with_4_or_more_raid_devices_and_spares(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(4, 10))
]
for _ in range(random.randint(1, 5)):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
)
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_6, filesystems=filesystems
)
def test_cannot_save_raid_10_with_less_than_3_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(1, 2))
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 10 must have at least 3 raid "
"devices and any number of spares.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_10,
filesystems=filesystems,
)
def test_can_save_raid_10_with_3_raid_devices_and_spares(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(3)
]
for _ in range(random.randint(1, 5)):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
)
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_10, filesystems=filesystems
)
def test_can_save_raid_10_with_4_or_more_raid_devices_and_spares(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(4, 10))
]
for _ in range(random.randint(1, 5)):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
)
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_10, filesystems=filesystems
)
def test_cannot_save_bcache_without_cache_set(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['Bcache requires an assigned cache set.']}"
),
):
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
filesystems=filesystems,
)
filesystem_group.cache_set = None
filesystem_group.save()
def test_cannot_save_bcache_without_backing(self):
node = factory.make_Node()
cache_set = factory.make_CacheSet(node=node)
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['At least one filesystem must have "
"been added.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_set=cache_set,
filesystems=[],
)
def test_cannot_save_bcache_with_logical_volume_as_backing(self):
node = factory.make_Node()
cache_set = factory.make_CacheSet(node=node)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=factory.make_VirtualBlockDevice(node=node),
)
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['Bcache cannot use a logical volume as a "
"backing device.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_set=cache_set,
filesystems=filesystems,
)
def test_can_save_bcache_with_cache_set_and_backing(self):
node = factory.make_Node()
cache_set = factory.make_CacheSet(node=node)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
]
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_set=cache_set,
filesystems=filesystems,
)
def test_cannot_save_bcache_with_multiple_backings(self):
node = factory.make_Node()
cache_set = factory.make_CacheSet(node=node)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(2, 10))
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['Bcache can only contain one backing "
"device.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_set=cache_set,
filesystems=filesystems,
)
def test_save_doesnt_overwrite_uuid(self):
uuid = uuid4()
fsgroup = factory.make_FilesystemGroup(uuid=uuid)
self.assertEqual("%s" % uuid, fsgroup.uuid)
def test_save_doesnt_allow_changing_group_type(self):
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0
)
fsgroup.save()
fsgroup.group_type = FILESYSTEM_GROUP_TYPE.RAID_1
error = self.assertRaises(ValidationError, fsgroup.save)
self.assertEqual(
"Cannot change the group_type of a FilesystemGroup.", error.message
)
def test_save_calls_create_or_update_for_when_filesystems_linked(self):
mock_create_or_update_for = self.patch(
VirtualBlockDevice.objects, "create_or_update_for"
)
filesystem_group = factory.make_FilesystemGroup()
self.assertThat(
mock_create_or_update_for, MockCalledOnceWith(filesystem_group)
)
def test_save_doesnt_call_create_or_update_for_when_no_filesystems(self):
mock_create_or_update_for = self.patch(
VirtualBlockDevice.objects, "create_or_update_for"
)
filesystem_group = FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
name=factory.make_name("vg"),
)
filesystem_group.save()
self.assertThat(mock_create_or_update_for, MockNotCalled())
def test_get_lvm_allocated_size_and_get_lvm_free_space(self):
"""Check get_lvm_allocated_size and get_lvm_free_space methods."""
backing_volume_size = machine_readable_bytes("10G")
node = factory.make_Node()
fsgroup = FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
name=factory.make_name("vg"),
)
fsgroup.save()
block_size = 4096
for i in range(5):
block_device = factory.make_BlockDevice(
node=node, size=backing_volume_size, block_size=block_size
)
factory.make_Filesystem(
filesystem_group=fsgroup,
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=block_device,
)
# Size should be 50 GB minus one extent per filesystem for LVM headers.
pv_total_size = 50 * 1000 ** 3
extents = (pv_total_size // LVM_PE_SIZE) - 5
usable_size = extents * LVM_PE_SIZE
self.assertEqual(usable_size, fsgroup.get_size())
        # Allocate two VirtualBlockDevices.
factory.make_VirtualBlockDevice(
filesystem_group=fsgroup, size=35 * 1000 ** 3
)
factory.make_VirtualBlockDevice(
filesystem_group=fsgroup, size=5 * 1000 ** 3
)
expected_size = round_size_to_nearest_block(
40 * 1000 ** 3, PARTITION_ALIGNMENT_SIZE, False
)
self.assertEqual(expected_size, fsgroup.get_lvm_allocated_size())
self.assertEqual(
usable_size - expected_size, fsgroup.get_lvm_free_space()
)
def test_get_virtual_block_device_block_size_returns_backing_for_bc(self):
        # This test is not included in the scenario-based
        # `TestFilesystemGroupGetVirtualBlockDeviceBlockSize` class below
        # because its logic does not fit those scenarios.
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE
)
filesystem = filesystem_group.get_bcache_backing_filesystem()
self.assertEqual(
filesystem.get_block_size(),
filesystem_group.get_virtual_block_device_block_size(),
)
def test_delete_deletes_filesystems_not_block_devices(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node) for _ in range(3)
]
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, block_device=bd
)
for bd in block_devices
]
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=filesystems
)
filesystem_group.delete()
deleted_filesystems = reload_objects(Filesystem, filesystems)
kept_block_devices = reload_objects(PhysicalBlockDevice, block_devices)
self.assertItemsEqual([], deleted_filesystems)
self.assertItemsEqual(block_devices, kept_block_devices)
def test_delete_cannot_delete_volume_group_with_logical_volumes(self):
volume_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG
)
factory.make_VirtualBlockDevice(
size=volume_group.get_size(), filesystem_group=volume_group
)
error = self.assertRaises(ValidationError, volume_group.delete)
self.assertEqual(
"This volume group has logical volumes; it cannot be deleted.",
error.message,
)
def test_delete_deletes_virtual_block_device(self):
filesystem_group = factory.make_FilesystemGroup(
group_type=factory.pick_enum(
FILESYSTEM_GROUP_TYPE, but_not=FILESYSTEM_GROUP_TYPE.LVM_VG
)
)
virtual_device = filesystem_group.virtual_device
filesystem_group.delete()
self.assertIsNone(
reload_object(virtual_device),
"VirtualBlockDevice should have been deleted.",
)
class TestFilesystemGroupGetNiceName(MAASServerTestCase):
scenarios = [
(
FILESYSTEM_GROUP_TYPE.LVM_VG,
{
"group_type": FILESYSTEM_GROUP_TYPE.LVM_VG,
"name": "volume group",
},
),
(
FILESYSTEM_GROUP_TYPE.RAID_0,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_0, "name": "RAID"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_1,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_1, "name": "RAID"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_5,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_5, "name": "RAID"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_6,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_6, "name": "RAID"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_10,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_10, "name": "RAID"},
),
(
FILESYSTEM_GROUP_TYPE.BCACHE,
{"group_type": FILESYSTEM_GROUP_TYPE.BCACHE, "name": "Bcache"},
),
(
FILESYSTEM_GROUP_TYPE.VMFS6,
{"group_type": FILESYSTEM_GROUP_TYPE.VMFS6, "name": "VMFS"},
),
]
def test_returns_prefix(self):
filesystem_group = factory.make_FilesystemGroup(
group_type=self.group_type
)
self.assertEqual(self.name, filesystem_group.get_nice_name())
class TestFilesystemGroupGetNamePrefix(MAASServerTestCase):
scenarios = [
(
FILESYSTEM_GROUP_TYPE.LVM_VG,
{"group_type": FILESYSTEM_GROUP_TYPE.LVM_VG, "prefix": "vg"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_0,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_0, "prefix": "md"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_1,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_1, "prefix": "md"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_5,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_5, "prefix": "md"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_6,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_6, "prefix": "md"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_10,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_10, "prefix": "md"},
),
(
FILESYSTEM_GROUP_TYPE.BCACHE,
{"group_type": FILESYSTEM_GROUP_TYPE.BCACHE, "prefix": "bcache"},
),
(
FILESYSTEM_GROUP_TYPE.VMFS6,
{"group_type": FILESYSTEM_GROUP_TYPE.VMFS6, "prefix": "vmfs"},
),
]
def test_returns_prefix(self):
filesystem_group = factory.make_FilesystemGroup(
group_type=self.group_type
)
self.assertEqual(self.prefix, filesystem_group.get_name_prefix())
class TestFilesystemGroupGetVirtualBlockDeviceBlockSize(MAASServerTestCase):
scenarios = [
(
FILESYSTEM_GROUP_TYPE.LVM_VG,
{"group_type": FILESYSTEM_GROUP_TYPE.LVM_VG, "block_size": 4096},
),
(
FILESYSTEM_GROUP_TYPE.RAID_0,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_0, "block_size": 512},
),
(
FILESYSTEM_GROUP_TYPE.RAID_1,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_1, "block_size": 512},
),
(
FILESYSTEM_GROUP_TYPE.RAID_5,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_5, "block_size": 512},
),
(
FILESYSTEM_GROUP_TYPE.RAID_6,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_6, "block_size": 512},
),
(
FILESYSTEM_GROUP_TYPE.RAID_10,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_10, "block_size": 512},
),
(
FILESYSTEM_GROUP_TYPE.VMFS6,
{"group_type": FILESYSTEM_GROUP_TYPE.VMFS6, "block_size": 1024},
),
# For BCACHE see
# `test_get_virtual_block_device_block_size_returns_backing_for_bc`
# above.
]
def test_returns_block_size(self):
filesystem_group = factory.make_FilesystemGroup(
group_type=self.group_type
)
self.assertEqual(
self.block_size,
filesystem_group.get_virtual_block_device_block_size(),
)
class TestVolumeGroup(MAASServerTestCase):
def test_objects_is_VolumeGroupManager(self):
self.assertIsInstance(VolumeGroup.objects, VolumeGroupManager)
def test_group_type_set_to_LVM_VG(self):
obj = VolumeGroup()
self.assertEqual(FILESYSTEM_GROUP_TYPE.LVM_VG, obj.group_type)
def test_update_block_devices_and_partitions(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node) for _ in range(3)
]
new_block_device = factory.make_PhysicalBlockDevice(node=node)
partition_block_device = factory.make_PhysicalBlockDevice(
node=node,
size=(MIN_BLOCK_DEVICE_SIZE * 4) + PARTITION_TABLE_EXTRA_SPACE,
)
partition_table = factory.make_PartitionTable(
block_device=partition_block_device
)
partitions = [
partition_table.add_partition(size=MIN_BLOCK_DEVICE_SIZE)
for _ in range(2)
]
new_partition = partition_table.add_partition(
size=MIN_BLOCK_DEVICE_SIZE
)
initial_bd_filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, block_device=bd
)
for bd in block_devices
]
initial_part_filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=part
)
for part in partitions
]
volume_group = factory.make_VolumeGroup(
filesystems=initial_bd_filesystems + initial_part_filesystems
)
deleted_block_device = block_devices[0]
updated_block_devices = [new_block_device] + block_devices[1:]
deleted_partition = partitions[0]
update_partitions = [new_partition] + partitions[1:]
volume_group.update_block_devices_and_partitions(
updated_block_devices, update_partitions
)
self.assertIsNone(deleted_block_device.get_effective_filesystem())
self.assertIsNone(deleted_partition.get_effective_filesystem())
self.assertEqual(
volume_group.id,
new_block_device.get_effective_filesystem().filesystem_group.id,
)
self.assertEqual(
volume_group.id,
new_partition.get_effective_filesystem().filesystem_group.id,
)
for device in block_devices[1:] + partitions[1:]:
self.assertEqual(
volume_group.id,
device.get_effective_filesystem().filesystem_group.id,
)
def test_create_logical_volume(self):
volume_group = factory.make_VolumeGroup()
name = factory.make_name()
vguuid = "%s" % uuid4()
size = random.randint(MIN_BLOCK_DEVICE_SIZE, volume_group.get_size())
logical_volume = volume_group.create_logical_volume(
name=name, uuid=vguuid, size=size
)
logical_volume = reload_object(logical_volume)
expected_size = round_size_to_nearest_block(
size, PARTITION_ALIGNMENT_SIZE, False
)
self.assertThat(
logical_volume,
MatchesStructure.byEquality(
name=name,
uuid=vguuid,
size=expected_size,
block_size=volume_group.get_virtual_block_device_block_size(),
),
)
class TestRAID(MAASServerTestCase):
def test_objects_is_RAIDManager(self):
self.assertIsInstance(RAID.objects, RAIDManager)
def test_init_raises_ValueError_if_group_type_not_set_to_raid_type(self):
self.assertRaises(
ValueError, RAID, group_type=FILESYSTEM_GROUP_TYPE.LVM_VG
)
def test_create_raid(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
for bd in block_devices[5:]:
factory.make_PartitionTable(block_device=bd)
partitions = [
bd.get_partitiontable().add_partition() for bd in block_devices[5:]
]
spare_block_device = block_devices[0]
spare_partition = partitions[0]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_6,
uuid=uuid,
block_devices=block_devices[1:5],
partitions=partitions[1:],
spare_devices=[spare_block_device],
spare_partitions=[spare_partition],
)
self.assertEqual("md0", raid.name)
self.assertEqual(
(6 * partitions[1].size) - RAID_SUPERBLOCK_OVERHEAD,
raid.get_size(),
)
self.assertEqual(FILESYSTEM_GROUP_TYPE.RAID_6, raid.group_type)
self.assertEqual(uuid, raid.uuid)
self.assertEqual(10, raid.filesystems.count())
self.assertEqual(
8, raid.filesystems.filter(fstype=FILESYSTEM_TYPE.RAID).count()
)
self.assertEqual(
2,
raid.filesystems.filter(fstype=FILESYSTEM_TYPE.RAID_SPARE).count(),
)
def test_create_raid_0_with_a_spare_fails(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=10 * 1000 ** 4)
for _ in range(10)
]
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 0 must have at least 2 raid "
"devices and no spares.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_0,
uuid=uuid,
block_devices=block_devices[1:],
partitions=[],
spare_devices=block_devices[:1],
spare_partitions=[],
)
def test_create_raid_without_devices_fails(self):
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['At least one filesystem must have been "
"added.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_0,
uuid=uuid,
block_devices=[],
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_create_raid_0_with_one_element_fails(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 0 must have at least 2 raid "
"devices and no spares.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_0,
uuid=uuid,
block_devices=[block_device],
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_create_raid_1_with_spares(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
for bd in block_devices[5:]:
factory.make_PartitionTable(block_device=bd)
partitions = [
bd.get_partitiontable().add_partition() for bd in block_devices[5:]
]
        # Partition size will be smaller than the disk because of
        # partition-table overhead.
spare_block_device = block_devices[0]
spare_partition = partitions[0]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_1,
uuid=uuid,
block_devices=block_devices[1:5],
partitions=partitions[1:],
spare_devices=[spare_block_device],
spare_partitions=[spare_partition],
)
self.assertEqual("md0", raid.name)
self.assertEqual(
partitions[1].size - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
self.assertEqual(FILESYSTEM_GROUP_TYPE.RAID_1, raid.group_type)
self.assertEqual(uuid, raid.uuid)
self.assertEqual(10, raid.filesystems.count())
self.assertEqual(
8, raid.filesystems.filter(fstype=FILESYSTEM_TYPE.RAID).count()
)
self.assertEqual(
2,
raid.filesystems.filter(fstype=FILESYSTEM_TYPE.RAID_SPARE).count(),
)
def test_create_raid_1_with_one_element_fails(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 1 must have at least 2 raid "
"devices and any number of spares.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_1,
uuid=uuid,
block_devices=[block_device],
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_create_raid_5_with_spares(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
for bd in block_devices[5:]:
factory.make_PartitionTable(block_device=bd)
partitions = [
bd.get_partitiontable().add_partition() for bd in block_devices[5:]
]
spare_block_device = block_devices[0]
spare_partition = partitions[0]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices[1:5],
partitions=partitions[1:],
spare_devices=[spare_block_device],
spare_partitions=[spare_partition],
)
self.assertEqual("md0", raid.name)
self.assertEqual(
(7 * partitions[1].size) - RAID_SUPERBLOCK_OVERHEAD,
raid.get_size(),
)
self.assertEqual(FILESYSTEM_GROUP_TYPE.RAID_5, raid.group_type)
self.assertEqual(uuid, raid.uuid)
self.assertEqual(10, raid.filesystems.count())
self.assertEqual(
8, raid.filesystems.filter(fstype=FILESYSTEM_TYPE.RAID).count()
)
self.assertEqual(
2,
raid.filesystems.filter(fstype=FILESYSTEM_TYPE.RAID_SPARE).count(),
)
def test_create_raid_5_with_2_elements_fails(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=10 * 1000 ** 4)
for _ in range(2)
]
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 5 must have at least 3 raid "
"devices and any number of spares.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_create_raid_6_with_3_elements_fails(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node) for _ in range(3)
]
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 6 must have at least 4 raid "
"devices and any number of spares.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_6,
uuid=uuid,
block_devices=block_devices,
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_create_raid_10_with_2_elements_fails(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node) for _ in range(2)
]
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 10 must have at least 3 raid "
"devices and any number of spares.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_10,
uuid=uuid,
block_devices=block_devices,
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_create_raid_with_block_device_from_other_node_fails(self):
node1 = factory.make_Node()
node2 = factory.make_Node()
block_devices_1 = [
factory.make_PhysicalBlockDevice(node=node1) for _ in range(5)
]
block_devices_2 = [
factory.make_PhysicalBlockDevice(node=node2) for _ in range(5)
]
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['All added filesystems must belong to the "
"same node.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_1,
uuid=uuid,
block_devices=block_devices_1 + block_devices_2,
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_add_device_to_array(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
device = factory.make_PhysicalBlockDevice(node=node, size=device_size)
raid.add_device(device, FILESYSTEM_TYPE.RAID)
self.assertEqual(11, raid.filesystems.count())
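        # The new active member grows the RAID 5 array: 11 members now give
        # 10 members' worth of data.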
self.assertEqual(
(10 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_add_spare_device_to_array(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
device = factory.make_PhysicalBlockDevice(node=node, size=device_size)
raid.add_device(device, FILESYSTEM_TYPE.RAID_SPARE)
self.assertEqual(11, raid.filesystems.count())
self.assertEqual(
(9 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_add_partition_to_array(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
partition = factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=device_size
)
).add_partition()
raid.add_partition(partition, FILESYSTEM_TYPE.RAID)
self.assertEqual(11, raid.filesystems.count())
self.assertEqual(
(10 * partition.size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_add_spare_partition_to_array(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
partition = factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=device_size
)
).add_partition()
raid.add_partition(partition, FILESYSTEM_TYPE.RAID_SPARE)
self.assertEqual(11, raid.filesystems.count())
self.assertEqual(
(9 * partition.size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_add_device_from_another_node_to_array_fails(self):
node = factory.make_Node()
other_node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
device = factory.make_PhysicalBlockDevice(
node=other_node, size=device_size
)
with ExpectedException(
ValidationError,
re.escape(
"['Device needs to be from the same node as the rest of the "
"array.']"
),
):
raid.add_device(device, FILESYSTEM_TYPE.RAID)
self.assertEqual(10, raid.filesystems.count()) # Still 10 devices
self.assertEqual(
(9 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_add_partition_from_another_node_to_array_fails(self):
node = factory.make_Node()
other_node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
partition = factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=other_node, size=device_size
)
).add_partition()
with ExpectedException(
ValidationError,
re.escape(
"['Partition must be on a device from the same node as "
"the rest of the array.']"
),
):
raid.add_partition(partition, FILESYSTEM_TYPE.RAID)
self.assertEqual(10, raid.filesystems.count()) # Nothing added
self.assertEqual(
(9 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_add_already_used_device_to_array_fails(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
device = factory.make_PhysicalBlockDevice(node=node, size=device_size)
Filesystem.objects.create(
block_device=device,
mount_point="/export/home",
fstype=FILESYSTEM_TYPE.EXT4,
)
with ExpectedException(
ValidationError,
re.escape("['There is another filesystem on this device.']"),
):
raid.add_device(device, FILESYSTEM_TYPE.RAID)
self.assertEqual(10, raid.filesystems.count()) # Nothing added.
self.assertEqual(
(9 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_remove_device_from_array_invalidates_array_fails(self):
"""Checks it's not possible to remove a device from an RAID in such way
as to make the RAID invalid (a 1-device RAID-0/1, a 2-device RAID-5
etc). The goal is to make sure we trigger the RAID internal validation.
"""
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(4)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_6,
uuid=uuid,
block_devices=block_devices,
)
fsids_before = [fs.id for fs in raid.filesystems.all()]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 6 must have at least 4 raid "
"devices and any number of spares.']}"
),
):
raid.remove_device(block_devices[0])
self.assertEqual(4, raid.filesystems.count())
self.assertEqual(
(2 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
# Ensure the filesystems are the exact same before and after.
self.assertItemsEqual(
fsids_before, [fs.id for fs in raid.filesystems.all()]
)
def test_remove_partition_from_array_invalidates_array_fails(self):
"""Checks it's not possible to remove a partition from an RAID in such
way as to make the RAID invalid (a 1-device RAID-0/1, a 2-device RAID-5
etc). The goal is to make sure we trigger the RAID internal validation.
"""
node = factory.make_Node(bios_boot_method="uefi")
device_size = 10 * 1000 ** 4
partitions = [
factory.make_PartitionTable(
table_type=PARTITION_TABLE_TYPE.GPT,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=device_size
),
).add_partition()
for _ in range(4)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_6,
uuid=uuid,
partitions=partitions,
)
fsids_before = [fs.id for fs in raid.filesystems.all()]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 6 must have at least 4 raid "
"devices and any number of spares.']}"
),
):
raid.remove_partition(partitions[0])
self.assertEqual(4, raid.filesystems.count())
self.assertEqual(
(2 * partitions[0].size) - RAID_SUPERBLOCK_OVERHEAD,
raid.get_size(),
)
# Ensure the filesystems are the exact same before and after.
self.assertItemsEqual(
fsids_before, [fs.id for fs in raid.filesystems.all()]
)
def test_remove_device_from_array(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices[:-2],
spare_devices=block_devices[-2:],
)
raid.remove_device(block_devices[0])
self.assertEqual(9, raid.filesystems.count())
self.assertEqual(
(6 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_remove_partition_from_array(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
partitions = [
factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=device_size
)
).add_partition()
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
partitions=partitions[:-2],
spare_partitions=partitions[-2:],
)
raid.remove_partition(partitions[0])
self.assertEqual(9, raid.filesystems.count())
self.assertEqual(
(6 * partitions[0].size) - RAID_SUPERBLOCK_OVERHEAD,
raid.get_size(),
)
def test_remove_invalid_partition_from_array_fails(self):
node = factory.make_Node(bios_boot_method="uefi")
device_size = 10 * 1000 ** 4
partitions = [
factory.make_PartitionTable(
table_type=PARTITION_TABLE_TYPE.GPT,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=device_size
),
).add_partition()
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
partitions=partitions,
)
with ExpectedException(
ValidationError,
re.escape("['Partition does not belong to this array.']"),
):
raid.remove_partition(
factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=device_size
)
).add_partition()
)
self.assertEqual(10, raid.filesystems.count())
self.assertEqual(
(9 * partitions[0].size) - RAID_SUPERBLOCK_OVERHEAD,
raid.get_size(),
)
def test_remove_device_from_array_fails(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
with ExpectedException(
ValidationError,
re.escape("['Device does not belong to this array.']"),
):
raid.remove_device(
factory.make_PhysicalBlockDevice(node=node, size=device_size)
)
self.assertEqual(10, raid.filesystems.count())
self.assertEqual(
(9 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
class TestBcache(MAASServerTestCase):
def test_objects_is_BcacheManager(self):
self.assertIsInstance(Bcache.objects, BcacheManager)
def test_group_type_set_to_BCACHE(self):
obj = Bcache()
self.assertEqual(FILESYSTEM_GROUP_TYPE.BCACHE, obj.group_type)
def test_create_bcache_with_physical_block_devices(self):
"""Checks creation of a Bcache with physical block devices for caching
and backing roles."""
node = factory.make_Node()
backing_size = 10 * 1000 ** 4
cache_set = factory.make_CacheSet(node=node)
backing_device = factory.make_PhysicalBlockDevice(
node=node, size=backing_size
)
uuid = str(uuid4())
bcache = Bcache.objects.create_bcache(
name="bcache0",
uuid=uuid,
cache_set=cache_set,
backing_device=backing_device,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
)
# Verify the filesystems were properly created on the target devices
self.assertEqual(backing_size, bcache.get_size())
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_BACKING,
backing_device.get_effective_filesystem().fstype,
)
self.assertEqual(cache_set, bcache.cache_set)
self.assertEqual(
bcache, backing_device.get_effective_filesystem().filesystem_group
)
def test_create_bcache_with_virtual_block_devices(self):
"""Checks creation of a Bcache with virtual block devices for caching
and backing roles."""
node = factory.make_Node()
backing_size = 10 * 1000 ** 4
cache_size = 1000 ** 4
# A caching device that's ridiculously fast to read from, but slow for
# writing to it.
cache_device = RAID.objects.create_raid(
block_devices=[
factory.make_PhysicalBlockDevice(node=node, size=cache_size)
for _ in range(10)
],
level=FILESYSTEM_GROUP_TYPE.RAID_1,
).virtual_device
cache_set = factory.make_CacheSet(block_device=cache_device)
# A ridiculously reliable backing store.
backing_device = RAID.objects.create_raid(
block_devices=[
factory.make_PhysicalBlockDevice(node=node, size=backing_size)
for _ in range(12)
], # 10 data devices, 2 checksum devices.
level=FILESYSTEM_GROUP_TYPE.RAID_6,
).virtual_device
bcache = Bcache.objects.create_bcache(
cache_set=cache_set,
backing_device=backing_device,
cache_mode=CACHE_MODE_TYPE.WRITEAROUND,
)
# Verify the filesystems were properly created on the target devices
self.assertEqual(
(10 * backing_size) - RAID_SUPERBLOCK_OVERHEAD, bcache.get_size()
)
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_CACHE,
cache_device.get_effective_filesystem().fstype,
)
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_BACKING,
backing_device.get_effective_filesystem().fstype,
)
self.assertEqual(cache_set, bcache.cache_set)
self.assertEqual(
bcache, backing_device.get_effective_filesystem().filesystem_group
)
def test_create_bcache_with_partitions(self):
"""Checks creation of a Bcache with partitions for caching and backing
roles."""
node = factory.make_Node()
backing_size = 10 * 1000 ** 4
cache_size = 1000 ** 4
cache_partition = factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=cache_size
)
).add_partition()
cache_set = factory.make_CacheSet(partition=cache_partition)
backing_partition = factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=backing_size
)
).add_partition()
uuid = str(uuid4())
bcache = Bcache.objects.create_bcache(
name="bcache0",
uuid=uuid,
cache_set=cache_set,
backing_partition=backing_partition,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
)
# Verify the filesystems were properly created on the target devices
self.assertEqual(backing_partition.size, bcache.get_size())
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_CACHE,
cache_partition.get_effective_filesystem().fstype,
)
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_BACKING,
backing_partition.get_effective_filesystem().fstype,
)
self.assertEqual(cache_set, bcache.cache_set)
self.assertEqual(
bcache,
backing_partition.get_effective_filesystem().filesystem_group,
)
def test_create_bcache_with_block_devices_and_partition(self):
"""Checks creation of a Bcache with a partition for caching and a
physical block device for backing."""
node = factory.make_Node()
backing_size = 10 * 1000 ** 4
cache_size = 1000 ** 4
cache_partition = factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=cache_size
)
).add_partition()
cache_set = factory.make_CacheSet(partition=cache_partition)
backing_device = factory.make_PhysicalBlockDevice(
node=node, size=backing_size
)
uuid = str(uuid4())
bcache = Bcache.objects.create_bcache(
name="bcache0",
uuid=uuid,
cache_set=cache_set,
backing_device=backing_device,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
)
# Verify the filesystems were properly created on the target devices
self.assertEqual(backing_size, bcache.get_size())
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_CACHE,
cache_partition.get_effective_filesystem().fstype,
)
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_BACKING,
backing_device.get_effective_filesystem().fstype,
)
self.assertEqual(cache_set, bcache.cache_set)
self.assertEqual(
bcache, backing_device.get_effective_filesystem().filesystem_group
)
def test_delete_bcache(self):
"""Ensures deletion of a bcache also deletes bcache filesystems from
caching and backing devices."""
node = factory.make_Node()
backing_size = 10 * 1000 ** 4
cache_set = factory.make_CacheSet(node=node)
backing_device = factory.make_PhysicalBlockDevice(
node=node, size=backing_size
)
bcache = Bcache.objects.create_bcache(
cache_set=cache_set,
backing_device=backing_device,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
)
bcache.delete()
# Verify both filesystems were deleted.
self.assertIsNone(backing_device.get_effective_filesystem())
# Verify the cache_set is not deleted.
self.assertIsNotNone(reload_object(cache_set))
|
agpl-3.0
| 4,331,299,812,725,828,600
| 36.123395
| 79
| 0.574106
| false
| 3.977456
| true
| false
| false
|
MGEScan/mgescan
|
mgescan/utils.py
|
1
|
1147
|
import time
import os, errno
import subprocess as sub
def get_abspath(path):
try:
return os.path.abspath(path)
except:
# print [DEBUG] Failed to convert a path to an absolute path
return path
def create_directory(path, skipifexists=True):
if not os.path.exists(path):
os.makedirs(path)
else:
if skipifexists:
new_path = path + ".1"
return create_directory(new_path, skipifexists)
return get_abspath(path)
def exists(path):
try:
return os.path.exists(path)
except:
return False
def silentremove(filename):
try:
os.remove(filename)
except OSError as e: # this would be "except OSError, e:" before Python 2.6
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
raise # re-raise exception if a different error occured
def cmd_exists(cmd):
return sub.call(["which", cmd], stdout=sub.PIPE, stderr=sub.PIPE) == 0
def check_cmd(cmd):
if not cmd_exists(cmd):
print "=" * 50
print "[Error] " + cmd + " is not found. "
print "=" * 50
time.sleep(3)
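# A minimal usage sketch, assuming a scratch path such as "/tmp/mgescan_example":
# create_directory falls back to a "<path>.1" sibling when the directory already
# exists, and silentremove ignores files that are absent.
def example_usage(base_path="/tmp/mgescan_example"):
    first = create_directory(base_path) # created on the first call
    second = create_directory(base_path) # already exists, so a "<path>.1" sibling is returned
    silentremove(os.path.join(first, "missing.txt")) # no error even if the file is absent
    return first, second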
|
gpl-3.0
| 8,974,377,971,734,298,000
| 25.674419
| 79
| 0.61116
| false
| 3.7
| false
| false
| false
|
edineicolli/daruma-exemplo-python
|
scripts/fiscal/ui_fiscal_icfefetuarpagamentoformatado.py
|
1
|
4753
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_fiscal_icfefetuarpagamentoformatado.ui'
#
# Created: Mon Nov 24 22:25:42 2014
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
from pydaruma.pydaruma import iCFEfetuarPagamentoFormatado_ECF_Daruma
from scripts.fiscal.retornofiscal import tratarRetornoFiscal
class Ui_ui_FISCAL_iCFEfetuarPagamentoFormatado(QtGui.QWidget):
def __init__(self):
super(Ui_ui_FISCAL_iCFEfetuarPagamentoFormatado, self).__init__()
self.setupUi(self)
self.pushButtonEnviar.clicked.connect(self.on_pushButtonEnviar_clicked)
self.pushButtonCancelar.clicked.connect(self.on_pushButtonCancelar_clicked)
def on_pushButtonEnviar_clicked(self):
StrFPGTO = self.lineEditFormaPGTO.text()
StrValor = self.lineEditValor.text()
tratarRetornoFiscal(iCFEfetuarPagamentoFormatado_ECF_Daruma(StrFPGTO,StrValor), self)
def on_pushButtonCancelar_clicked(self):
self.close()
def setupUi(self, ui_FISCAL_iCFEfetuarPagamentoFormatado):
ui_FISCAL_iCFEfetuarPagamentoFormatado.setObjectName("ui_FISCAL_iCFEfetuarPagamentoFormatado")
ui_FISCAL_iCFEfetuarPagamentoFormatado.resize(309, 132)
ui_FISCAL_iCFEfetuarPagamentoFormatado.setMinimumSize(QtCore.QSize(309, 132))
ui_FISCAL_iCFEfetuarPagamentoFormatado.setMaximumSize(QtCore.QSize(309, 132))
self.verticalLayout = QtGui.QVBoxLayout(ui_FISCAL_iCFEfetuarPagamentoFormatado)
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.labelForma = QtGui.QLabel(ui_FISCAL_iCFEfetuarPagamentoFormatado)
self.labelForma.setObjectName("labelForma")
self.gridLayout.addWidget(self.labelForma, 0, 0, 1, 1)
self.lineEditFormaPGTO = QtGui.QLineEdit(ui_FISCAL_iCFEfetuarPagamentoFormatado)
self.lineEditFormaPGTO.setMaximumSize(QtCore.QSize(100, 16777215))
self.lineEditFormaPGTO.setObjectName("lineEditFormaPGTO")
self.gridLayout.addWidget(self.lineEditFormaPGTO, 0, 1, 1, 1)
self.labelValor = QtGui.QLabel(ui_FISCAL_iCFEfetuarPagamentoFormatado)
self.labelValor.setObjectName("labelValor")
self.gridLayout.addWidget(self.labelValor, 1, 0, 1, 1)
self.lineEditValor = QtGui.QLineEdit(ui_FISCAL_iCFEfetuarPagamentoFormatado)
self.lineEditValor.setMaximumSize(QtCore.QSize(70, 25))
self.lineEditValor.setObjectName("lineEditValor")
self.gridLayout.addWidget(self.lineEditValor, 1, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.pushButtonEnviar = QtGui.QPushButton(ui_FISCAL_iCFEfetuarPagamentoFormatado)
self.pushButtonEnviar.setObjectName("pushButtonEnviar")
self.horizontalLayout.addWidget(self.pushButtonEnviar)
self.pushButtonCancelar = QtGui.QPushButton(ui_FISCAL_iCFEfetuarPagamentoFormatado)
self.pushButtonCancelar.setObjectName("pushButtonCancelar")
self.horizontalLayout.addWidget(self.pushButtonCancelar)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(ui_FISCAL_iCFEfetuarPagamentoFormatado)
QtCore.QMetaObject.connectSlotsByName(ui_FISCAL_iCFEfetuarPagamentoFormatado)
def retranslateUi(self, ui_FISCAL_iCFEfetuarPagamentoFormatado):
ui_FISCAL_iCFEfetuarPagamentoFormatado.setWindowTitle(QtGui.QApplication.translate("ui_FISCAL_iCFEfetuarPagamentoFormatado", "iCFEfetuarPagamentoFormatado_ECF_Daruma", None, QtGui.QApplication.UnicodeUTF8))
self.labelForma.setText(QtGui.QApplication.translate("ui_FISCAL_iCFEfetuarPagamentoFormatado", "Forma Pagto:", None, QtGui.QApplication.UnicodeUTF8))
self.labelValor.setText(QtGui.QApplication.translate("ui_FISCAL_iCFEfetuarPagamentoFormatado", "Valor:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonEnviar.setText(QtGui.QApplication.translate("ui_FISCAL_iCFEfetuarPagamentoFormatado", "Enviar", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonCancelar.setText(QtGui.QApplication.translate("ui_FISCAL_iCFEfetuarPagamentoFormatado", "Cancelar", None, QtGui.QApplication.UnicodeUTF8))
|
gpl-2.0
| 5,616,585,045,271,257,000
| 59.164557
| 214
| 0.764149
| false
| 3.207152
| false
| false
| false
|
zibawa/zibawa
|
zibawa/urls.py
|
1
|
1400
|
"""zibawa URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls import include
from rest_framework import routers
from rest_framework.documentation import include_docs_urls
from IoT_pki import views
router = routers.DefaultRouter()
urlpatterns = [
url(r'^devices/', include('devices.urls',namespace='devices')),
url(r'^front/', include('front.urls',namespace='front')),
url(r'^admin/', admin.site.urls),
url(r'^', include('front.urls')),
url(r'^IoT_pki/', include('IoT_pki.urls',namespace='IoT_pki')),
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^docs/', include_docs_urls(title='zibawa_PKI'))
]
|
gpl-3.0
| -6,997,898,641,479,709,000
| 30.111111
| 83
| 0.681429
| false
| 3.45679
| false
| false
| false
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/_splom.py
|
1
|
12883
|
import _plotly_utils.basevalidators
class SplomValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="splom", parent_name="", **kwargs):
super(SplomValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Splom"),
data_docs=kwargs.pop(
"data_docs",
"""
customdata
                Assigns extra data to each datum. This may be
                useful when listening to hover, click and
                selection events. Note that "scatter" traces
                also append customdata items in the markers
                DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud
for customdata .
diagonal
:class:`plotly.graph_objects.splom.Diagonal`
instance or dict with compatible properties
dimensions
A tuple of
:class:`plotly.graph_objects.splom.Dimension`
instances or dicts with compatible properties
dimensiondefaults
When used in a template (as
layout.template.data.splom.dimensiondefaults),
sets the default property values to use for
elements of splom.dimensions
hoverinfo
Determines which trace information appear on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud
for hoverinfo .
hoverlabel
:class:`plotly.graph_objects.splom.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the
information that appear on hover box. Note that
this will override `hoverinfo`. Variables are
inserted using %{variable}, for example "y:
%{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for
several points, "xother" will be added to those
with different x positions from the first
point. An underscore before or after
"(x|y)other" will add a space on that side,
only when this field is shown. Numbers are
formatted using d3-format's syntax
%{variable:d3-format}, for example "Price:
%{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}".
https://github.com/d3/d3-time-
format#locale_format for details on the date
formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event
data described at this link
https://plotly.com/javascript/plotlyjs-
events/#event-data. Additionally, every
attributes that can be specified per-point (the
ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud
for hovertemplate .
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud
for hovertext .
ids
Assigns id labels to each datum. These ids for
object constancy of data points during
animation. Should be an array of strings, not
numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud
for ids .
legendgroup
Sets the legend group for this trace. Traces
part of the same legend group hide/show at the
same time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.splom.Legendgroupt
itle` instance or dict with compatible
properties
legendrank
Sets the legend rank for this trace. Items and
groups with smaller ranks are presented on
top/left side while with `*reversed*
`legend.traceorder` they are on bottom/right
side. The default legendrank is 1000, so that
you can use ranks less than 1000 to place
certain items before all unranked items, and
ranks greater than 1000 to go after all
unranked items.
marker
:class:`plotly.graph_objects.splom.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with
this trace that can be used in various text
attributes. Attributes such as trace `name`,
graph, axis and colorbar `title.text`,
annotation `text` `rangeselector`,
`updatemenues` and `sliders` `label` text all
support `meta`. To access the trace `meta`
values in an attribute in the same trace,
simply use `%{meta[i]}` where `i` is the index
or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or
key of the `meta` and `n` is the trace index.
metasrc
Sets the source reference on Chart Studio Cloud
for meta .
name
Sets the trace name. The trace name appear as
the legend item and on hover.
opacity
Sets the opacity of the trace.
selected
:class:`plotly.graph_objects.splom.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected
points. Has an effect only for traces that
support selections. Note that an empty array
means an empty selection where the `unselected`
are turned on for all points, whereas, any
other non-array values means no selection all
where the `selected` and `unselected` styles
have no effect.
showlegend
Determines whether or not an item corresponding
to this trace is shown in the legend.
showlowerhalf
Determines whether or not subplots on the lower
half from the diagonal are displayed.
showupperhalf
Determines whether or not subplots on the upper
half from the diagonal are displayed.
stream
:class:`plotly.graph_objects.splom.Stream`
instance or dict with compatible properties
text
Sets text elements associated with each (x,y)
pair to appear on hover. If a single string,
the same string appears over all the data
points. If an array of string, the items are
mapped in order to the this trace's (x,y)
coordinates.
textsrc
Sets the source reference on Chart Studio Cloud
for text .
uid
Assign an id to this trace, Use this to provide
object constancy between traces during
animations and transitions.
uirevision
Controls persistence of some user-driven
changes to the trace: `constraintrange` in
`parcoords` traces, as well as some `editable:
true` modifications such as `name` and
`colorbar.title`. Defaults to
`layout.uirevision`. Note that other user-
driven trace attribute changes are controlled
by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and
`colorbar.(x|y)` (accessible with `config:
{editable: true}`) is controlled by
`layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on
trace index if no `uid` is provided. So if your
app can add/remove traces before the end of the
`data` array, such that the same trace has a
different index, you can still preserve user-
driven changes if you give each trace a `uid`
that stays with it as it moves.
unselected
:class:`plotly.graph_objects.splom.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is
visible. If "legendonly", the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
xaxes
Sets the list of x axes corresponding to
dimensions of this splom trace. By default, a
splom will match the first N xaxes where N is
the number of input dimensions. Note that, in
case where `diagonal.visible` is false and
`showupperhalf` or `showlowerhalf` is false,
this splom trace will generate one less x-axis
and one less y-axis.
xhoverformat
                Sets the hover text formatting rule for `x`
using d3 formatting mini-languages which are
very similar to those in Python. For numbers,
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format.
And for dates see:
https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's
date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for
fractional seconds with n digits. For example,
*2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By
default the values are formatted using
`xaxis.hoverformat`.
yaxes
Sets the list of y axes corresponding to
dimensions of this splom trace. By default, a
splom will match the first N yaxes where N is
the number of input dimensions. Note that, in
case where `diagonal.visible` is false and
`showupperhalf` or `showlowerhalf` is false,
this splom trace will generate one less x-axis
and one less y-axis.
yhoverformat
                Sets the hover text formatting rule for `y`
using d3 formatting mini-languages which are
very similar to those in Python. For numbers,
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format.
And for dates see:
https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's
date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for
fractional seconds with n digits. For example,
*2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By
default the values are formatted using
`yaxis.hoverformat`.
""",
),
**kwargs
)
|
mit
| 1,610,731,165,411,505,000
| 48.55
| 70
| 0.54731
| false
| 5.084057
| false
| false
| false
|
nakagami/reportlab
|
src/reportlab/platypus/flowables.py
|
1
|
68383
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/flowables.py
__version__=''' $Id: flowables.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__="""
A flowable is a "floating element" in a document whose exact position is determined by the
other elements that precede it, such as a paragraph, a diagram interspersed between paragraphs,
a section header, etcetera. Examples of non-flowables include page numbering annotations,
headers, footers, fixed diagrams or logos, among others.
Flowables are defined here as objects which know how to determine their size and which
can draw themselves onto a page with respect to a relative "origin" position determined
at a higher level. The object's draw() method should assume that (0,0) corresponds to the
bottom left corner of the enclosing rectangle that will contain the object. The attributes
vAlign and hAlign may be used by 'packers' as hints as to how the object should be placed.
Some Flowables also know how to "split themselves". For example a
long paragraph might split itself between one page and the next.
Packers should set the canv attribute during wrap, split & draw operations to allow
the flowable to work out sizes etc in the proper context.
The "text" of a document usually consists mainly of a sequence of flowables which
flow into a document from top to bottom (with column and page breaks controlled by
higher level components).
"""
import os
from copy import deepcopy, copy
from reportlab.lib.colors import red, gray, lightgrey
from reportlab.lib.utils import fp_str, isStrType
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY
from reportlab.lib.styles import _baseFontName
from reportlab.pdfbase import pdfutils
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.rl_config import _FUZZ, overlapAttachedSpace, ignoreContainerActions
__all__=('TraceInfo','Flowable','XBox','Preformatted','Image','Spacer','PageBreak','SlowPageBreak',
'CondPageBreak','KeepTogether','Macro','CallerMacro','ParagraphAndImage',
'FailOnWrap','HRFlowable','PTOContainer','KeepInFrame','UseUpSpace',
'ListFlowable','ListItem','DDIndenter','LIIndenter',
'DocAssign', 'DocExec', 'DocAssert', 'DocPara', 'DocIf', 'DocWhile',
)
class TraceInfo:
"Holder for info about where an object originated"
def __init__(self):
self.srcFile = '(unknown)'
self.startLineNo = -1
self.startLinePos = -1
self.endLineNo = -1
self.endLinePos = -1
#############################################################
# Flowable Objects - a base class and a few examples.
# One is just a box to get some metrics. We also have
# a paragraph, an image and a special 'page break'
# object which fills the space.
#############################################################
class Flowable:
"""Abstract base class for things to be drawn. Key concepts:
1. It knows its size
2. It draws in its own coordinate system (this requires the
        base API to provide a translate() function).
"""
_fixedWidth = 0 #assume wrap results depend on arguments?
_fixedHeight = 0
def __init__(self):
self.width = 0
self.height = 0
self.wrapped = 0
        #these are hints to packers/frames as to how the flowable should be positioned
self.hAlign = 'LEFT' #CENTER/CENTRE or RIGHT
self.vAlign = 'BOTTOM' #MIDDLE or TOP
#optional holder for trace info
self._traceInfo = None
self._showBoundary = None
#many flowables handle text and must be processed in the
#absence of a canvas. tagging them with their encoding
#helps us to get conversions right. Use Python codec names.
self.encoding = None
def _drawOn(self,canv):
'''ensure canv is set on and then draw'''
self.canv = canv
self.draw()#this is the bit you overload
del self.canv
def _hAlignAdjust(self,x,sW=0):
if sW and hasattr(self,'hAlign'):
a = self.hAlign
if a in ('CENTER','CENTRE', TA_CENTER):
x += 0.5*sW
elif a in ('RIGHT',TA_RIGHT):
x += sW
elif a not in ('LEFT',TA_LEFT):
raise ValueError("Bad hAlign value "+str(a))
return x
def drawOn(self, canvas, x, y, _sW=0):
"Tell it to draw itself on the canvas. Do not override"
x = self._hAlignAdjust(x,_sW)
canvas.saveState()
canvas.translate(x, y)
self._drawOn(canvas)
if hasattr(self, '_showBoundary') and self._showBoundary:
#diagnostic tool support
canvas.setStrokeColor(gray)
canvas.rect(0,0,self.width, self.height)
canvas.restoreState()
def wrapOn(self, canv, aW, aH):
        '''intended for use by packers; allows setting the canvas on
        during the actual wrap'''
self.canv = canv
w, h = self.wrap(aW,aH)
del self.canv
return w, h
def wrap(self, availWidth, availHeight):
"""This will be called by the enclosing frame before objects
are asked their size, drawn or whatever. It returns the
size actually used."""
return (self.width, self.height)
def minWidth(self):
"""This should return the minimum required width"""
return getattr(self,'_minWidth',self.width)
def splitOn(self, canv, aW, aH):
        '''intended for use by packers; allows setting the canvas on
        during the actual split'''
self.canv = canv
S = self.split(aW,aH)
del self.canv
return S
def split(self, availWidth, availheight):
"""This will be called by more sophisticated frames when
wrap fails. Stupid flowables should return []. Clever flowables
should split themselves and return a list of flowables.
If they decide that nothing useful can be fitted in the
available space (e.g. if you have a table and not enough
space for the first row), also return []"""
return []
def getKeepWithNext(self):
"""returns boolean determining whether the next flowable should stay with this one"""
if hasattr(self,'keepWithNext'): return self.keepWithNext
elif hasattr(self,'style') and hasattr(self.style,'keepWithNext'): return self.style.keepWithNext
else: return 0
def getSpaceAfter(self):
"""returns how much space should follow this item if another item follows on the same page."""
if hasattr(self,'spaceAfter'): return self.spaceAfter
elif hasattr(self,'style') and hasattr(self.style,'spaceAfter'): return self.style.spaceAfter
else: return 0
def getSpaceBefore(self):
"""returns how much space should precede this item if another item precedess on the same page."""
if hasattr(self,'spaceBefore'): return self.spaceBefore
elif hasattr(self,'style') and hasattr(self.style,'spaceBefore'): return self.style.spaceBefore
else: return 0
def isIndexing(self):
"""Hook for IndexingFlowables - things which have cross references"""
return 0
def identity(self, maxLen=None):
'''
This method should attempt to return a string that can be used to identify
a particular flowable uniquely. The result can then be used for debugging
and or error printouts
'''
if hasattr(self, 'getPlainText'):
r = self.getPlainText(identify=1)
elif hasattr(self, 'text'):
r = str(self.text)
else:
r = '...'
if r and maxLen:
r = r[:maxLen]
return "<%s at %s%s>%s" % (self.__class__.__name__, hex(id(self)), self._frameName(), r)
def _doctemplateAttr(self,a):
return getattr(getattr(getattr(self,'canv',None),'_doctemplate',None),a,None)
def _frameName(self):
f = getattr(self,'_frame',None)
if not f: f = self._doctemplateAttr('frame')
if f and f.id: return ' frame=%s' % f.id
return ''
class XBox(Flowable):
"""Example flowable - a box with an x through it and a caption.
This has a known size, so does not need to respond to wrap()."""
def __init__(self, width, height, text = 'A Box'):
Flowable.__init__(self)
self.width = width
self.height = height
self.text = text
def __repr__(self):
return "XBox(w=%s, h=%s, t=%s)" % (self.width, self.height, self.text)
def draw(self):
self.canv.rect(0, 0, self.width, self.height)
self.canv.line(0, 0, self.width, self.height)
self.canv.line(0, self.height, self.width, 0)
#centre the text
self.canv.setFont(_baseFontName,12)
self.canv.drawCentredString(0.5*self.width, 0.5*self.height, self.text)
def _trimEmptyLines(lines):
#don't want the first or last to be empty
while len(lines) and lines[0].strip() == '':
lines = lines[1:]
while len(lines) and lines[-1].strip() == '':
lines = lines[:-1]
return lines
def _dedenter(text,dedent=0):
'''
tidy up text - carefully, it is probably code. If people want to
indent code within a source script, you can supply an arg to dedent
and it will chop off that many character, otherwise it leaves
left edge intact.
'''
lines = text.split('\n')
if dedent>0:
templines = _trimEmptyLines(lines)
lines = []
for line in templines:
line = line[dedent:].rstrip()
lines.append(line)
else:
lines = _trimEmptyLines(lines)
return lines
SPLIT_CHARS = "[{( ,.;:/\\-"
def splitLines(lines, maximum_length, split_characters, new_line_characters):
if split_characters is None:
split_characters = SPLIT_CHARS
if new_line_characters is None:
new_line_characters = ""
# Return a table of lines
lines_splitted = []
for line in lines:
if len(line) > maximum_length:
splitLine(line, lines_splitted, maximum_length, \
split_characters, new_line_characters)
else:
lines_splitted.append(line)
return lines_splitted
def splitLine(line_to_split, lines_splitted, maximum_length, \
split_characters, new_line_characters):
# Used to implement the characters added
#at the beginning of each new line created
first_line = True
# Check if the text can be splitted
while line_to_split and len(line_to_split)>0:
# Index of the character where we can split
split_index = 0
# Check if the line length still exceeds the maximum length
if len(line_to_split) <= maximum_length:
# Return the remaining of the line
split_index = len(line_to_split)
else:
# Iterate for each character of the line
for line_index in range(maximum_length):
# Check if the character is in the list
# of allowed characters to split on
if line_to_split[line_index] in split_characters:
split_index = line_index + 1
# If the end of the line was reached
# with no character to split on
if split_index==0:
split_index = line_index + 1
if first_line:
lines_splitted.append(line_to_split[0:split_index])
first_line = False
maximum_length -= len(new_line_characters)
else:
lines_splitted.append(new_line_characters + \
line_to_split[0:split_index])
# Remaining text to split
line_to_split = line_to_split[split_index:]
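# A small illustrative sketch, assuming a 30-character wrap width and "> " as the
# continuation prefix (the same role newLineChars plays in Preformatted below):
# it shows how splitLines re-wraps overlong lines at the default SPLIT_CHARS.
def _splitLines_example():
    lines = ["short line",
             "a rather long line that will be wrapped at spaces or punctuation"]
    return splitLines(lines, 30, None, "> ")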
class Preformatted(Flowable):
"""This is like the HTML <PRE> tag.
It attempts to display text exactly as you typed it in a fixed width "typewriter" font.
By default the line breaks are exactly where you put them, and it will not be wrapped.
You can optionally define a maximum line length and the code will be wrapped; and
extra characters to be inserted at the beginning of each wrapped line (e.g. '> ').
"""
def __init__(self, text, style, bulletText = None, dedent=0, maxLineLength=None, splitChars=None, newLineChars=""):
"""text is the text to display. If dedent is set then common leading space
will be chopped off the front (for example if the entire text is indented
6 spaces or more then each line will have 6 spaces removed from the front).
"""
self.style = style
self.bulletText = bulletText
self.lines = _dedenter(text,dedent)
if text and maxLineLength:
self.lines = splitLines(
self.lines,
maxLineLength,
splitChars,
newLineChars
)
def __repr__(self):
bT = self.bulletText
H = "Preformatted("
if bT is not None:
H = "Preformatted(bulletText=%s," % repr(bT)
return "%s'''\\ \n%s''')" % (H, '\n'.join(self.lines))
def wrap(self, availWidth, availHeight):
self.width = availWidth
self.height = self.style.leading*len(self.lines)
return (self.width, self.height)
def minWidth(self):
style = self.style
fontSize = style.fontSize
fontName = style.fontName
return max([stringWidth(line,fontName,fontSize) for line in self.lines])
def split(self, availWidth, availHeight):
#returns two Preformatted objects
#not sure why they can be called with a negative height
if availHeight < self.style.leading:
return []
linesThatFit = int(availHeight * 1.0 / self.style.leading)
text1 = '\n'.join(self.lines[0:linesThatFit])
text2 = '\n'.join(self.lines[linesThatFit:])
style = self.style
if style.firstLineIndent != 0:
style = deepcopy(style)
style.firstLineIndent = 0
return [Preformatted(text1, self.style), Preformatted(text2, style)]
def draw(self):
#call another method for historical reasons. Besides, I
#suspect I will be playing with alternate drawing routines
#so not doing it here makes it easier to switch.
cur_x = self.style.leftIndent
cur_y = self.height - self.style.fontSize
self.canv.addLiteral('%PreformattedPara')
if self.style.textColor:
self.canv.setFillColor(self.style.textColor)
tx = self.canv.beginText(cur_x, cur_y)
#set up the font etc.
tx.setFont( self.style.fontName,
self.style.fontSize,
self.style.leading)
for text in self.lines:
tx.textLine(text)
self.canv.drawText(tx)
class Image(Flowable):
"""an image (digital picture). Formats supported by PIL/Java 1.4 (the Python/Java Imaging Library
are supported. At the present time images as flowables are always centered horozontally
in the frame. We allow for two kinds of lazyness to allow for many images in a document
which could lead to file handle starvation.
lazy=1 don't open image until required.
lazy=2 open image when required then shut it.
"""
_fixedWidth = 1
_fixedHeight = 1
def __init__(self, filename, width=None, height=None, kind='direct', mask="auto", lazy=1):
"""If size to draw at not specified, get it from the image."""
self.hAlign = 'CENTER'
self._mask = mask
fp = hasattr(filename,'read')
if fp:
self._file = filename
self.filename = repr(filename)
else:
self._file = self.filename = filename
if not fp and os.path.splitext(filename)[1] in ['.jpg', '.JPG', '.jpeg', '.JPEG']:
# if it is a JPEG, will be inlined within the file -
# but we still need to know its size now
from reportlab.lib.utils import open_for_read
f = open_for_read(filename, 'b')
try:
try:
info = pdfutils.readJPEGInfo(f)
except:
#couldn't read as a JPEG, try like normal
self._setup(width,height,kind,lazy)
return
finally:
f.close()
self.imageWidth = info[0]
self.imageHeight = info[1]
self._img = None
self._setup(width,height,kind,0)
elif fp:
self._setup(width,height,kind,0)
else:
self._setup(width,height,kind,lazy)
def _setup(self,width,height,kind,lazy):
self._lazy = lazy
self._width = width
self._height = height
self._kind = kind
if lazy<=0: self._setup_inner()
def _setup_inner(self):
width = self._width
height = self._height
kind = self._kind
img = self._img
if img: self.imageWidth, self.imageHeight = img.getSize()
if self._lazy>=2: del self._img
if kind in ['direct','absolute']:
self.drawWidth = width or self.imageWidth
self.drawHeight = height or self.imageHeight
elif kind in ['percentage','%']:
self.drawWidth = self.imageWidth*width*0.01
self.drawHeight = self.imageHeight*height*0.01
elif kind in ['bound','proportional']:
factor = min(float(width)/self.imageWidth,float(height)/self.imageHeight)
self.drawWidth = self.imageWidth*factor
self.drawHeight = self.imageHeight*factor
def _restrictSize(self,aW,aH):
if self.drawWidth>aW+_FUZZ or self.drawHeight>aH+_FUZZ:
self._oldDrawSize = self.drawWidth, self.drawHeight
factor = min(float(aW)/self.drawWidth,float(aH)/self.drawHeight)
self.drawWidth *= factor
self.drawHeight *= factor
return self.drawWidth, self.drawHeight
def _unRestrictSize(self):
dwh = getattr(self,'_oldDrawSize',None)
if dwh:
self.drawWidth, self.drawHeight = dwh
def __getattr__(self,a):
if a=='_img':
from reportlab.lib.utils import ImageReader #this may raise an error
self._img = ImageReader(self._file)
del self._file
return self._img
elif a in ('drawWidth','drawHeight','imageWidth','imageHeight'):
self._setup_inner()
return self.__dict__[a]
raise AttributeError("<Image @ 0x%x>.%s" % (id(self),a))
def wrap(self, availWidth, availHeight):
#the caller may decide it does not fit.
return self.drawWidth, self.drawHeight
def draw(self):
lazy = self._lazy
if lazy>=2: self._lazy = 1
self.canv.drawImage( self._img or self.filename,
getattr(self,'_offs_x',0),
getattr(self,'_offs_y',0),
self.drawWidth,
self.drawHeight,
mask=self._mask,
)
if lazy>=2:
self._img = None
self._lazy = lazy
def identity(self,maxLen=None):
r = Flowable.identity(self,maxLen)
if r[-4:]=='>...' and isStrType(self.filename):
r = "%s filename=%s>" % (r[:-4],self.filename)
return r
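# A construction sketch for the sizing/laziness options above; "logo.jpg" and the
# 50% scaling are illustrative assumptions, not defaults of this module.
def _image_example():
    # open the file only when first needed and release it again after drawing
    img = Image("logo.jpg", width=50, height=50, kind="percentage", lazy=2)
    img.hAlign = 'LEFT' # images are centered by default; override per instance
    return img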
class NullDraw(Flowable):
def draw(self):
pass
class Spacer(NullDraw):
"""A spacer just takes up space and doesn't draw anything - it guarantees
a gap between objects."""
_fixedWidth = 1
_fixedHeight = 1
def __init__(self, width, height, isGlue=False):
self.width = width
if isGlue:
self.height = 1e-4
self.spacebefore = height
self.height = height
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,self.width, self.height)
class UseUpSpace(NullDraw):
def __init__(self):
pass
def __repr__(self):
return "%s()" % self.__class__.__name__
def wrap(self, availWidth, availHeight):
self.width = availWidth
self.height = availHeight
return (availWidth,availHeight-1e-8) #step back a point
class PageBreak(UseUpSpace):
"""Move on to the next page in the document.
This works by consuming all remaining space in the frame!"""
class SlowPageBreak(PageBreak):
pass
class CondPageBreak(Spacer):
"""use up a frame if not enough vertical space effectively CondFrameBreak"""
def __init__(self, height):
self.height = height
def __repr__(self):
return "CondPageBreak(%s)" %(self.height,)
def wrap(self, availWidth, availHeight):
if availHeight<self.height:
f = self._doctemplateAttr('frame')
if not f: return availWidth, availHeight
from reportlab.platypus.doctemplate import FrameBreak
f.add_generated_content(FrameBreak)
return 0, 0
def identity(self,maxLen=None):
return repr(self).replace(')',',frame=%s)'%self._frameName())
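# A short sketch of how the spacing and break flowables above are typically
# combined in a platypus story list; the inch-based gaps are illustrative
# assumptions only.
def _spacing_example():
    from reportlab.lib.units import inch
    return [
        Spacer(0, 0.5 * inch), # guaranteed half-inch vertical gap
        CondPageBreak(2 * inch), # break the frame unless two inches remain
        PageBreak(), # unconditionally move to the next page
    ]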
def _listWrapOn(F,availWidth,canv,mergeSpace=1,obj=None,dims=None):
'''return max width, required height for a list of flowables F'''
doct = getattr(canv,'_doctemplate',None)
cframe = getattr(doct,'frame',None)
if cframe:
from reportlab.platypus.doctemplate import _addGeneratedContent
doct_frame = cframe
cframe = doct.frame = deepcopy(doct_frame)
cframe._generated_content = None
del cframe._generated_content
try:
W = 0
H = 0
pS = 0
atTop = 1
F = F[:]
while F:
f = F.pop(0)
if hasattr(f,'frameAction'): continue
w,h = f.wrapOn(canv,availWidth,0xfffffff)
if dims is not None: dims.append((w,h))
if cframe:
_addGeneratedContent(F,cframe)
if w<=_FUZZ or h<=_FUZZ: continue
W = max(W,w)
H += h
if not atTop:
h = f.getSpaceBefore()
if mergeSpace: h = max(h-pS,0)
H += h
else:
if obj is not None: obj._spaceBefore = f.getSpaceBefore()
atTop = 0
pS = f.getSpaceAfter()
H += pS
if obj is not None: obj._spaceAfter = pS
return W, H-pS
finally:
if cframe:
doct.frame = doct_frame
def _flowableSublist(V):
"if it isn't a list or tuple, wrap it in a list"
if not isinstance(V,(list,tuple)): V = V is not None and [V] or []
from reportlab.platypus.doctemplate import LCActionFlowable
assert not [x for x in V if isinstance(x,LCActionFlowable)],'LCActionFlowables not allowed in sublists'
return V
class _ContainerSpace: #Abstract some common container like behaviour
def getSpaceBefore(self):
for c in self._content:
if not hasattr(c,'frameAction'):
return c.getSpaceBefore()
return 0
def getSpaceAfter(self,content=None):
#this needs 2.4
#for c in reversed(content or self._content):
reverseContent = (content or self._content)[:]
reverseContent.reverse()
for c in reverseContent:
if not hasattr(c,'frameAction'):
return c.getSpaceAfter()
return 0
class KeepTogether(_ContainerSpace,Flowable):
def __init__(self,flowables,maxHeight=None):
self._content = _flowableSublist(flowables)
self._maxHeight = maxHeight
def __repr__(self):
f = self._content
L = map(repr,f)
L = "\n"+"\n".join(L)
L = L.replace("\n", "\n ")
return "%s(%s,maxHeight=%s)" % (self.__class__.__name__,L,self._maxHeight)
def wrap(self, aW, aH):
dims = []
W,H = _listWrapOn(self._content,aW,self.canv,dims=dims)
self._H = H
self._H0 = dims and dims[0][1] or 0
self._wrapInfo = aW,aH
return W, 0xffffff # force a split
def split(self, aW, aH):
if getattr(self,'_wrapInfo',None)!=(aW,aH): self.wrap(aW,aH)
S = self._content[:]
atTop = getattr(self,'_frame',None)
if atTop: atTop = getattr(atTop,'_atTop',None)
C0 = self._H>aH and (not self._maxHeight or aH>self._maxHeight)
C1 = (self._H0>aH) or C0 and atTop
if C0 or C1:
if C0:
from reportlab.platypus.doctemplate import FrameBreak
A = FrameBreak
else:
from reportlab.platypus.doctemplate import NullActionFlowable
A = NullActionFlowable
S.insert(0,A())
return S
def identity(self, maxLen=None):
msg = "<%s at %s%s> containing :%s" % (self.__class__.__name__,hex(id(self)),self._frameName(),"\n".join([f.identity() for f in self._content]))
if maxLen:
return msg[0:maxLen]
else:
return msg
class Macro(Flowable):
"""This is not actually drawn (i.e. it has zero height)
but is executed when it would fit in the frame. Allows direct
access to the canvas through the object 'canvas'"""
def __init__(self, command):
self.command = command
def __repr__(self):
return "Macro(%s)" % repr(self.command)
def wrap(self, availWidth, availHeight):
return (0,0)
def draw(self):
        exec(self.command, globals(), {'canvas':self.canv})
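# A usage sketch, assuming the command string comes from a trusted source; the
# only name injected into its namespace by Macro.draw above is 'canvas'.
def _macro_example():
    # draws a thin rule near the bottom of the current page when the macro is
    # reached in the story
    return Macro("canvas.line(36, 36, 200, 36)")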
class CallerMacro(Flowable):
'''
like Macro, but with callable command(s)
drawCallable(self)
wrapCallable(self,aW,aH)
'''
def __init__(self, drawCallable=None, wrapCallable=None):
_ = lambda *args: None
self._drawCallable = drawCallable or _
self._wrapCallable = wrapCallable or _
def __repr__(self):
return "CallerMacro(%s)" % repr(self.command)
def wrap(self, aW, aH):
self._wrapCallable(self,aW,aH)
return (0,0)
def draw(self):
self._drawCallable(self)
class ParagraphAndImage(Flowable):
'''combine a Paragraph and an Image'''
def __init__(self,P,I,xpad=3,ypad=3,side='right'):
self.P = P
self.I = I
self.xpad = xpad
self.ypad = ypad
self._side = side
def getSpaceBefore(self):
return max(self.P.getSpaceBefore(),self.I.getSpaceBefore())
def getSpaceAfter(self):
return max(self.P.getSpaceAfter(),self.I.getSpaceAfter())
def wrap(self,availWidth,availHeight):
wI, hI = self.I.wrap(availWidth,availHeight)
self.wI = wI
self.hI = hI
# work out widths array for breaking
self.width = availWidth
P = self.P
style = P.style
xpad = self.xpad
ypad = self.ypad
leading = style.leading
leftIndent = style.leftIndent
later_widths = availWidth - leftIndent - style.rightIndent
intermediate_widths = later_widths - xpad - wI
first_line_width = intermediate_widths - style.firstLineIndent
P.width = 0
nIW = int((hI+ypad)/(leading*1.0))
P.blPara = P.breakLines([first_line_width] + nIW*[intermediate_widths]+[later_widths])
if self._side=='left':
self._offsets = [wI+xpad]*(1+nIW)+[0]
P.height = len(P.blPara.lines)*leading
self.height = max(hI,P.height)
return (self.width, self.height)
def split(self,availWidth, availHeight):
P, wI, hI, ypad = self.P, self.wI, self.hI, self.ypad
if hI+ypad>availHeight or len(P.frags)<=0: return []
S = P.split(availWidth,availHeight)
if not S: return S
P = self.P = S[0]
del S[0]
style = P.style
P.height = len(self.P.blPara.lines)*style.leading
self.height = max(hI,P.height)
return [self]+S
def draw(self):
canv = self.canv
if self._side=='left':
self.I.drawOn(canv,0,self.height-self.hI)
self.P._offsets = self._offsets
try:
self.P.drawOn(canv,0,0)
finally:
del self.P._offsets
else:
self.I.drawOn(canv,self.width-self.wI-self.xpad,self.height-self.hI)
self.P.drawOn(canv,0,0)
class FailOnWrap(NullDraw):
def wrap(self, availWidth, availHeight):
raise ValueError("FailOnWrap flowable wrapped and failing as ordered!")
class FailOnDraw(Flowable):
def wrap(self, availWidth, availHeight):
return 0,0
def draw(self):
raise ValueError("FailOnDraw flowable drawn, and failing as ordered!")
class HRFlowable(Flowable):
'''Like the hr tag'''
def __init__(self,
width="80%",
thickness=1,
lineCap='round',
color=lightgrey,
spaceBefore=1, spaceAfter=1,
hAlign='CENTER', vAlign='BOTTOM',
dash=None):
Flowable.__init__(self)
self.width = width
self.lineWidth = thickness
self.lineCap=lineCap
self.spaceBefore = spaceBefore
self.spaceAfter = spaceAfter
self.color = color
self.hAlign = hAlign
self.vAlign = vAlign
self.dash = dash
def __repr__(self):
return "HRFlowable(width=%s, height=%s)" % (self.width, self.height)
def wrap(self, availWidth, availHeight):
w = self.width
if type(w) is type(''):
w = w.strip()
if w.endswith('%'): w = availWidth*float(w[:-1])*0.01
else: w = float(w)
w = min(w,availWidth)
self._width = w
return w, self.lineWidth
def draw(self):
canv = self.canv
canv.saveState()
canv.setLineWidth(self.lineWidth)
canv.setLineCap({'butt':0,'round':1, 'square': 2}[self.lineCap.lower()])
canv.setStrokeColor(self.color)
if self.dash: canv.setDash(self.dash)
canv.line(0, 0, self._width, self.height)
canv.restoreState()
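# A construction sketch for the horizontal rule above; the 60% width, colour and
# dash pattern are illustrative assumptions, not defaults.
def _hr_example():
    from reportlab.lib.colors import grey
    return HRFlowable(width="60%", thickness=0.5, color=grey,
                      spaceBefore=6, spaceAfter=6, dash=(2, 2))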
class _PTOInfo:
def __init__(self,trailer,header):
self.trailer = _flowableSublist(trailer)
self.header = _flowableSublist(header)
def cdeepcopy(obj):
if hasattr(obj,'deepcopy'):
return obj.deepcopy()
else:
return deepcopy(obj)
class _Container(_ContainerSpace): #Abstract some common container like behaviour
def drawOn(self, canv, x, y, _sW=0, scale=1.0, content=None, aW=None):
'''we simulate being added to a frame'''
from reportlab.platypus.doctemplate import ActionFlowable
pS = 0
if aW is None: aW = self.width
aW *= scale
if content is None:
content = self._content
x = self._hAlignAdjust(x,_sW*scale)
y += self.height*scale
for c in content:
if not ignoreContainerActions and isinstance(c,ActionFlowable):
c.apply(self.canv._doctemplate)
continue
w, h = c.wrapOn(canv,aW,0xfffffff)
if (w<_FUZZ or h<_FUZZ) and not getattr(c,'_ZEROSIZE',None): continue
if c is not content[0]: h += max(c.getSpaceBefore()-pS,0)
y -= h
c.drawOn(canv,x,y,_sW=aW-w)
if c is not content[-1]:
pS = c.getSpaceAfter()
y -= pS
def copyContent(self,content=None):
C = [].append
for c in (content or self._content):
C(cdeepcopy(c))
self._content = C.__self__
class PTOContainer(_Container,Flowable):
'''PTOContainer(contentList,trailerList,headerList)
A container for flowables decorated with trailer & header lists.
If the split operation would be called then the trailer and header
lists are injected before and after the split. This allows specialist
"please turn over" and "continued from previous" like behaviours.'''
def __init__(self,content,trailer=None,header=None):
I = _PTOInfo(trailer,header)
self._content = C = []
for _ in _flowableSublist(content):
if isinstance(_,PTOContainer):
C.extend(_._content)
else:
C.append(_)
if not hasattr(_,'_ptoinfo'): _._ptoinfo = I
def wrap(self,availWidth,availHeight):
self.width, self.height = _listWrapOn(self._content,availWidth,self.canv)
return self.width,self.height
def split(self, availWidth, availHeight):
if availHeight<0: return []
canv = self.canv
C = self._content
x = i = H = pS = hx = 0
n = len(C)
I2W = {}
for x in range(n):
c = C[x]
I = c._ptoinfo
if I not in I2W.keys():
T = I.trailer
Hdr = I.header
tW, tH = _listWrapOn(T, availWidth, self.canv)
if len(T): #trailer may have no content
tSB = T[0].getSpaceBefore()
else:
tSB = 0
I2W[I] = T,tW,tH,tSB
else:
T,tW,tH,tSB = I2W[I]
_, h = c.wrapOn(canv,availWidth,0xfffffff)
if x:
hx = max(c.getSpaceBefore()-pS,0)
h += hx
pS = c.getSpaceAfter()
H += h+pS
tHS = tH+max(tSB,pS)
if H+tHS>=availHeight-_FUZZ: break
i += 1
#first retract last thing we tried
H -= (h+pS)
#attempt a sub split on the last one we have
aH = (availHeight-H-tHS-hx)*0.99999
if aH>=0.05*availHeight:
SS = c.splitOn(canv,availWidth,aH)
else:
SS = []
if not SS:
j = i
while i>1 and C[i-1].getKeepWithNext():
i -= 1
C[i].keepWithNext = 0
if i==1 and C[0].getKeepWithNext():
#robin's black sheep
i = j
C[0].keepWithNext = 0
F = [UseUpSpace()]
if len(SS)>1:
R1 = C[:i] + SS[:1] + T + F
R2 = Hdr + SS[1:]+C[i+1:]
elif not i:
return []
else:
R1 = C[:i]+T+F
R2 = Hdr + C[i:]
T = R1 + [PTOContainer(R2,[copy(x) for x in I.trailer],[copy(x) for x in I.header])]
return T
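# Illustrative usage sketch (added commentary, not part of the original module).
# PTOContainer decorates its content with trailer/header flowables that only
# appear when the content is split across frames; styleN, texts and story are
# assumed to exist in the caller's code:
#
#   pto = PTOContainer([Paragraph(t, styleN) for t in texts],
#                      trailer=[Paragraph("please turn over", styleN)],
#                      header=[Paragraph("continued from previous page", styleN)])
#   story.append(pto)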
#utility functions used by KeepInFrame
def _hmodel(s0,s1,h0,h1):
# calculate the parameters in the model
# h = a/s**2 + b/s
a11 = 1./s0**2
a12 = 1./s0
a21 = 1./s1**2
a22 = 1./s1
det = a11*a22-a12*a21
b11 = a22/det
b12 = -a12/det
b21 = -a21/det
b22 = a11/det
a = b11*h0+b12*h1
b = b21*h0+b22*h1
return a,b
def _qsolve(h,ab):
'''solve the model v = a/s**2 + b/s for an s which gives us v==h'''
a,b = ab
if abs(a)<=_FUZZ:
return b/h
t = 0.5*b/a
from math import sqrt
f = -h/a
r = t*t-f
if r<0: return None
r = sqrt(r)
if t>=0:
s1 = -t - r
else:
s1 = -t + r
s2 = f/s1
return max(1./s1, 1./s2)
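# Worked sketch (added for illustration, not original code). The two helpers
# above fit the model h = a/s**2 + b/s through two observed (scale, height)
# pairs and then solve for the scale giving a target height; the numbers below
# are invented purely to show the round trip:
#
#   ab = _hmodel(1.0, 2.0, 400.0, 100.0)  # heights measured at scales 1 and 2
#   s = _qsolve(250.0, ab)                # ~1.26; 400/1.26**2 is about 250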
class KeepInFrame(_Container,Flowable):
def __init__(self, maxWidth, maxHeight, content=[], mergeSpace=1, mode='shrink', name='',hAlign='LEFT',vAlign='BOTTOM'):
'''mode describes the action to take when the content overflows
error raise an error in the normal way
overflow ignore the overflow ie just draw it and report maxWidth, maxHeight
shrink shrinkToFit
truncate fit as much as possible
'''
self.name = name
self.maxWidth = maxWidth
self.maxHeight = maxHeight
self.mode = mode
assert mode in ('error','overflow','shrink','truncate'), '%s invalid mode value %s' % (self.identity(),mode)
assert maxHeight>=0, '%s invalid maxHeight value %s' % (self.identity(),maxHeight)
if mergeSpace is None: mergeSpace = overlapAttachedSpace
self.mergespace = mergeSpace
self._content = content or []
self.vAlign = vAlign
self.hAlign = hAlign
def _getAvailableWidth(self):
return self.maxWidth - self._leftExtraIndent - self._rightExtraIndent
def identity(self, maxLen=None):
return "<%s at %s%s%s> size=%sx%s" % (self.__class__.__name__, hex(id(self)), self._frameName(),
getattr(self,'name','') and (' name="%s"'% getattr(self,'name','')) or '',
getattr(self,'maxWidth','') and (' maxWidth=%s'%fp_str(getattr(self,'maxWidth',0))) or '',
getattr(self,'maxHeight','')and (' maxHeight=%s' % fp_str(getattr(self,'maxHeight')))or '')
def wrap(self,availWidth,availHeight):
from reportlab.platypus.doctemplate import LayoutError
mode = self.mode
maxWidth = float(min(self.maxWidth or availWidth,availWidth))
maxHeight = float(min(self.maxHeight or availHeight,availHeight))
W, H = _listWrapOn(self._content,maxWidth,self.canv)
if (mode=='error' and (W>maxWidth+_FUZZ or H>maxHeight+_FUZZ)):
ident = 'content %sx%s too large for %s' % (W,H,self.identity(30))
#leave to keep apart from the raise
raise LayoutError(ident)
elif W<=maxWidth+_FUZZ and H<=maxHeight+_FUZZ:
self.width = W-_FUZZ #we take what we get
self.height = H-_FUZZ
elif mode in ('overflow','truncate'): #we lie
self.width = min(maxWidth,W)-_FUZZ
self.height = min(maxHeight,H)-_FUZZ
else:
def func(x):
W, H = _listWrapOn(self._content,x*maxWidth,self.canv)
W /= x
H /= x
return W, H
W0 = W
H0 = H
s0 = 1
if W>maxWidth+_FUZZ:
#squeeze out the excess width and or Height
s1 = W/maxWidth
W, H = func(s1)
if H<=maxHeight+_FUZZ:
self.width = W-_FUZZ
self.height = H-_FUZZ
self._scale = s1
return W,H
s0 = s1
H0 = H
W0 = W
s1 = H/maxHeight
W, H = func(s1)
self.width = W-_FUZZ
self.height = H-_FUZZ
self._scale = s1
if H<min(0.95*maxHeight,maxHeight-10) or H>=maxHeight+_FUZZ:
#the standard case W should be OK, H is short we want
#to find the smallest s with H<=maxHeight
H1 = H
for f in 0, 0.01, 0.05, 0.10, 0.15:
#apply the quadratic model
s = _qsolve(maxHeight*(1-f),_hmodel(s0,s1,H0,H1))
W, H = func(s)
if H<=maxHeight+_FUZZ and W<=maxWidth+_FUZZ:
self.width = W-_FUZZ
self.height = H-_FUZZ
self._scale = s
break
return self.width, self.height
def drawOn(self, canv, x, y, _sW=0):
scale = getattr(self,'_scale',1.0)
truncate = self.mode=='truncate'
ss = scale!=1.0 or truncate
if ss:
canv.saveState()
if truncate:
p = canv.beginPath()
p.rect(x, y, self.width,self.height)
canv.clipPath(p,stroke=0)
else:
canv.translate(x,y)
x=y=0
canv.scale(1.0/scale, 1.0/scale)
_Container.drawOn(self, canv, x, y, _sW=_sW, scale=scale)
if ss: canv.restoreState()
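# Illustrative usage sketch (added commentary, not part of the original module).
# KeepInFrame forces a group of flowables into a fixed box; with mode='shrink'
# the wrap() above hunts for a scale via _hmodel/_qsolve, 'truncate' clips, and
# 'error' raises LayoutError. styleN and story are assumed names:
#
#   boxed = KeepInFrame(200, 100,
#                       content=[Paragraph("long text " * 50, styleN)],
#                       mode='shrink')
#   story.append(boxed)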
class ImageAndFlowables(_Container,Flowable):
'''combine a list of flowables and an Image'''
def __init__(self,I,F,imageLeftPadding=0,imageRightPadding=3,imageTopPadding=0,imageBottomPadding=3,
imageSide='right', imageHref=None):
self._content = _flowableSublist(F)
self._I = I
self._irpad = imageRightPadding
self._ilpad = imageLeftPadding
self._ibpad = imageBottomPadding
self._itpad = imageTopPadding
self._side = imageSide
self.imageHref = imageHref
def deepcopy(self):
c = copy(self) #shallow
self._reset()
c.copyContent() #partially deep?
return c
def getSpaceAfter(self):
if hasattr(self,'_C1'):
C = self._C1
elif hasattr(self,'_C0'):
C = self._C0
else:
C = self._content
return _Container.getSpaceAfter(self,C)
def getSpaceBefore(self):
return max(self._I.getSpaceBefore(),_Container.getSpaceBefore(self))
def _reset(self):
for a in ('_wrapArgs','_C0','_C1'):
try:
delattr(self,a)
except:
pass
def wrap(self,availWidth,availHeight):
canv = self.canv
I = self._I
if hasattr(self,'_wrapArgs'):
if self._wrapArgs==(availWidth,availHeight) and getattr(I,'_oldDrawSize',None) is None:
return self.width,self.height
self._reset()
I._unRestrictSize()
self._wrapArgs = availWidth, availHeight
I.wrap(availWidth,availHeight)
wI, hI = I._restrictSize(availWidth,availHeight)
self._wI = wI
self._hI = hI
ilpad = self._ilpad
irpad = self._irpad
ibpad = self._ibpad
itpad = self._itpad
self._iW = availWidth - irpad - wI - ilpad
aH = itpad + hI + ibpad
W,H0,self._C0,self._C1 = self._findSplit(canv,self._iW,aH)
if W>self._iW+_FUZZ:
self._C0 = []
self._C1 = self._content
aH = self._aH = max(aH,H0)
self.width = availWidth
if not self._C1:
self.height = aH
else:
W1,H1 = _listWrapOn(self._C1,availWidth,canv)
self.height = aH+H1
return self.width, self.height
def split(self,availWidth, availHeight):
if hasattr(self,'_wrapArgs'):
I = self._I
if self._wrapArgs!=(availWidth,availHeight) or getattr(I,'_oldDrawSize',None) is not None:
self._reset()
I._unRestrictSize()
W,H=self.wrap(availWidth,availHeight)
if self._aH>availHeight: return []
C1 = self._C1
if C1:
S = C1[0].split(availWidth,availHeight-self._aH)
if not S:
_C1 = []
else:
_C1 = [S[0]]
C1 = S[1:]+C1[1:]
else:
_C1 = []
return [ImageAndFlowables(
self._I,
self._C0+_C1,
imageLeftPadding=self._ilpad,
imageRightPadding=self._irpad,
imageTopPadding=self._itpad,
imageBottomPadding=self._ibpad,
imageSide=self._side, imageHref=self.imageHref)
]+C1
def drawOn(self, canv, x, y, _sW=0):
if self._side=='left':
Ix = x + self._ilpad
Fx = Ix+ self._irpad + self._wI
else:
Ix = x + self.width-self._wI-self._irpad
Fx = x
self._I.drawOn(canv,Ix,y+self.height-self._itpad-self._hI)
if self.imageHref:
canv.linkURL(self.imageHref, (Ix, y+self.height-self._itpad-self._hI, Ix + self._wI, y+self.height), relative=1)
if self._C0:
_Container.drawOn(self, canv, Fx, y, content=self._C0, aW=self._iW)
if self._C1:
_Container.drawOn(self, canv, x, y-self._aH,content=self._C1)
def _findSplit(self,canv,availWidth,availHeight,mergeSpace=1,obj=None):
'''return max width, required height for a list of flowables F'''
W = 0
H = 0
pS = sB = 0
atTop = 1
F = self._content
for i,f in enumerate(F):
w,h = f.wrapOn(canv,availWidth,0xfffffff)
if w<=_FUZZ or h<=_FUZZ: continue
W = max(W,w)
if not atTop:
s = f.getSpaceBefore()
if mergeSpace: s = max(s-pS,0)
H += s
else:
if obj is not None: obj._spaceBefore = f.getSpaceBefore()
atTop = 0
if H>=availHeight or w>availWidth:
return W, availHeight, F[:i],F[i:]
H += h
if H>availHeight:
from reportlab.platypus.paragraph import Paragraph
aH = availHeight-(H-h)
if isinstance(f,(Paragraph,Preformatted)):
leading = f.style.leading
nH = leading*int(aH/float(leading))+_FUZZ
if nH<aH: nH += leading
availHeight += nH-aH
aH = nH
S = cdeepcopy(f).splitOn(canv,availWidth,aH)
if not S:
return W, availHeight, F[:i],F[i:]
else:
return W,availHeight,F[:i]+S[:1],S[1:]+F[i+1:]
pS = f.getSpaceAfter()
H += pS
if obj is not None: obj._spaceAfter = pS
return W, H-pS, F, []
class AnchorFlowable(Spacer):
'''create a bookmark in the pdf'''
_ZEROSIZE=1
def __init__(self,name):
Spacer.__init__(self,0,0)
self._name = name
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,self._name)
def wrap(self,aW,aH):
return 0,0
def draw(self):
self.canv.bookmarkHorizontal(self._name,0,0)
class FrameSplitter(NullDraw):
'''When encountered this flowable should either switch directly to nextTemplate
if remaining space in the current frame is less than gap+required or it should
temporarily modify the current template to have the frames from nextTemplate
that are listed in nextFrames and switch to the first of those frames.
'''
_ZEROSIZE=1
def __init__(self,nextTemplate,nextFrames=[],gap=10,required=72):
self.nextTemplate=nextTemplate
self.nextFrames=nextFrames or []
self.gap=gap
self.required=required
def wrap(self,aW,aH):
frame = self._frame
from reportlab.platypus.doctemplate import NextPageTemplate,CurrentFrameFlowable,LayoutError
G=[NextPageTemplate(self.nextTemplate)]
if aH<self.gap+self.required-_FUZZ:
#we are going straight to the nextTemplate with no attempt to modify the frames
G.append(PageBreak())
else:
#we are going to modify the incoming templates
templates = self._doctemplateAttr('pageTemplates')
if templates is None:
raise LayoutError('%s called in non-doctemplate environment'%self.identity())
T=[t for t in templates if t.id==self.nextTemplate]
if not T:
raise LayoutError('%s.nextTemplate=%s not found' % (self.identity(),self.nextTemplate))
T=T[0]
F=[f for f in T.frames if f.id in self.nextFrames]
N=[f.id for f in F]
N=[f for f in self.nextFrames if f not in N]
if N:
raise LayoutError('%s frames=%r not found in pageTemplate(%s)\n%r has frames %r' % (self.identity(),N,T.id,T,[f.id for f in T.frames]))
T=self._doctemplateAttr('pageTemplate')
def unwrap(canv,doc,T=T,onPage=T.onPage,oldFrames=T.frames):
T.frames=oldFrames
T.onPage=onPage
onPage(canv,doc)
T.onPage=unwrap
h=aH-self.gap
for i,f in enumerate(F):
f=copy(f)
f.height=h
f._reset()
F[i]=f
T.frames=F
G.append(CurrentFrameFlowable(F[0].id))
frame.add_generated_content(*G)
return 0,0
from reportlab.lib.sequencer import _type2formatter
_bulletNames = dict(
circle=u'\u25cf',
square=u'\u25a0',
disc=u'\u25cf',
diamond=u'\u25c6',
rarrowhead=u'\u27a4',
)
def _bulletFormat(value,type='1',format=None):
if type=='bullet':
s = _bulletNames.get(value,value)
else:
s = _type2formatter[type](int(value))
if format:
if isinstance(format,basestring):
s = format % s
elif callable(format):
s = format(s)
else:
raise ValueError('unexpected BulletDrawer format %r' % format)
return s
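# Example values (added for illustration, not original code): _bulletFormat('3')
# returns '3', _bulletFormat('3', type='i') should give the roman numeral 'iii',
# and _bulletFormat('square', type='bullet') returns the Unicode square from
# _bulletNames above. A format such as '%s.' turns '3' into '3.'.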
class BulletDrawer:
def __init__(self,
value='0',
bulletAlign='left',
bulletType='1',
bulletColor='black',
bulletFontName='Helvetica',
bulletFontSize=12,
bulletOffsetY=0,
bulletDedent=0,
bulletDir='ltr',
bulletFormat=None,
):
self.value = value
self._bulletAlign = bulletAlign
self._bulletType = bulletType
self._bulletColor = bulletColor
self._bulletFontName = bulletFontName
self._bulletFontSize = bulletFontSize
self._bulletOffsetY = bulletOffsetY
self._bulletDedent = bulletDedent
self._bulletDir = bulletDir
self._bulletFormat = bulletFormat
def drawOn(self,indenter,canv,x,y,_sW=0):
value = self.value
if not value: return
canv.saveState()
canv.translate(x, y)
y = indenter.height-self._bulletFontSize+self._bulletOffsetY
if self._bulletDir=='rtl':
x = indenter.width - indenter._rightIndent + self._bulletDedent
else:
x = indenter._leftIndent - self._bulletDedent
canv.setFont(self._bulletFontName,self._bulletFontSize)
canv.setFillColor(self._bulletColor)
bulletAlign = self._bulletAlign
value = _bulletFormat(value,self._bulletType,self._bulletFormat)
if bulletAlign=='left':
canv.drawString(x,y,value)
elif bulletAlign=='right':
canv.drawRightString(x,y,value)
elif bulletAlign in ('center','centre'):
canv.drawCentredString(x,y,value)
elif bulletAlign.startswith('numeric') or bulletAlign.startswith('decimal'):
pc = bulletAlign[7:].strip() or '.'
canv.drawAlignedString(x,y,value,pc)
else:
raise ValueError('Invalid bulletAlign: %r' % bulletAlign)
canv.restoreState()
def _computeBulletWidth(b,value):
value = _bulletFormat(value,b._bulletType,b._bulletFormat)
return stringWidth(value,b._bulletFontName,b._bulletFontSize)
class DDIndenter(Flowable):
_IndenterAttrs = '_flowable _leftIndent _rightIndent width height'.split()
def __init__(self,flowable,leftIndent=0,rightIndent=0):
self._flowable = flowable
self._leftIndent = leftIndent
self._rightIndent = rightIndent
self.width = None
self.height = None
def split(self, aW, aH):
S = self._flowable.split(aW-self._leftIndent-self._rightIndent, aH)
return [
DDIndenter(s,
leftIndent=self._leftIndent,
rightIndent=self._rightIndent,
) for s in S
]
def drawOn(self, canv, x, y, _sW=0):
self._flowable.drawOn(canv,x+self._leftIndent,y,max(0,_sW-self._leftIndent-self._rightIndent))
def wrap(self, aW, aH):
w,h = self._flowable.wrap(aW-self._leftIndent-self._rightIndent, aH)
self.width = w+self._leftIndent+self._rightIndent
self.height = h
return self.width,h
def __getattr__(self,a):
if a in self._IndenterAttrs:
try:
return self.__dict__[a]
except KeyError:
if a not in ('spaceBefore','spaceAfter'):
raise
return getattr(self._flowable,a)
def __setattr__(self,a,v):
if a in self._IndenterAttrs:
self.__dict__[a] = v
else:
setattr(self._flowable,a,v)
def __delattr__(self,a):
if a in self._IndenterAttrs:
del self.__dict__[a]
else:
delattr(self._flowable,a)
def identity(self,maxLen=None):
return '%s containing %s' % (self.__class__.__name__,self._flowable.identity(maxLen))
class LIIndenter(DDIndenter):
_IndenterAttrs = '_flowable _bullet _leftIndent _rightIndent width height spaceBefore spaceAfter'.split()
def __init__(self,flowable,leftIndent=0,rightIndent=0,bullet=None, spaceBefore=None, spaceAfter=None):
self._flowable = flowable
self._bullet = bullet
self._leftIndent = leftIndent
self._rightIndent = rightIndent
self.width = None
self.height = None
if spaceBefore is not None:
self.spaceBefore = spaceBefore
if spaceAfter is not None:
self.spaceAfter = spaceAfter
def split(self, aW, aH):
S = self._flowable.split(aW-self._leftIndent-self._rightIndent, aH)
return [
LIIndenter(s,
leftIndent=self._leftIndent,
rightIndent=self._rightIndent,
bullet = (s is S[0] and self._bullet or None),
) for s in S
]
def drawOn(self, canv, x, y, _sW=0):
if self._bullet:
self._bullet.drawOn(self,canv,x,y,0)
self._flowable.drawOn(canv,x+self._leftIndent,y,max(0,_sW-self._leftIndent-self._rightIndent))
from reportlab.lib.styles import ListStyle
class ListItem:
def __init__(self,
flowables, #the initial flowables
style=None,
#leftIndent=18,
#rightIndent=0,
#spaceBefore=None,
#spaceAfter=None,
#bulletType='1',
#bulletColor='black',
#bulletFontName='Helvetica',
#bulletFontSize=12,
#bulletOffsetY=0,
#bulletDedent='auto',
#bulletDir='ltr',
#bulletFormat=None,
**kwds
):
if not isinstance(flowables,(list,tuple)):
flowables = (flowables,)
self._flowables = flowables
params = self._params = {}
if style:
if not isinstance(style,ListStyle):
raise ValueError('%s style argument (%r) not a ListStyle' % (self.__class__.__name__,style))
self._style = style
for k in ListStyle.defaults:
if k in kwds:
v = kwds.get(k)
elif style:
v = getattr(style,k)
else:
continue
params[k] = v
for k in ('value', 'spaceBefore','spaceAfter'):
v = kwds.get(k,getattr(style,k,None))
if v is not None:
params[k] = v
class _LIParams:
def __init__(self,flowable,params,value,first):
self.flowable = flowable
self.params = params
self.value = value
self.first= first
class ListFlowable(_Container,Flowable):
def __init__(self,
flowables, #the initial flowables
start=1,
style=None,
#leftIndent=18,
#rightIndent=0,
#spaceBefore=None,
#spaceAfter=None,
#bulletType='1',
#bulletColor='black',
#bulletFontName='Helvetica',
#bulletFontSize=12,
#bulletOffsetY=0,
#bulletDedent='auto',
#bulletDir='ltr',
#bulletFormat=None,
**kwds
):
self._flowables = flowables
if style:
if not isinstance(style,ListStyle):
raise ValueError('%s style argument not a ListStyle' % self.__class__.__name__)
self.style = style
for k,v in ListStyle.defaults.items():
setattr(self,'_'+k,kwds.get(k,getattr(style,k,v)))
if start is None:
start = getattr(self,'_start',None)
if start is None:
if getattr(self,'_bulletType','1')=='bullet':
start = 'circle'
else:
start = '1'
self._start = start
for k in ('spaceBefore','spaceAfter'):
v = kwds.get(k,getattr(style,k,None))
if v is not None:
setattr(self,k,v)
self._content = self._getContent()
del self._flowables
self._dims = None
def wrap(self,aW,aH):
if self._dims!=aW:
self.width, self.height = _listWrapOn(self._content,aW,self.canv)
self._dims = aW
return self.width,self.height
def split(self,aW,aH):
return self._content
def _flowablesIter(self):
for f in self._flowables:
if isinstance(f,(list,tuple)):
if f:
for i, z in enumerate(f):
yield i==0 and not isinstance(z,LIIndenter), z
elif isinstance(f,ListItem):
params = f._params
if not params:
# simple case: just a list-like object with no extra parameters
for i, z in enumerate(f._flowables):
if isinstance(z,LIIndenter):
raise ValueError('LIIndenter not allowed in ListItem')
yield i==0, z
else:
params = params.copy()
value = params.pop('value',None)
spaceBefore = params.pop('spaceBefore',None)
spaceAfter = params.pop('spaceAfter',None)
n = len(f._flowables) - 1
for i, z in enumerate(f._flowables):
P = params.copy()
if not i and spaceBefore is not None:
P['spaceBefore'] = spaceBefore
if i==n and spaceAfter is not None:
P['spaceAfter'] = spaceAfter
if i: value=None
yield 0, _LIParams(z,P,value,i==0)
else:
yield not isinstance(f,LIIndenter), f
def _makeLIIndenter(self,flowable, bullet, params=None):
if params:
leftIndent = params.get('leftIndent',self._leftIndent)
rightIndent = params.get('rightIndent',self._rightIndent)
spaceBefore = params.get('spaceBefore',None)
spaceAfter = params.get('spaceAfter',None)
return LIIndenter(flowable,leftIndent,rightIndent,bullet,spaceBefore=spaceBefore,spaceAfter=spaceAfter)
else:
return LIIndenter(flowable,self._leftIndent,self._rightIndent,bullet)
def _makeBullet(self,value,params=None):
if params is None:
def getp(a):
return getattr(self,'_'+a)
else:
style = getattr(params,'style',None)
def getp(a):
if a in params: return params[a]
if style and a in style.__dict__: return getattr(self,a)
return getattr(self,'_'+a)
return BulletDrawer(
value=value,
bulletAlign=getp('bulletAlign'),
bulletType=getp('bulletType'),
bulletColor=getp('bulletColor'),
bulletFontName=getp('bulletFontName'),
bulletFontSize=getp('bulletFontSize'),
bulletOffsetY=getp('bulletOffsetY'),
bulletDedent=getp('calcBulletDedent'),
bulletDir=getp('bulletDir'),
bulletFormat=getp('bulletFormat'),
)
def _getContent(self):
value = self._start
bt = self._bulletType
inc = int(bt in '1aAiI')
if inc: value = int(value)
bd = self._bulletDedent
if bd=='auto':
align = self._bulletAlign
dir = self._bulletDir
if dir=='ltr' and align=='left':
bd = self._leftIndent
elif align=='right':
bd = self._rightIndent
else:
#we need to work out the maximum width of any of the labels
tvalue = value
maxW = 0
for d,f in self._flowablesIter():
if d:
maxW = max(maxW,_computeBulletWidth(self,tvalue))
if inc: tvalue += inc
elif isinstance(f,LIIndenter):
b = f._bullet
if b:
if b.bulletType==bt:
maxW = max(maxW,_computeBulletWidth(b,b.value))
tvalue = int(b.value)
else:
maxW = max(maxW,_computeBulletWidth(self,tvalue))
if inc: tvalue += inc
if dir=='ltr':
if align=='right':
bd = self._leftIndent - maxW
else:
bd = self._leftIndent - maxW*0.5
elif align=='left':
bd = self._rightIndent - maxW
else:
bd = self._rightIndent - maxW*0.5
self._calcBulletDedent = bd
S = []
aS = S.append
i=0
for d,f in self._flowablesIter():
fparams = {}
if not i:
i += 1
spaceBefore = getattr(self,'spaceBefore',None)
if spaceBefore is not None:
fparams['spaceBefore'] = spaceBefore
if d:
aS(self._makeLIIndenter(f,bullet=self._makeBullet(value),params=fparams))
if inc: value += inc
elif isinstance(f,LIIndenter):
b = f._bullet
if b:
if b.bulletType!=bt:
raise ValueError('Included LIIndenter bulletType=%s != OrderedList bulletType=%s' % (b.bulletType,bt))
value = int(b.value)
else:
f._bullet = self._makeBullet(value,params=getattr(f,'params',None))
if fparams:
f.__dict__['spaceBefore'] = max(f.__dict__.get('spaceBefore',0),spaceBefore)
aS(f)
if inc: value += inc
elif isinstance(f,_LIParams):
fparams.update(f.params)
z = self._makeLIIndenter(f.flowable,bullet=None,params=fparams)
if f.first:
if f.value is not None:
value = f.value
if inc: value = int(value)
z._bullet = self._makeBullet(value,f.params)
if inc: value += inc
aS(z)
else:
aS(self._makeLIIndenter(f,bullet=None,params=fparams))
spaceAfter = getattr(self,'spaceAfter',None)
if spaceAfter is not None:
f=S[-1]
f.__dict__['spaceAfter'] = max(f.__dict__.get('spaceAfter',0),spaceAfter)
return S
class TopPadder(Flowable):
'''wrap a single flowable so that space is added above it to fill
out the frame, making the wrapped flowable appear at the
bottom of its frame'''
def __init__(self,f):
self.__dict__['_TopPadder__f'] = f
def wrap(self,aW,aH):
w,h = self.__f.wrap(aW,aH)
self.__dict__['_TopPadder__dh'] = aH-h
return w,h
def split(self,aW,aH):
S = self.__f.split(aW,aH)
if len(S)>1:
S[0] = TopPadder(S[0])
return S
def drawOn(self, canvas, x, y, _sW=0):
self.__f.drawOn(canvas,x,y-max(0,self.__dh-1e-8),_sW)
def __setattr__(self,a,v):
setattr(self.__f,a,v)
def __getattr__(self,a):
return getattr(self.__f,a)
def __delattr__(self,a):
delattr(self.__f,a)
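# Illustrative note (added, not part of the original module): TopPadder is
# typically used to push a closing flowable to the bottom of its frame, e.g.
#
#   story.append(TopPadder(Paragraph("signature line", styleN)))
#
# where story and styleN are assumed to exist in the caller's code.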
class DocAssign(NullDraw):
'''At wrap time this flowable evaluates var=expr in the doctemplate namespace'''
_ZEROSIZE=1
def __init__(self,var,expr,life='forever'):
Flowable.__init__(self)
self.args = var,expr,life
def funcWrap(self,aW,aH):
NS=self._doctemplateAttr('_nameSpace')
NS.update(dict(availableWidth=aW,availableHeight=aH))
try:
return self.func()
finally:
for k in 'availableWidth','availableHeight':
try:
del NS[k]
except:
pass
def func(self):
return self._doctemplateAttr('d'+self.__class__.__name__[1:])(*self.args)
def wrap(self,aW,aH):
self.funcWrap(aW,aH)
return 0,0
class DocExec(DocAssign):
'''at wrap time exec stmt in doc._nameSpace'''
def __init__(self,stmt,lifetime='forever'):
Flowable.__init__(self)
self.args=stmt,lifetime
class DocPara(DocAssign):
'''at wrap time create a paragraph with the value of expr as text
if format is specified it should use %(__expr__)s for string interpolation
of the expression expr (if any). It may also use %(name)s interpolations
for other variables in the namespace.
suitable defaults will be used if style and klass are None
'''
def __init__(self,expr,format=None,style=None,klass=None,escape=True):
Flowable.__init__(self)
self.expr=expr
self.format=format
self.style=style
self.klass=klass
self.escape=escape
def func(self):
expr = self.expr
if expr:
if not isStrType(expr): expr = str(expr)
return self._doctemplateAttr('docEval')(expr)
def add_content(self,*args):
self._doctemplateAttr('frame').add_generated_content(*args)
def get_value(self,aW,aH):
value = self.funcWrap(aW,aH)
if self.format:
NS=self._doctemplateAttr('_nameSpace').copy()
NS.update(dict(availableWidth=aW,availableHeight=aH))
NS['__expr__'] = value
value = self.format % NS
else:
value = str(value)
return value
def wrap(self,aW,aH):
value = self.get_value(aW,aH)
P = self.klass
if not P:
from reportlab.platypus.paragraph import Paragraph as P
style = self.style
if not style:
from reportlab.lib.styles import getSampleStyleSheet
style=getSampleStyleSheet()['Code']
if self.escape:
from xml.sax.saxutils import escape
value=escape(value)
self.add_content(P(value,style=style))
return 0,0
class DocAssert(DocPara):
def __init__(self,cond,format=None):
Flowable.__init__(self)
self.expr=cond
self.format=format
def funcWrap(self,aW,aH):
self._cond = DocPara.funcWrap(self,aW,aH)
return self._cond
def wrap(self,aW,aH):
value = self.get_value(aW,aH)
if not bool(self._cond):
raise AssertionError(value)
return 0,0
class DocIf(DocPara):
def __init__(self,cond,thenBlock,elseBlock=[]):
Flowable.__init__(self)
self.expr = cond
self.blocks = elseBlock or [],thenBlock
def checkBlock(self,block):
if not isinstance(block,(list,tuple)):
block = (block,)
return block
def wrap(self,aW,aH):
self.add_content(*self.checkBlock(self.blocks[int(bool(self.funcWrap(aW,aH)))]))
return 0,0
class DocWhile(DocIf):
def __init__(self,cond,whileBlock):
Flowable.__init__(self)
self.expr = cond
self.block = self.checkBlock(whileBlock)
def wrap(self,aW,aH):
if bool(self.funcWrap(aW,aH)):
self.add_content(*(list(self.block)+[self]))
return 0,0
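# Illustrative sketch (added, not original code). The Doc* flowables above are
# evaluated in the doctemplate namespace at wrap time, so a story built for a
# BaseDocTemplate that supports docAssign/docEval might contain:
#
#   story.append(DocAssign('total', '42'))
#   story.append(DocPara('total', format='Total so far: %(__expr__)s'))
#   story.append(DocAssert('total > 0', 'total must be positive'))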
|
bsd-3-clause
| -4,955,683,830,492,444,000
| 35.490395
| 152
| 0.553047
| false
| 3.812399
| false
| false
| false
|
stoilov/Programming101
|
week3/HackBulgariaAPI/team_matcher.py
|
1
|
2715
|
import requests
import random
class MatchCourse:
def __init__(self):
self.url = "https://hackbulgaria.com/api/students/"
self.records = []
self.courses = None
def get_info(self):
self.records = requests.get(self.url, verify=False)
if self.records.status_code != 200:
self.records = False
return False
self.records = self.records.json()
return self.records
def print_messages(self):
print("\nHello, you can use one the following commands")
print("list_courses - this lists all the courses that are available now.")
print("match_teams <course_id>, <team_size>, <group_time>\n\n")
def list_courses(self):
if self.records is False:
return False
self.courses = set()
for record in self.records:
for course in record["courses"]:
self.courses.add(course["name"])
self.courses = list(self.courses)
for key, course in enumerate(self.courses):
print("[{}] {}".format(key + 1, course))
def match_teams(self, course_id, team_size, group_time):
people_in_teams = []
for record in self.records:
for course in record["courses"]:
course_group = course["group"] == group_time
course_name = course["name"] == self.courses[course_id - 1]
available = record["available"] is True
if course_name and course_group and available:
people_in_teams.append(record["name"])
random.shuffle(people_in_teams)
for key, student in enumerate(people_in_teams):
print(student)
if (key + 1) % team_size == 0:
print("==========")
def get_input(self):
command = input("Enter command> ")
command = command.split(" ")
return command
def interface(self):
command = self.get_input()
while command[0] != "exit":
if command[0] == "list_courses":
self.list_courses()
command = self.get_input()
elif command[0] == "match_teams":
command[1] = int(command[1])
command[2] = int(command[2])
command[3] = int(command[3])
self.match_teams(command[1], command[2], command[3])
command = self.get_input()
else:
print("Bad input!")
command = self.get_input()
else:
print("Goodbye!")
def main():
hackbulgaria = MatchCourse()
hackbulgaria.get_info()
hackbulgaria.print_messages()
hackbulgaria.interface()
if __name__ == "__main__":
main()
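# Example session (illustrative only; the actual course list depends on the
# live hackbulgaria.com API at the time of the request):
#
#   Enter command> list_courses
#   [1] Programming 101
#   Enter command> match_teams 1 2 1
#   ...teams of two, separated by ========== lines...
#   Enter command> exit
#   Goodbye!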
|
mit
| -4,352,641,921,128,700,400
| 30.569767
| 82
| 0.539963
| false
| 4.12614
| false
| false
| false
|
PnX-SI/GeoNature
|
backend/geonature/utils/module.py
|
1
|
4711
|
import os
import sys
from pathlib import Path
from importlib import import_module
from pkg_resources import load_entry_point, get_entry_info, iter_entry_points
from geonature.utils.utilstoml import load_and_validate_toml
from geonature.utils.config_schema import ManifestSchemaProdConf
from geonature.utils.env import GN_EXTERNAL_MODULE
from geonature.core.gn_commons.models import TModules
class NoManifestFound(Exception):
pass
def import_legacy_module(module_object):
sys.path.insert(0, str(GN_EXTERNAL_MODULE)) # to be able to import non-packaged modules
try:
# module dist is module_code.lower() because the symlink is created like this
# in utils.gn_module_import.copy_in_external_mods
module_dist = module_object.module_code.lower()
module_dir = GN_EXTERNAL_MODULE / module_dist
manifest_path = module_dir / 'manifest.toml'
if not manifest_path.is_file():
raise NoManifestFound()
module_manifest = load_and_validate_toml(manifest_path, ManifestSchemaProdConf)
module_blueprint = import_module(f'{module_dist}.backend.blueprint').blueprint
module_config = {
'ID_MODULE': module_object.id_module,
'MODULE_CODE': module_object.module_code,
'MODULE_URL': '/' + module_object.module_path.replace(' ', ''),
'FRONTEND_PATH': str(module_dir / 'frontend'),
}
module_schema = import_module(f'{module_object.module_code.lower()}.config.conf_schema_toml').GnModuleSchemaConf
config_path = module_dir / "config/conf_gn_module.toml"
module_config.update(load_and_validate_toml(config_path, module_schema))
module_blueprint.config = module_config
return module_config, module_blueprint
finally:
sys.path.pop(0)
def import_packaged_module(module_dist, module_object):
module_code = module_object.module_code
module_dir = GN_EXTERNAL_MODULE / module_object.module_path
frontend_path = os.environ.get(f'GEONATURE_{module_code}_FRONTEND_PATH',
str(module_dir / 'frontend'))
module_config = {
'MODULE_CODE': module_code,
'MODULE_URL': '/' + module_object.module_path,
'FRONTEND_PATH': frontend_path,
}
module_schema = load_entry_point(module_dist, 'gn_module', 'config_schema')
config_path = os.environ.get(f'GEONATURE_{module_object.module_code}_CONFIG_FILE')
if not config_path: # fallback to legacy conf path guessing
config_path = str(module_dir / 'config/conf_gn_module.toml')
module_config.update(load_and_validate_toml(config_path, module_schema))
blueprint_entry_point = get_entry_info(module_dist, 'gn_module', 'blueprint')
if blueprint_entry_point:
module_blueprint = blueprint_entry_point.load()
module_blueprint.config = module_config
else:
module_blueprint = None
return (module_object, module_config, module_blueprint)
def get_dist_from_code(module_code):
for entry_point in iter_entry_points('gn_module', 'code'):
if module_code == entry_point.load():
return entry_point.dist
def import_gn_module(module_object):
"""
return (module_object, module_config, module_blueprint)
module_blueprint may be None in case of front-only module
"""
# try to find a packaged module with the given code
module_dist = get_dist_from_code(module_object.module_code)
if module_dist:
return import_packaged_module(module_dist, module_object)
else:
module_config, module_blueprint = import_legacy_module(module_object)
return (module_object, module_config, module_blueprint)
def import_backend_enabled_modules():
"""
yield (module_object, module_config, module_blueprint)
for backend-enabled modules in gn_commons.t_modules
"""
enabled_modules = TModules.query.filter_by(active_backend=True).all()
for module_object in enabled_modules:
# ignore internal module (i.e. without symlink in external module directory)
if not Path(GN_EXTERNAL_MODULE / module_object.module_code.lower()).exists():
continue
yield import_gn_module(module_object)
def list_frontend_enabled_modules():
"""
yield module_config
for frontend-enabled modules in gn_commons.t_modules
"""
enabled_modules = TModules.query.filter_by(active_frontend=True).all()
for module_object in enabled_modules:
# ignore internal module (i.e. without symlink in external module directory)
if not Path(GN_EXTERNAL_MODULE / module_object.module_code.lower()).exists():
continue
yield module_object
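# Illustrative sketch (added commentary, not part of the original module): one
# way these helpers could be wired inside an application factory; `app` is an
# assumed Flask instance.
#
#   for module_object, module_config, module_blueprint in import_backend_enabled_modules():
#       if module_blueprint is not None:
#           app.register_blueprint(module_blueprint, url_prefix=module_config['MODULE_URL'])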
|
gpl-3.0
| 4,306,982,387,413,549,600
| 40.690265
| 120
| 0.680959
| false
| 3.738889
| true
| false
| false
|
alexanderfefelov/nav
|
python/nav/eventengine/topology.py
|
1
|
7788
|
#
# Copyright (C) 2012 UNINETT
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Topology evaluation functions for event processing"""
import socket
import datetime
import networkx
from networkx.exception import NetworkXException
from nav.models.manage import SwPortVlan, Netbox, Prefix, Arp, Cam
import logging
_logger = logging.getLogger(__name__)
def netbox_appears_reachable(netbox):
"""Returns True if netbox appears to be reachable through the known
topology.
"""
target_path = get_path_to_netbox(netbox)
nav = NAVServer.make_for(netbox.ip)
nav_path = get_path_to_netbox(nav) if nav else True
_logger.debug("reachability paths, target_path=%(target_path)r, "
"nav_path=%(nav_path)r", locals())
return bool(target_path and nav_path)
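# Illustrative call chain (added for clarity, not original code), assuming
# `netbox` is a Netbox ORM instance handled by the event engine:
#
#   if not netbox_appears_reachable(netbox):
#       # the box sits behind something that is itself down, so a boxDown
#       # event for it is probably only a symptom of the real outage
#       ...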
def get_path_to_netbox(netbox):
"""Returns a likely path from netbox to its apparent gateway/router.
If any switch on the path, or the router itself, is down,
no current path exists and a False value is returned. However,
if there is insufficient information for NAV to find a likely path,
a True value is returned.
"""
prefix = netbox.get_prefix()
if not prefix:
_logger.warning("couldn't find prefix for %s", netbox)
return True
router_ports = prefix.get_router_ports()
if router_ports:
router_port = router_ports[0]
else:
_logger.warning("couldn't find router ports for %s", prefix)
return True
router = router_port.interface.netbox
_logger.debug("reachability check for %s on %s (router: %s)",
netbox, prefix, router)
graph = get_graph_for_vlan(prefix.vlan)
try:
netbox.add_to_graph(graph)
except AttributeError:
pass
strip_down_nodes_from_graph(graph, keep=netbox)
if netbox not in graph or router not in graph:
if router.up == router.UP_UP:
_logger.warning("%(netbox)s topology problem: router %(router)s "
"is up, but not in VLAN graph for %(prefix)r. "
"Defaulting to 'reachable' status.", locals())
return True
_logger.debug("%s not reachable, router or box not in graph: %r",
netbox, graph.edges())
return False
try:
path = networkx.shortest_path(graph, netbox, router)
except NetworkXException as error:
_logger.debug("an internal networkx exception was raised in "
"shortest_path, assuming no path was found: %s", error)
path = []
else:
_logger.debug("path to %s: %r", netbox, path)
return path
def get_graph_for_vlan(vlan):
"""Builds a simple topology graph of the active netboxes in vlan.
Any netbox that seems to be down at the moment will not be included in
the graph.
:returns: A networkx.Graph object.
"""
swpvlan = SwPortVlan.objects.filter(vlan=vlan).select_related(
'interface', 'interface__netbox', 'interface__to_netbox',
'interface__to_interface')
graph = networkx.MultiGraph(name='graph for vlan %s' % vlan)
for swp in swpvlan:
source = swp.interface.netbox
source_ifc = swp.interface
target = swp.interface.to_netbox
target_ifc = swp.interface.to_interface
if target:
key = tuple(sorted(
(source_ifc.id, target_ifc.id if target_ifc else None)))
data = set([source_ifc, target_ifc])
graph.add_edge(source, target, key=key, data=data)
return graph
def strip_down_nodes_from_graph(graph, keep=None):
"""Strips all nodes (netboxes) from graph that are currently down.
:param keep: A node to keep regardless of its current status.
"""
removable = set(node for node in graph.nodes_iter()
if node.up != node.UP_UP and node != keep)
graph.remove_nodes_from(removable)
return len(removable)
def strip_down_links_from_graph(graph):
"""Strips all edges (links) from graph where any of the involved
interfaces are down.
"""
def _is_down(data):
ifcs = data.get('data', [])
return any(ifc and ifc.ifoperstatus == ifc.OPER_DOWN for ifc in ifcs)
removable = set(
(u, v, key)
for u, v, key, data in graph.edges_iter(data=True, keys=True)
if _is_down(data)
)
graph.remove_edges_from(removable)
return len(removable)
###
### Functions for locating the NAV server itself
###
class NAVServer(object):
"""A simple mockup of a Netbox representing the NAV server itself"""
UP_UP = Netbox.UP_UP
@classmethod
def make_for(cls, dest):
"""Creates a NAVServer instance with the source IP address of the
local host used for routing traffic to dest.
:param dest: An IP address
"""
ipaddr = get_source_address_for(dest)
if ipaddr:
return cls(ipaddr)
def __init__(self, ip):
self.sysname = "NAV"
self.ip = ip
self.up = Netbox.UP_UP
def get_prefix(self):
"""Gets the prefix for the NAV servers ip"""
matches = Prefix.objects.contains_ip(self.ip)
if matches:
return matches[0]
def add_to_graph(self, graph):
"""Adds edge between myself and all neighboring switches"""
for switch in self.get_switches_from_cam():
graph.add_edge(self, switch)
def get_switches_from_cam(self):
"""Gets all neighboring switches"""
mac = self.get_mac_from_arp()
if mac:
records = Cam.objects.filter(
mac=mac,
end_time__gte=datetime.datetime.max
).select_related('netbox')
return list(set(cam.netbox for cam in records))
else:
return []
def get_mac_from_arp(self):
"""Finds the NAV server's MAC address based on its IP address"""
arp = Arp.objects.extra(
where=['ip = %s'],
params=[self.ip]
).filter(end_time__gte=datetime.datetime.max)
if arp:
return arp[0].mac
def __repr__(self):
return "{self.__class__.__name__}({self.ip!r})".format(self=self)
def get_source_address_for(dest):
"""Gets the source IP address used by this host when attempting to
contact the destination host.
:param dest: An IP address string.
:return: An IP address string, or None if no address was found.
"""
family, sockaddr = _get_target_dgram_addr(dest)
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect(sockaddr)
except socket.error as err:
_logger.warning("Error when getting NAV's source address for "
"connecting to %(dest)s: %(err)s", locals())
return
addrinfo = sock.getsockname()
sock.close()
return addrinfo[0]
def _get_target_dgram_addr(target):
"""Returns a (family, sockaddr) tuple for the target address for
a SOCK_DGRAM socket type.
"""
for (family, socktype,
_proto, _canonname,
sockaddr) in socket.getaddrinfo(target, 1):
if socktype == socket.SOCK_DGRAM:
return family, sockaddr
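# Note (added for illustration): get_source_address_for('192.0.2.10') opens a
# UDP socket and "connects" it; no packets are sent for SOCK_DGRAM, but the
# kernel picks the local source address it would route from, which is what
# getsockname() then reports.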
|
gpl-2.0
| 4,639,183,928,962,233,000
| 31.45
| 79
| 0.626605
| false
| 3.795322
| false
| false
| false
|
MikeLaptev/sandbox_python
|
mera/unittest_example/generate_and_load_unittest_update_four.py
|
1
|
4101
|
'''
Created on Jul 30, 2015
@author: Mikhail
'''
import unittest
import re
from json_file_generator import MyOwnJSONProcessing as json_processing
from json_file_generator import __version__ as json_file_generator_version
from unittest.case import skip, skipIf
class GenerateAndLoadJSONTestUpdateFour(unittest.TestCase):
expected_data = {}
@classmethod
def setUpClass(cls):
print "{} for {} has been called".format(cls.setUpClass.__name__, cls.__name__)
cls.expected_data = json_processing.generate_data_for_json_obj()
def setUp(self):
print "{} for {} has been called".format(self.setUp.__name__, self._testMethodName)
self.file_name = "generate_and_load_unittest.json"
self.original_name = json_processing.generate_json_file_with_data(self.file_name, self.expected_data)
def tearDown(self):
print "{} for {} has been called".format(self.tearDown.__name__, self._testMethodName)
@classmethod
def tearDownClass(cls):
print "{} for {} has been called".format(cls.tearDownClass.__name__, cls.__name__)
json_processing.clean_up()
def testGenerateAndLoadJSONValidKeys(self):
print "Processing file {}".format(self.original_name)
actual_data = json_processing.load_data_from_json_file(self.original_name)
for exp_key in self.expected_data.keys():
self.assertTrue(actual_data.has_key(exp_key), "Expected key '{}' has not been found in loaded json".format(exp_key))
for act_key in actual_data.keys():
self.assertTrue(self.expected_data.has_key(act_key), "Loaded key '{}' has not been found in dumped json".format(act_key))
# General version of skip
@skip("old functionality")
def testGenerateAndLoadJSONValidKeysHasOnlyLetters1(self):
print "Processing file {}".format(self.original_name)
actual_data = json_processing.load_data_from_json_file(self.original_name)
for act_key in actual_data.keys():
self.assertTrue(re.match("[^a-zA-Z]", act_key) is None, "Key should contains only alpha symbols: {}".format(act_key))
# Version of skip that check version of our json_file_generator
@skipIf(json_file_generator_version > 1, "This functionality is not supported in this version of the json file generator")
def testGenerateAndLoadJSONValidKeysHasOnlyLetters2(self):
print "Processing file {}".format(self.original_name)
actual_data = json_processing.load_data_from_json_file(self.original_name)
for act_key in actual_data.keys():
self.assertIsNone(re.match("[^a-zA-Z]", act_key), "Key should contains only alpha symbols: {}".format(act_key))
def testGenerateAndLoadJSONValidValues(self):
print "Processing file {}".format(self.original_name)
actual_data = json_processing.load_data_from_json_file(self.original_name)
for exp_key, exp_value in self.expected_data.items():
self.assertEquals(exp_value, actual_data.get(exp_key), "Dictionaries have different values '{}' for first and '{}' for second for the same key".format(exp_value, actual_data.get(exp_key)))
for act_key, act_value in actual_data.items():
self.assertEquals(act_value, self.expected_data.get(act_key), "Dictionaries have different values '{}' for first and '{}' for second for the same key".format(act_value, self.expected_data.get(act_key)))
def testGenerateAndLoadJSONForInvalidFile(self):
"""
This test checks that a valid exception is raised if the required file cannot be found
"""
invalid_name = "invalid_" + self.original_name
print "Processing file {}".format(invalid_name)
with self.assertRaises(IOError) as io_exception:
# attempt to read file that doesn't exist
json_processing.load_data_from_json_file(invalid_name)
self.assertEqual(io_exception.exception.errno, 2)
self.assertEqual(io_exception.exception.strerror, 'No such file or directory')
if __name__ == "__main__":
unittest.main(verbosity=2)
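# Illustrative note (not part of the original file): the suite can be run
# directly with `python generate_and_load_unittest_update_four.py`; the
# @skip / @skipIf decorated tests above are then reported as skipped rather
# than executed.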
|
apache-2.0
| -1,147,400,482,822,408,300
| 50.275
| 214
| 0.683248
| false
| 3.890892
| true
| false
| false
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/express_route_circuits_routes_table_list_result.py
|
1
|
1252
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitsRoutesTableListResult(Model):
"""Response for ListRoutesTable associated with the Express Route Circuits
API.
:param value: The list of routes table.
:type value:
list[~azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitRoutesTable]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTable]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, value=None, next_link=None):
super(ExpressRouteCircuitsRoutesTableListResult, self).__init__()
self.value = value
self.next_link = next_link
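# Illustrative sketch (added, not generated code): instances are normally
# deserialized by the SDK client, but the model can also be built directly:
#
#   result = ExpressRouteCircuitsRoutesTableListResult(value=[], next_link=None)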
|
mit
| -8,715,857,573,506,023,000
| 35.823529
| 80
| 0.610224
| false
| 4.173333
| false
| false
| false
|
appleseedhq/cortex
|
python/IECoreScene/RemovePrimitiveVariables.py
|
5
|
2937
|
##########################################################################
#
# Copyright (c) 2007-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from fnmatch import fnmatchcase
import IECore
import IECoreScene
class RemovePrimitiveVariables( IECoreScene.PrimitiveOp ) :
def __init__( self ) :
IECoreScene.PrimitiveOp.__init__( self, "Removes variables from primitives" )
self.parameters().addParameters(
[
IECore.StringParameter(
name = "mode",
description = """This chooses whether or not the names parameter specifies the names of
variables to keep or the names of variables to remove.""",
defaultValue = "remove",
presets = (
( "keep", "keep" ),
( "remove", "remove" )
),
presetsOnly = True
),
IECore.StringVectorParameter(
name = "names",
description = "The names of variables. These can include * or ? characters to match many names.",
defaultValue = IECore.StringVectorData()
)
]
)
def modifyPrimitive( self, primitive, args ) :
keep = args["mode"].value == "keep"
for key in primitive.keys() :
for n in args["names"] :
m = fnmatchcase( key, n )
if (m and not keep) or (not m and keep) :
del primitive[key]
IECore.registerRunTimeTyped( RemovePrimitiveVariables )
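# Illustrative sketch (added commentary, not part of the original module; the
# exact parameter names are assumptions based on common PrimitiveOp conventions):
#
#   op = RemovePrimitiveVariables()
#   result = op(input=somePrimitive, mode="remove",
#               names=IECore.StringVectorData(["P*"]))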
|
bsd-3-clause
| 5,771,326,003,735,331,000
| 36.177215
| 102
| 0.677562
| false
| 4.338257
| false
| false
| false
|
eJRF/ejrf
|
questionnaire/migrations/0002_copy_question_text_to_export_label.py
|
1
|
25463
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for question in orm.question.objects.filter(export_label=''):
question.export_label = question.text
question.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'questionnaire.answer': {
'Meta': {'object_name': 'Answer'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'null': 'True', 'to': "orm['questionnaire.Question']"}),
'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'null': 'True', 'to': "orm['questionnaire.Questionnaire']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Draft'", 'max_length': '15'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'})
},
'questionnaire.answergroup': {
'Meta': {'object_name': 'AnswerGroup'},
'answer': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'answergroup'", 'null': 'True', 'to': "orm['questionnaire.Answer']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'grouped_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answer_groups'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'row': ('django.db.models.fields.CharField', [], {'max_length': '6'})
},
'questionnaire.comment': {
'Meta': {'object_name': 'Comment'},
'answer_group': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'comments'", 'symmetrical': 'False', 'to': "orm['questionnaire.AnswerGroup']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'questionnaire.country': {
'Meta': {'object_name': 'Country'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'countries'", 'null': 'True', 'to': "orm['questionnaire.Region']"})
},
'questionnaire.countryquestionnairesubmission': {
'Meta': {'object_name': 'CountryQuestionnaireSubmission'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': "orm['questionnaire.Country']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': "orm['questionnaire.Questionnaire']"}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'questionnaire.dateanswer': {
'Meta': {'object_name': 'DateAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'})
},
'questionnaire.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answer'", 'null': 'True', 'to': "orm['questionnaire.QuestionOption']"})
},
'questionnaire.multipleresponseanswer': {
'Meta': {'object_name': 'MultipleResponseAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'answers'", 'null': 'True', 'to': "orm['questionnaire.QuestionOption']"})
},
'questionnaire.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'})
},
'questionnaire.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'questionnaire.question': {
'Meta': {'object_name': 'Question'},
'UID': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'answer_sub_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'export_label': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'is_primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child'", 'null': 'True', 'to': "orm['questionnaire.Question']"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'null': 'True', 'to': "orm['questionnaire.Region']"}),
'text': ('django.db.models.fields.TextField', [], {}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'null': 'True', 'to': "orm['questionnaire.Theme']"})
},
'questionnaire.questiongroup': {
'Meta': {'ordering': "('order',)", 'object_name': 'QuestionGroup'},
'allow_multiples': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'display_all': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hybrid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'is_core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_group'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"}),
'question': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'question_group'", 'symmetrical': 'False', 'to': "orm['questionnaire.Question']"}),
'subsection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_group'", 'to': "orm['questionnaire.SubSection']"})
},
'questionnaire.questiongrouporder': {
'Meta': {'ordering': "('order',)", 'unique_together': "(('order', 'question_group', 'question'),)", 'object_name': 'QuestionGroupOrder'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'orders'", 'to': "orm['questionnaire.Question']"}),
'question_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'orders'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"})
},
'questionnaire.questionnaire': {
'Meta': {'object_name': 'Questionnaire'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['questionnaire.Questionnaire']"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'questionnaire'", 'null': 'True', 'to': "orm['questionnaire.Region']"}),
'status': ('model_utils.fields.StatusField', [], {'default': "'draft'", 'max_length': '100', u'no_check_for_status': 'True'}),
'year': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'questionnaire.questionoption': {
'Meta': {'ordering': "('modified',)", 'object_name': 'QuestionOption'},
'UID': ('django.db.models.fields.CharField', [], {'max_length': '6', 'unique': 'True', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['questionnaire.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'questionnaire.region': {
'Meta': {'object_name': 'Region'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'regions'", 'null': 'True', 'to': "orm['questionnaire.Organization']"})
},
'questionnaire.section': {
'Meta': {'ordering': "('order',)", 'object_name': 'Section'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['questionnaire.Questionnaire']"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'null': 'True', 'to': "orm['questionnaire.Region']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'questionnaire.skipquestion': {
'Meta': {'object_name': 'SkipQuestion', '_ormbases': ['questionnaire.SkipRule']},
'skip_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skip_rules'", 'to': "orm['questionnaire.Question']"}),
u'skiprule_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.SkipRule']", 'unique': 'True', 'primary_key': 'True'})
},
'questionnaire.skiprule': {
'Meta': {'object_name': 'SkipRule'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'skip_rules'", 'null': 'True', 'to': "orm['questionnaire.Region']"}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skip_rules'", 'to': "orm['questionnaire.QuestionOption']"}),
'root_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'root_skip_rules'", 'to': "orm['questionnaire.Question']"}),
'subsection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skip_rules'", 'to': "orm['questionnaire.SubSection']"})
},
'questionnaire.skipsubsection': {
'Meta': {'object_name': 'SkipSubsection', '_ormbases': ['questionnaire.SkipRule']},
'skip_subsection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.SubSection']"}),
u'skiprule_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.SkipRule']", 'unique': 'True', 'primary_key': 'True'})
},
'questionnaire.subsection': {
'Meta': {'ordering': "('order',)", 'object_name': 'SubSection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_sections'", 'null': 'True', 'to': "orm['questionnaire.Region']"}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_sections'", 'to': "orm['questionnaire.Section']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'questionnaire.supportdocument': {
'Meta': {'object_name': 'SupportDocument'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'support_documents'", 'to': "orm['questionnaire.Questionnaire']"})
},
'questionnaire.textanswer': {
'Meta': {'object_name': 'TextAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'questionnaire.theme': {
'Meta': {'object_name': 'Theme'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'themes'", 'null': 'True', 'to': "orm['questionnaire.Region']"})
},
'questionnaire.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Organization']", 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Region']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['questionnaire']
symmetrical = True
|
bsd-3-clause
| -352,401,073,382,339,500
| 88.031469
| 195
| 0.571535
| false
| 3.747866
| false
| false
| false
|
SaschaMester/delicium
|
tools/telemetry/telemetry/core/platform/profiler/java_heap_profiler.py
|
1
|
3432
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import subprocess
import threading
from telemetry.core.platform import profiler
from telemetry.core import util
from telemetry.internal.backends.chrome import android_browser_finder
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
try:
from pylib import constants # pylint: disable=F0401
from pylib.device import device_errors # pylint: disable=F0401
except ImportError:
constants = None
device_errors = None
class JavaHeapProfiler(profiler.Profiler):
"""Android-specific, trigger and fetch java heap dumps."""
_DEFAULT_DEVICE_DIR = '/data/local/tmp/javaheap'
# TODO(bulach): expose this as a command line option somehow.
_DEFAULT_INTERVAL = 20
def __init__(self, browser_backend, platform_backend, output_path, state):
super(JavaHeapProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
self._run_count = 1
self._DumpJavaHeap(False)
self._timer = threading.Timer(self._DEFAULT_INTERVAL, self._OnTimer)
self._timer.start()
@classmethod
def name(cls):
return 'java-heap'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._timer.cancel()
self._DumpJavaHeap(True)
try:
self._browser_backend.adb.device().PullFile(
self._DEFAULT_DEVICE_DIR, self._output_path)
except:
logging.exception('New exception caused by DeviceUtils conversion')
raise
self._browser_backend.adb.RunShellCommand(
'rm ' + os.path.join(self._DEFAULT_DEVICE_DIR, '*'))
output_files = []
for f in os.listdir(self._output_path):
if os.path.splitext(f)[1] == '.aprof':
input_file = os.path.join(self._output_path, f)
output_file = input_file.replace('.aprof', '.hprof')
hprof_conv = os.path.join(constants.ANDROID_SDK_ROOT,
'tools', 'hprof-conv')
subprocess.call([hprof_conv, input_file, output_file])
output_files.append(output_file)
return output_files
def _OnTimer(self):
self._DumpJavaHeap(False)
def _DumpJavaHeap(self, wait_for_completion):
if not self._browser_backend.adb.device().FileExists(
self._DEFAULT_DEVICE_DIR):
self._browser_backend.adb.RunShellCommand(
'mkdir -p ' + self._DEFAULT_DEVICE_DIR)
self._browser_backend.adb.RunShellCommand(
'chmod 777 ' + self._DEFAULT_DEVICE_DIR)
device_dump_file = None
for pid in self._GetProcessOutputFileMap().iterkeys():
device_dump_file = '%s/%s.%s.aprof' % (self._DEFAULT_DEVICE_DIR, pid,
self._run_count)
self._browser_backend.adb.RunShellCommand('am dumpheap %s %s' %
(pid, device_dump_file))
if device_dump_file and wait_for_completion:
util.WaitFor(lambda: self._FileSize(device_dump_file) > 0, timeout=2)
self._run_count += 1
def _FileSize(self, file_name):
try:
return self._browser_backend.adb.device().Stat(file_name).st_size
except device_errors.CommandFailedError:
return 0
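# Minimal usage sketch (illustrative only; `prof` stands for an instance that
# Telemetry's profiler machinery would construct from the class above):
#
#   output_files = prof.CollectProfile()
#   # Each returned path ends in '.hprof': the raw `am dumpheap` output
#   # (saved as .aprof) is converted by the SDK's hprof-conv tool into the
#   # standard HPROF format, readable by the usual Java heap tools.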
|
bsd-3-clause
| 5,593,076,294,993,118,000
| 34.75
| 76
| 0.666084
| false
| 3.734494
| false
| false
| false
|
HaydenFaulkner/phd
|
tensorflow_code/word2vec_basic.py
|
1
|
11354
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
# from six.moves import urllib
# from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
# url = 'http://mattmahoney.net/dc/'
# def maybe_download(filename, expected_bytes):
# """Download a file if not present, and make sure it's the right size."""
# if not os.path.exists(filename):
# filename, _ = urllib.request.urlretrieve(url + filename, filename)
# statinfo = os.stat(filename)
# if statinfo.st_size == expected_bytes:
# print('Found and verified', filename)
# else:
# print(statinfo.st_size)
# raise Exception(
# 'Failed to verify ' + filename + '. Can you get to it with a browser?')
# return filename
# filename = maybe_download('text8.zip', 31344016)
# filename = '/home/hayden/Downloads/text8.zip'
# # Read the data into a list of strings.
# def read_data(filename):
# """Extract the first file enclosed in a zip file as a list of words"""
# with zipfile.ZipFile(filename) as f:
# data = tf.compat.as_str(f.read(f.namelist()[0])).split()
# return data
#
# words = read_data(filename)
def word2_vec_basic(sentence_paths, extra_path=None, plot_path=None):
def get_tennis_words():
words = []
for sentence_path in sentence_paths:
with open(sentence_path) as f:
lines = f.readlines()
for line in lines:
for word in ('<BOS> '+line.split('\t')[1].rstrip()+' <EOS>').split():
words.append(word)
if extra_path is not None:
with open(extra_path) as f:
lines = f.readlines()
for line in lines:
for word in ('<BOS> ' + line.split('\t')[1].rstrip() + ' <EOS>').split():
words.append(word)
return words
words = get_tennis_words()
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = min(len(collections.Counter(words)), 50000)
def build_dataset(words):
count = [['<UNK>', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+<UNK>)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
global data_index
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
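  # NB: with num_skips=2 and skip_window=1 the eight printed pairs come from
  # four consecutive centre words, each paired once with its left and once
  # with its right neighbour.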
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 64 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
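      # NB: NCE sidesteps the full softmax by contrasting the true context
      # word with num_sampled randomly drawn negatives, so the cost of one
      # training step does not grow with the vocabulary size.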
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 50001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in range(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
      # in the list of returned values for session.run()).
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 1000 == 0:
if step > 0:
          average_loss /= 1000
        # The average loss is an estimate of the loss over the last 1000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
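          # NB: the slice starts at 1 because index 0 is the word itself,
          # which is trivially its own nearest neighbour.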
log_str = "Nearest to %s:" % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
embeds = {}
for i in range(len(reverse_dictionary)):
embeds[reverse_dictionary[i]] = final_embeddings[i]
# Step 6: Visualize the embeddings.
  def plot_with_labels(low_dim_embs, labels, filename=None):
    # Build the default lazily so that a plot_path of None does not break the
    # definition of this helper.
    if filename is None:
      filename = plot_path + 'tsne.png'
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
if plot_path is not None:
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = min(vocabulary_size, 500)
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in range(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
return len(embeds), embeds, None
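# Minimal usage sketch (illustrative only; the paths are hypothetical and each
# sentence file is assumed to hold "<id>\t<sentence>" lines, as read by
# get_tennis_words above):
#
#   vocab_size, embeds, _ = word2_vec_basic(
#       ['data/train.sentences', 'data/val.sentences'],
#       extra_path='data/extra.sentences',
#       plot_path='plots/')        # must end with a path separator
#   vector = embeds['<BOS>']       # numpy vector of length embedding_size (64)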
|
mit
| 5,478,328,674,893,871,000
| 39.123675
| 91
| 0.59239
| false
| 3.999296
| false
| false
| false
|
jacob-meacham/chain-cli
|
chain/cli.py
|
1
|
4526
|
"""CLI for chain.
This module is not intended to be used programmatically - if this is something you want, use chain.client instead.
"""
import click
from termcolor import colored
from chain.chain import ChainClient, Frequency, NoChainExistsException, ChainExistsException
# No docstrings for this file, as the functions are not meant to be called directly.
# pylint: disable=missing-docstring
DEFAULT_DATA_PATH = '~/.chain/chains.json'
DONT_BREAK_TEXT = colored("Don't break the chain!", 'red', attrs=['underline'])
# This is idiomatic for click
# pylint: disable=C0103
pass_chain_context = click.make_pass_decorator(ChainClient)
def _format_chain_name(name):
return colored('"{}"'.format(name), 'green', attrs=['bold'])
@click.group()
@click.option('--file', metavar='FILE', help='Data file path, default is ~/.chain/chains.json', type=click.Path(),
default=DEFAULT_DATA_PATH)
@click.version_option('0.3.2')
@click.pass_context
def cli(ctx, file):
ctx.obj = ChainClient(file)
@cli.command(name='new', help='add a new chain')
@click.argument('name')
@click.option('--title', '-t', help='Title of this chain. If not specified, the title will be the name')
@click.option('--daily', is_flag=True, help='Add daily links (Default)')
@click.option('--weekly', is_flag=True, help='Add weekly links')
@click.option('--monthly', is_flag=True, help='Add monthly links')
@click.option('--required', help='Number of links required for the chain to be considered unbroken', default=1)
@click.option('--description', '-d', help='Description of this chain', default='')
@pass_chain_context
def new_chain(client, name, title, daily, weekly, monthly, required, description):
if [daily, weekly, monthly].count(True) > 1:
        raise click.BadArgumentUsage('At most one of --daily, --weekly, --monthly may be set.')
# Pylint has bugs with enums
# pylint: disable=redefined-variable-type
if weekly:
frequency = Frequency.weekly
elif monthly:
frequency = Frequency.monthly
else:
frequency = Frequency.daily
try:
client.new_chain(name, title=title, frequency=frequency, description=description, num_required=required)
except ChainExistsException as e:
raise click.BadArgumentUsage(e.message)
click.echo("New chain {} created. {}".format(_format_chain_name(name), DONT_BREAK_TEXT))
@cli.command(name='add', help='add a link to the chain')
@click.argument('name')
@click.option('--num', '-n', help='Number of links to add', default=1)
@click.option('--message', '-m', help='Message attached to the added link', default='')
@pass_chain_context
def add_link(client, name, num, message):
try:
client.add_link_to_chain(name, num, message=message)
except NoChainExistsException as e:
raise click.BadArgumentUsage(e.message)
num_links_text = colored('{}'.format(num), "blue", attrs=['bold'])
link_pluralization = 'link' if num == 1 else 'links'
click.echo('Added {} {} to chain {}. {}'.format(num_links_text, link_pluralization,
_format_chain_name(name), DONT_BREAK_TEXT))
@cli.command(name='ls', help='List chains')
@click.option('-q', help='List name only', is_flag=True)
@click.option('--prefix', help='List only those chains whose name matches this prefix')
@pass_chain_context
def list_chains(client, q, prefix):
try:
chains = [c for c in client.list_chains() if prefix is None or c['id'].startswith(prefix)]
if q:
for c in chains:
click.echo(c['id'])
else:
for c in chains:
# TODO: List them using termtable
click.echo(c)
except NoChainExistsException as e:
raise click.BadArgumentUsage(e.message)
@cli.command(name='archive', help='Archive a chain')
@click.argument('name')
@pass_chain_context
def archive_chain(client, name):
try:
client.archive_chain(name)
except NoChainExistsException as e:
raise click.BadArgumentUsage(e.message)
click.echo('Archived chain {}'.format(_format_chain_name(name)))
@cli.command(name='rm', help='Remove a chain')
@click.argument('name')
@pass_chain_context
def remove_chain(client, name):
try:
client.remove_chain(name)
except NoChainExistsException as e:
raise click.BadArgumentUsage(e.message)
click.echo('Removed chain {}'.format(_format_chain_name(name)))
if __name__ == '__main__':
# pylint: disable=E1120
cli()
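# Example shell usage (illustrative only; assumes the package installs a
# `chain` console script that points at cli()):
#
#   $ chain new writing --daily --required 1 -d "Write a little every day"
#   $ chain add writing -m "Drafted the intro"
#   $ chain ls --prefix wri
#   $ chain archive writing
#   $ chain rm writing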
|
mit
| -6,670,421,807,501,067,000
| 35.208
| 114
| 0.67057
| false
| 3.606375
| false
| false
| false
|
DiCarloLab-Delft/PycQED_py3
|
pycqed/analysis_v2/randomized_benchmarking_analysis.py
|
1
|
94884
|
import lmfit
from uncertainties import ufloat
import pandas as pd
from copy import deepcopy
from pycqed.analysis import analysis_toolbox as a_tools
from collections import OrderedDict
from pycqed.analysis import measurement_analysis as ma_old
import pycqed.analysis_v2.base_analysis as ba
import numpy as np
import logging
from scipy.stats import sem
from pycqed.analysis.tools.data_manipulation import populations_using_rate_equations
from pycqed.analysis.tools.plotting import set_xlabel, set_ylabel, plot_fit
from pycqed.utilities.general import format_value_string
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, PowerNorm
from sklearn import linear_model
from matplotlib import colors as c
from pycqed.analysis_v2.tools import geometry_utils as geo
log = logging.getLogger(__name__)
class RandomizedBenchmarking_SingleQubit_Analysis(ba.BaseDataAnalysis):
def __init__(
self,
t_start: str = None,
t_stop: str = None,
label="",
options_dict: dict = None,
auto=True,
close_figs=True,
classification_method="rates",
rates_I_quad_ch_idx: int = 0,
rates_Q_quad_ch_idx: int = None,
rates_ch_idx=None, # Deprecated
cal_pnts_in_dset: list = np.repeat(["0", "1", "2"], 2),
ignore_f_cal_pts: bool = False,
do_fitting: bool = True,
**kwargs
):
"""
Analysis for single qubit randomized benchmarking.
For basic options see docstring of BaseDataAnalysis
Args:
classification_method ["rates", ] sets method to determine
populations of g,e and f states. Currently only supports "rates"
rates: uses calibration points and rate equation from
Asaad et al. to determine populations
rates_I_quad_ch_idx (int) : sets the I quadrature channel from which
to use the data for the rate equations,
`rates_I_quad_ch_idx + 1` is assumed to be the Q quadrature,
both quadratures are used in the rate equation,
this analysis expects the RO mode to be "optimal IQ"
ignore_f_cal_pts (bool) : if True, ignores the f-state calibration
points and instead makes the approximation that the f-state
looks the same as the e-state in readout. This is useful when
the ef-pulse is not calibrated.
"""
if options_dict is None:
options_dict = dict()
super().__init__(
t_start=t_start,
t_stop=t_stop,
label=label,
options_dict=options_dict,
close_figs=close_figs,
do_fitting=do_fitting,
**kwargs
)
# used to determine how to determine 2nd excited state population
self.classification_method = classification_method
# [2020-07-09 Victor] RB has been used with the "optimal IQ" RO mode
# for a while in the lab, both quadratures are necessary for plotting
# and correct calculation using the rates equation
if rates_ch_idx is not None:
log.warning(
"`rates_ch_idx` is deprecated `rates_I_quad_ch_idx` "
+ "and `rates_I_quad_ch_idx + 1` are used for population "
+ "rates calculation! Please apply changes to `pycqed`."
)
self.rates_I_quad_ch_idx = rates_I_quad_ch_idx
self.rates_Q_quad_ch_idx = rates_Q_quad_ch_idx
if self.rates_Q_quad_ch_idx is None:
self.rates_Q_quad_ch_idx = rates_I_quad_ch_idx + 1
self.d1 = 2
self.cal_pnts_in_dset = np.array(cal_pnts_in_dset)
self.ignore_f_cal_pts = ignore_f_cal_pts
# Allows to run this analysis for different qubits in same dataset
self.overwrite_qois = False
if auto:
self.run_analysis()
    # NB: the fit_res, plot_dicts and qois keys are suffixed with the
    # `value_name` corresponding to `rates_I_quad_ch_idx`, so that this
    # analysis can be run several times, each targeting a different qubit
def extract_data(self):
"""
Custom data extraction for this specific experiment.
"""
self.raw_data_dict = OrderedDict()
self.timestamps = a_tools.get_timestamps_in_range(
self.t_start, self.t_stop, label=self.labels
)
a = ma_old.MeasurementAnalysis(
timestamp=self.timestamps[0], auto=False, close_file=False
)
a.get_naming_and_values()
if "bins" in a.data_file["Experimental Data"]["Experimental Metadata"].keys():
bins = a.data_file["Experimental Data"]["Experimental Metadata"]["bins"][()]
num_cal_pnts = len(self.cal_pnts_in_dset)
self.raw_data_dict["ncl"] = bins[:-num_cal_pnts:2]
self.raw_data_dict["bins"] = bins
self.raw_data_dict["value_names"] = a.value_names
self.raw_data_dict["value_units"] = a.value_units
self.raw_data_dict["measurementstring"] = a.measurementstring
self.raw_data_dict["timestamp_string"] = a.timestamp_string
self.raw_data_dict["binned_vals"] = OrderedDict()
self.raw_data_dict["cal_pts_zero"] = OrderedDict()
self.raw_data_dict["cal_pts_one"] = OrderedDict()
self.raw_data_dict["cal_pts_two"] = OrderedDict()
self.raw_data_dict["measured_values_I"] = OrderedDict()
self.raw_data_dict["measured_values_X"] = OrderedDict()
        # [2020-07-08 Victor] don't know why this is here, seems like
        # a nasty hack... will keep it to avoid breaking some more stuff...
selection = a.measured_values[0] == 0
for i in range(1, len(a.measured_values)):
selection &= a.measured_values[i] == 0
invalid_idxs = np.where(selection)[0]
if len(invalid_idxs):
log.warning(
"Found zero values at {} indices!".format(len(invalid_idxs))
)
log.warning(invalid_idxs[:10])
a.measured_values[:, invalid_idxs] = np.array(
[[np.nan] * len(invalid_idxs)] * len(a.value_names)
)
zero_idxs = np.where(self.cal_pnts_in_dset == "0")[0] - num_cal_pnts
one_idxs = np.where(self.cal_pnts_in_dset == "1")[0] - num_cal_pnts
two_idxs = np.where(self.cal_pnts_in_dset == "2")[0] - num_cal_pnts
for i, val_name in enumerate(a.value_names):
binned_yvals = np.reshape(
a.measured_values[i], (len(bins), -1), order="F"
)
self.raw_data_dict["binned_vals"][val_name] = binned_yvals
vlns = a.value_names
if val_name in (
vlns[self.rates_I_quad_ch_idx],
vlns[self.rates_Q_quad_ch_idx],
):
self.raw_data_dict["cal_pts_zero"][val_name] = binned_yvals[
zero_idxs, :
].flatten()
self.raw_data_dict["cal_pts_one"][val_name] = binned_yvals[
one_idxs, :
].flatten()
if self.ignore_f_cal_pts:
self.raw_data_dict["cal_pts_two"][
val_name
] = self.raw_data_dict["cal_pts_one"][val_name]
else:
self.raw_data_dict["cal_pts_two"][val_name] = binned_yvals[
two_idxs, :
].flatten()
self.raw_data_dict["measured_values_I"][val_name] = binned_yvals[
:-num_cal_pnts:2, :
]
self.raw_data_dict["measured_values_X"][val_name] = binned_yvals[
1:-num_cal_pnts:2, :
]
else:
bins = None
self.raw_data_dict["folder"] = a.folder
self.raw_data_dict["timestamps"] = self.timestamps
a.finish() # closes data file
def process_data(self):
rdd = self.raw_data_dict
self.proc_data_dict = deepcopy(rdd)
pdd = self.proc_data_dict
for key in [
"V0",
"V1",
"V2",
"SI",
"SI_corr",
"SX",
"SX_corr",
"P0",
"P1",
"P2",
"M_inv",
"M0",
"X1",
]:
        # Nesting dictionaries makes it possible to generate all of these
        # quantities for different qubits by simply running the analysis
        # several times with different rates_I_quad_ch_idx and cal points
pdd[key] = OrderedDict()
val_name_I = rdd["value_names"][self.rates_I_quad_ch_idx]
val_name_Q = rdd["value_names"][self.rates_Q_quad_ch_idx]
V0_I = np.nanmean(rdd["cal_pts_zero"][val_name_I])
V1_I = np.nanmean(rdd["cal_pts_one"][val_name_I])
V2_I = np.nanmean(rdd["cal_pts_two"][val_name_I])
V0_Q = np.nanmean(rdd["cal_pts_zero"][val_name_Q])
V1_Q = np.nanmean(rdd["cal_pts_one"][val_name_Q])
V2_Q = np.nanmean(rdd["cal_pts_two"][val_name_Q])
pdd["V0"][val_name_I] = V0_I
pdd["V1"][val_name_I] = V1_I
pdd["V2"][val_name_I] = V2_I
pdd["V0"][val_name_Q] = V0_Q
pdd["V1"][val_name_Q] = V1_Q
pdd["V2"][val_name_Q] = V2_Q
SI_I = np.nanmean(rdd["measured_values_I"][val_name_I], axis=1)
SX_I = np.nanmean(rdd["measured_values_X"][val_name_I], axis=1)
SI_Q = np.nanmean(rdd["measured_values_I"][val_name_Q], axis=1)
SX_Q = np.nanmean(rdd["measured_values_X"][val_name_Q], axis=1)
pdd["SI"][val_name_I] = SI_I
pdd["SX"][val_name_I] = SX_I
pdd["SI"][val_name_Q] = SI_Q
pdd["SX"][val_name_Q] = SX_Q
cal_triangle = np.array([[V0_I, V0_Q], [V1_I, V1_Q], [V2_I, V2_Q]])
pdd["cal_triangle"] = cal_triangle
# [2020-07-11 Victor]
        # Here we correct for the cases in which the measured points fall
        # outside the triangle of the calibration points; such a case breaks
        # the assumption that S = V0 * P0 + V1 * P1 + V2 * P2
SI_I_corr, SI_Q_corr = geo.constrain_to_triangle(cal_triangle, SI_I, SI_Q)
SX_I_corr, SX_Q_corr = geo.constrain_to_triangle(cal_triangle, SX_I, SX_Q)
pdd["SI_corr"][val_name_I] = SI_I_corr
pdd["SX_corr"][val_name_I] = SX_I_corr
pdd["SI_corr"][val_name_Q] = SI_Q_corr
pdd["SX_corr"][val_name_Q] = SX_Q_corr
P0, P1, P2, M_inv = populations_using_rate_equations(
SI_I_corr + 1j * SI_Q_corr,
SX_I_corr + 1j * SX_Q_corr,
V0_I + 1j * V0_Q,
V1_I + 1j * V1_Q,
V2_I + 1j * V2_Q,
)
# There might be other qubits being measured at some point so we keep
# the results with the I quadrature label
pdd["P0"][val_name_I] = P0
pdd["P1"][val_name_I] = P1
pdd["P2"][val_name_I] = P2
pdd["M_inv"][val_name_I] = M_inv
# [2020-07-09 Victor] This is not being used for anything...
# classifier = logisticreg_classifier_machinelearning(
# pdd["cal_pts_zero"],
# pdd["cal_pts_one"],
# pdd["cal_pts_two"],
# )
# pdd["classifier"] = classifier
if self.classification_method == "rates":
pdd["M0"][val_name_I] = P0
pdd["X1"][val_name_I] = 1 - P2
else:
raise NotImplementedError()
def run_fitting(self, fit_input_tag: str = None):
"""
Args:
            fit_input_tag (str): selects which M0 and X1 traces to fit;
                intended for use in 2Q RB
"""
super().run_fitting()
rdd = self.raw_data_dict
pdd = self.proc_data_dict
if fit_input_tag is None:
# Default value for single qubit RB analysis
fit_input_tag = rdd["value_names"][self.rates_I_quad_ch_idx]
leak_mod = lmfit.Model(leak_decay, independent_vars="m")
leak_mod.set_param_hint("A", value=0.95, min=0, vary=True)
leak_mod.set_param_hint("B", value=0.1, min=0, vary=True)
leak_mod.set_param_hint("lambda_1", value=0.99, vary=True)
leak_mod.set_param_hint("L1", expr="(1-A)*(1-lambda_1)")
leak_mod.set_param_hint("L2", expr="A*(1-lambda_1)")
leak_mod.set_param_hint("L1_cz", expr="1-(1-(1-A)*(1-lambda_1))**(1/1.5)")
leak_mod.set_param_hint("L2_cz", expr="1-(1-(A*(1-lambda_1)))**(1/1.5)")
params = leak_mod.make_params()
try:
fit_res_leak = leak_mod.fit(
data=pdd["X1"][fit_input_tag], m=pdd["ncl"], params=params,
)
self.fit_res["leakage_decay_" + fit_input_tag] = fit_res_leak
lambda_1 = fit_res_leak.best_values["lambda_1"]
L1 = fit_res_leak.params["L1"].value
except Exception as e:
log.warning("Fitting {} failed!".format("leakage_decay"))
log.warning(e)
lambda_1 = 1
L1 = 0
self.fit_res["leakage_decay_" + fit_input_tag] = {}
fit_res_rb = self.fit_rb_decay(
fit_input_tag, lambda_1=lambda_1, L1=L1, simple=False
)
self.fit_res["rb_decay_" + fit_input_tag] = fit_res_rb
fit_res_rb_simple = self.fit_rb_decay(
fit_input_tag, lambda_1=1, L1=0, simple=True
)
self.fit_res["rb_decay_simple_" + fit_input_tag] = fit_res_rb_simple
def safe_get_par_from_fit_result(fit_res, par_name):
"""
Ensures an `lmfit.Parameter` is always returned even when the fit
failed and an empty dict is provided
"""
if fit_res: # Check for empty dict
params = fit_res.params
par = params[par_name]
else:
par = lmfit.Parameter(par_name)
par.value = np.NaN
par.stderr = np.NaN
return par
fr_rb_dict = self.fit_res["rb_decay_" + fit_input_tag]
eps = safe_get_par_from_fit_result(fr_rb_dict, "eps")
fr_rb_simple_dict = self.fit_res["rb_decay_simple_" + fit_input_tag]
eps_simple = safe_get_par_from_fit_result(fr_rb_simple_dict, "eps")
fr_dec = self.fit_res["leakage_decay_" + fit_input_tag]
L1 = safe_get_par_from_fit_result(fr_dec, "L1")
L2 = safe_get_par_from_fit_result(fr_dec, "L2")
text_msg = "Summary: \n"
text_msg += format_value_string(
r"$\epsilon_{{\mathrm{{simple}}}}$", eps_simple, "\n"
)
text_msg += format_value_string(r"$\epsilon_{{\chi_1}}$", eps, "\n")
text_msg += format_value_string(r"$L_1$", L1, "\n")
text_msg += format_value_string(r"$L_2$", L2, "\n")
pdd["rb_msg_" + fit_input_tag] = text_msg
pdd["quantities_of_interest"] = {}
qoi = pdd["quantities_of_interest"]
qoi["eps_simple_" + fit_input_tag] = ufloat(
eps_simple.value, eps_simple.stderr or np.NaN
)
qoi["eps_X1_" + fit_input_tag] = ufloat(eps.value, eps.stderr or np.NaN)
qoi["L1_" + fit_input_tag] = ufloat(L1.value, L1.stderr or np.NaN)
qoi["L2_" + fit_input_tag] = ufloat(L2.value, L2.stderr or np.NaN)
def fit_rb_decay(
self, val_name: str, lambda_1: float, L1: float, simple: bool = False
):
"""
        Fits the full (or simple) RB decay model to M0 vs. the number of Cliffords.
"""
pdd = self.proc_data_dict
fit_mod_rb = lmfit.Model(full_rb_decay, independent_vars="m")
fit_mod_rb.set_param_hint("A", value=0.5, min=0, vary=True)
if simple:
fit_mod_rb.set_param_hint("B", value=0, vary=False)
else:
fit_mod_rb.set_param_hint("B", value=0.1, min=0, vary=True)
fit_mod_rb.set_param_hint("C", value=0.4, min=0, max=1, vary=True)
fit_mod_rb.set_param_hint("lambda_1", value=lambda_1, vary=False)
fit_mod_rb.set_param_hint("lambda_2", value=0.95, vary=True)
# d1 = dimensionality of computational subspace
fit_mod_rb.set_param_hint("d1", value=self.d1, vary=False)
fit_mod_rb.set_param_hint("L1", value=L1, vary=False)
        # Note that all derived quantities are expressed directly in the fit
        # model as constrained lmfit parameters (via `expr`)
fit_mod_rb.set_param_hint("F", expr="1/d1*((d1-1)*lambda_2+1-L1)", vary=True)
fit_mod_rb.set_param_hint("eps", expr="1-(1/d1*((d1-1)*lambda_2+1-L1))")
# Only valid for single qubit RB assumption equal error rates
fit_mod_rb.set_param_hint(
"F_g", expr="(1/d1*((d1-1)*lambda_2+1-L1))**(1/1.875)"
)
fit_mod_rb.set_param_hint(
"eps_g", expr="1-(1/d1*((d1-1)*lambda_2+1-L1))**(1/1.875)"
)
# Only valid for two qubit RB assumption all error in CZ
fit_mod_rb.set_param_hint("F_cz", expr="(1/d1*((d1-1)*lambda_2+1-L1))**(1/1.5)")
fit_mod_rb.set_param_hint(
"eps_cz", expr="1-(1/d1*((d1-1)*lambda_2+1-L1))**(1/1.5)"
)
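        # NB: F follows the depolarizing relation
        # F = ((d1-1)*lambda_2 + 1 - L1) / d1. The exponents 1/1.875 and 1/1.5
        # convert the per-Clifford error into a per-gate and a per-CZ error,
        # assuming ~1.875 physical gates per single-qubit Clifford and ~1.5 CZs
        # per two-qubit Clifford.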
params = fit_mod_rb.make_params()
try:
fit_res_rb = fit_mod_rb.fit(
data=pdd["M0"][val_name], m=pdd["ncl"], params=params
)
except Exception as e:
log.warning("Fitting failed!")
log.warning(e)
fit_res_rb = {}
return fit_res_rb
def prepare_plots(self, fit_input_tag: str = None):
"""
Args:
            fit_input_tag (str): selects which M0 and X1 traces to plot;
                intended for use in 2Q RB
"""
rdd = self.raw_data_dict
pdd = self.proc_data_dict
if fit_input_tag is None:
val_name_I = rdd["value_names"][self.rates_I_quad_ch_idx]
fit_input_tag = val_name_I
val_names = rdd["value_names"]
for i, val_name in enumerate(val_names):
self.plot_dicts["binned_data_{}".format(val_name)] = {
"plotfn": self.plot_line,
"xvals": rdd["bins"],
"yvals": np.nanmean(rdd["binned_vals"][val_name], axis=1),
"yerr": sem(rdd["binned_vals"][val_name], axis=1),
"xlabel": "Number of Cliffords",
"xunit": "#",
"ylabel": val_name,
"yunit": rdd["value_units"][i],
"title": rdd["timestamp_string"] + "\n" + rdd["measurementstring"],
}
fs = plt.rcParams["figure.figsize"]
fig_id_hex = "cal_points_hexbin_{}".format(val_name_I)
self.plot_dicts[fig_id_hex] = {
"plotfn": plot_cal_points_hexbin,
"shots_0": (
rdd["cal_pts_zero"][val_names[self.rates_I_quad_ch_idx]],
rdd["cal_pts_zero"][val_names[self.rates_Q_quad_ch_idx]],
),
"shots_1": (
rdd["cal_pts_one"][val_names[self.rates_I_quad_ch_idx]],
rdd["cal_pts_one"][val_names[self.rates_Q_quad_ch_idx]],
),
"shots_2": (
rdd["cal_pts_two"][val_names[self.rates_I_quad_ch_idx]],
rdd["cal_pts_two"][val_names[self.rates_Q_quad_ch_idx]],
),
"xlabel": val_names[self.rates_I_quad_ch_idx],
"xunit": rdd["value_units"][0],
"ylabel": val_names[self.rates_Q_quad_ch_idx],
"yunit": rdd["value_units"][1],
"title": rdd["timestamp_string"]
+ "\n"
+ rdd["measurementstring"]
+ " hexbin plot",
"plotsize": (fs[0] * 1.5, fs[1]),
}
num_cal_pnts = len(pdd["cal_triangle"])
fig_id_RB_on_IQ = "rb_on_iq_{}".format(val_name_I)
for ax_id in [fig_id_hex, fig_id_RB_on_IQ]:
self.plot_dicts[ax_id + "_cal_pnts"] = {
"plotfn": self.plot_line,
"ax_id": ax_id,
"xvals": pdd["cal_triangle"].T[0].reshape(num_cal_pnts, 1),
"yvals": pdd["cal_triangle"].T[1].reshape(num_cal_pnts, 1),
"setlabel": [
r"V$_{\left |" + str(i) + r"\right >}$"
for i in range(num_cal_pnts)
],
"marker": "d",
"line_kws": {"markersize": 14, "markeredgecolor": "white"},
"do_legend": True,
# "legend_title": "Calibration points",
"legend_ncol": 3,
"linestyle": "",
}
# define figure and axes here to have custom layout
self.figs[fig_id_RB_on_IQ], axs = plt.subplots(
ncols=2, figsize=(fs[0] * 2.0, fs[1])
)
self.figs[fig_id_RB_on_IQ].patch.set_alpha(0)
self.axs[fig_id_RB_on_IQ] = axs[0]
fig_id_RB_on_IQ_det = fig_id_RB_on_IQ + "_detailed"
self.axs[fig_id_RB_on_IQ_det] = axs[1]
axs[1].yaxis.set_label_position("right")
axs[1].yaxis.tick_right()
close_triangle = list(range(num_cal_pnts)) + [0]
self.plot_dicts[fig_id_RB_on_IQ] = {
"ax_id": fig_id_RB_on_IQ,
"plotfn": self.plot_line,
"xvals": pdd["cal_triangle"].T[0][close_triangle],
"yvals": pdd["cal_triangle"].T[1][close_triangle],
"xlabel": val_names[self.rates_I_quad_ch_idx],
"xunit": rdd["value_units"][0],
"ylabel": val_names[self.rates_Q_quad_ch_idx],
"yunit": rdd["value_units"][1],
"title": rdd["timestamp_string"]
+ "\n"
+ rdd["measurementstring"]
+ " hexbin plot",
"marker": "",
"color": "black",
"line_kws": {"linewidth": 1},
"setlabel": "NONE",
}
self.plot_dicts[fig_id_RB_on_IQ_det] = {
"ax_id": fig_id_RB_on_IQ_det,
"plotfn": self.plot_line,
"xvals": pdd["cal_triangle"].T[0][:2],
"yvals": pdd["cal_triangle"].T[1][:2],
"xlabel": val_names[self.rates_I_quad_ch_idx],
"xunit": rdd["value_units"][0],
"ylabel": val_names[self.rates_Q_quad_ch_idx],
"yunit": rdd["value_units"][1],
"title": r"Detailed view",
"marker": "",
"color": "black",
"line_kws": {"linewidth": 1},
"setlabel": "NONE",
}
val_name_Q = rdd["value_names"][self.rates_Q_quad_ch_idx]
rb_SI = (pdd["SI"][val_name_I], pdd["SI"][val_name_Q])
rb_SX = (pdd["SX"][val_name_I], pdd["SX"][val_name_Q])
rb_SI_corr = (pdd["SI_corr"][val_name_I], pdd["SI_corr"][val_name_Q])
rb_SX_corr = (pdd["SX_corr"][val_name_I], pdd["SX_corr"][val_name_Q])
sigs = (rb_SI, rb_SI_corr, rb_SX, rb_SX_corr)
ids = ("SI", "SI_corr", "SX", "SX_corr")
labels = ("SI", "SI corrected", "SX", "SX corrected")
cols = ["royalblue", "dodgerblue", "red", "salmon"]
mks = [8, 4, 8, 4]
for ax_id, do_legend in zip(
[fig_id_RB_on_IQ, fig_id_RB_on_IQ_det], [True, False]
):
for S, col, mk_size, ID, label in zip(sigs, cols, mks, ids, labels):
self.plot_dicts[ax_id + "_{}".format(ID)] = {
"plotfn": self.plot_line,
"ax_id": ax_id,
"xvals": S[0],
"yvals": S[1],
"setlabel": label,
"marker": "o",
"line_kws": {"markersize": mk_size},
"color": col,
"do_legend": do_legend,
"legend_ncol": 3,
"linestyle": "",
}
for idx in [self.rates_I_quad_ch_idx, self.rates_Q_quad_ch_idx]:
val_name = rdd["value_names"][idx]
self.plot_dicts["raw_RB_curve_data_{}".format(val_name)] = {
"plotfn": plot_raw_RB_curve,
"ncl": pdd["ncl"],
"SI": pdd["SI"][val_name],
"SX": pdd["SX"][val_name],
"V0": pdd["V0"][val_name],
"V1": pdd["V1"][val_name],
"V2": pdd["V2"][val_name],
"xlabel": "Number of Cliffords",
"xunit": "#",
"ylabel": val_name,
"yunit": pdd["value_units"][idx],
"title": pdd["timestamp_string"] + "\n" + pdd["measurementstring"],
}
self.plot_dicts["rb_rate_eq_pops_{}".format(val_name_I)] = {
"plotfn": plot_populations_RB_curve,
"ncl": pdd["ncl"],
"P0": pdd["P0"][val_name_I],
"P1": pdd["P1"][val_name_I],
"P2": pdd["P2"][val_name_I],
"title": pdd["timestamp_string"]
+ "\n"
+ "Population using rate equations ch{}".format(val_name_I),
}
# [2020-07-09 Victor] This is not being used for anything...
# self.plot_dicts["logres_decision_bound"] = {
# "plotfn": plot_classifier_decission_boundary,
# "classifier": pdd["classifier"],
# "shots_0": (
# pdd["cal_pts_zero"][val_names[ch_idx_0]],
# pdd["cal_pts_zero"][val_names[ch_idx_1]],
# ),
# "shots_1": (
# pdd["cal_pts_one"][val_names[ch_idx_0]],
# pdd["cal_pts_one"][val_names[ch_idx_1]],
# ),
# "shots_2": (
# pdd["cal_pts_two"][val_names[ch_idx_0]],
# pdd["cal_pts_two"][val_names[ch_idx_1]],
# ),
# "xlabel": val_names[ch_idx_0],
# "xunit": pdd["value_units"][0],
# "ylabel": val_names[ch_idx_1],
# "yunit": pdd["value_units"][1],
# "title": pdd["timestamp_string"]
# + "\n"
# + pdd["measurementstring"]
# + " Decision boundary",
# "plotsize": (fs[0] * 1.5, fs[1]),
# }
# #####################################################################
# End of plots for single qubit only
# #####################################################################
if self.do_fitting:
# define figure and axes here to have custom layout
rb_fig_id = "main_rb_decay_{}".format(fit_input_tag)
leak_fig_id = "leak_decay_{}".format(fit_input_tag)
self.figs[rb_fig_id], axs = plt.subplots(
nrows=2, sharex=True, gridspec_kw={"height_ratios": (2, 1)}
)
self.figs[rb_fig_id].patch.set_alpha(0)
self.axs[rb_fig_id] = axs[0]
self.axs[leak_fig_id] = axs[1]
self.plot_dicts[rb_fig_id] = {
"plotfn": plot_rb_decay_woods_gambetta,
"ncl": pdd["ncl"],
"M0": pdd["M0"][fit_input_tag],
"X1": pdd["X1"][fit_input_tag],
"ax1": axs[1],
"title": pdd["timestamp_string"] + "\n" + pdd["measurementstring"],
}
self.plot_dicts["fit_leak"] = {
"plotfn": self.plot_fit,
"ax_id": leak_fig_id,
"fit_res": self.fit_res["leakage_decay_" + fit_input_tag],
"setlabel": "Leakage fit",
"do_legend": True,
"color": "C2",
}
self.plot_dicts["fit_rb_simple"] = {
"plotfn": self.plot_fit,
"ax_id": rb_fig_id,
"fit_res": self.fit_res["rb_decay_simple_" + fit_input_tag],
"setlabel": "Simple RB fit",
"do_legend": True,
}
self.plot_dicts["fit_rb"] = {
"plotfn": self.plot_fit,
"ax_id": rb_fig_id,
"fit_res": self.fit_res["rb_decay_" + fit_input_tag],
"setlabel": "Full RB fit",
"do_legend": True,
"color": "C2",
}
self.plot_dicts["rb_text"] = {
"plotfn": self.plot_text,
"text_string": pdd["rb_msg_" + fit_input_tag],
"xpos": 1.05,
"ypos": 0.6,
"ax_id": rb_fig_id,
"horizontalalignment": "left",
}
class RandomizedBenchmarking_TwoQubit_Analysis(
RandomizedBenchmarking_SingleQubit_Analysis
):
def __init__(
self,
t_start: str = None,
t_stop: str = None,
label="",
options_dict: dict = None,
auto=True,
close_figs=True,
classification_method="rates",
rates_I_quad_ch_idxs: list = [0, 2],
ignore_f_cal_pts: bool = False,
extract_only: bool = False,
):
if options_dict is None:
options_dict = dict()
super(RandomizedBenchmarking_SingleQubit_Analysis, self).__init__(
t_start=t_start,
t_stop=t_stop,
label=label,
options_dict=options_dict,
close_figs=close_figs,
do_fitting=True,
extract_only=extract_only,
)
self.d1 = 4
self.rates_I_quad_ch_idxs = rates_I_quad_ch_idxs
# used to determine how to determine 2nd excited state population
self.classification_method = classification_method
        # The interleaved analysis does a few nasty things and this becomes
        # necessary
self.overwrite_qois = True
if auto:
self.run_analysis()
def extract_data(self):
"""
Custom data extraction for this specific experiment.
"""
self.raw_data_dict = OrderedDict()
        # We run the single-qubit analysis twice, once for each qubit;
        # it generates all the quantities we need for each of them
cal_2Q = ["00", "01", "10", "11", "02", "20", "22"]
rates_I_quad_ch_idx = self.rates_I_quad_ch_idxs[0]
cal_1Q = [state[rates_I_quad_ch_idx // 2] for state in cal_2Q]
a_q0 = RandomizedBenchmarking_SingleQubit_Analysis(
t_start=self.t_start,
rates_I_quad_ch_idx=rates_I_quad_ch_idx,
cal_pnts_in_dset=cal_1Q,
do_fitting=False,
extract_only=self.extract_only,
)
rates_I_quad_ch_idx = self.rates_I_quad_ch_idxs[1]
cal_1Q = [state[rates_I_quad_ch_idx // 2] for state in cal_2Q]
a_q1 = RandomizedBenchmarking_SingleQubit_Analysis(
t_start=self.t_start,
rates_I_quad_ch_idx=rates_I_quad_ch_idx,
cal_pnts_in_dset=cal_1Q,
do_fitting=False,
extract_only=self.extract_only,
)
# Upwards and downwards hierarchical compatibilities
rdd = self.raw_data_dict
self.timestamps = a_q0.timestamps
rdd["analyses"] = {"q0": a_q0, "q1": a_q1}
rdd["folder"] = a_q0.raw_data_dict["folder"]
rdd["timestamps"] = a_q0.raw_data_dict["timestamps"]
rdd["timestamp_string"] = a_q0.raw_data_dict["timestamp_string"]
rdd["measurementstring"] = a_q1.raw_data_dict["measurementstring"]
def process_data(self):
self.proc_data_dict = OrderedDict()
pdd = self.proc_data_dict
for key in ["M0", "X1"]:
# Keeping it compatible with 1Q on purpose
pdd[key] = OrderedDict()
rdd = self.raw_data_dict
pdd["folder"] = rdd["folder"]
pdd["timestamps"] = rdd["timestamps"]
pdd["timestamp_string"] = rdd["timestamp_string"]
pdd["measurementstring"] = rdd["measurementstring"]
val_names = rdd["analyses"]["q0"].raw_data_dict["value_names"]
if self.classification_method == "rates":
val_name_q0 = val_names[self.rates_I_quad_ch_idxs[0]]
val_name_q1 = val_names[self.rates_I_quad_ch_idxs[1]]
fit_input_tag = "2Q"
self.proc_data_dict["M0"][fit_input_tag] = (
rdd["analyses"]["q0"].proc_data_dict["P0"][val_name_q0]
* rdd["analyses"]["q1"].proc_data_dict["P0"][val_name_q1]
)
self.proc_data_dict["X1"][fit_input_tag] = (
1
- rdd["analyses"]["q0"].proc_data_dict["P2"][val_name_q0]
- rdd["analyses"]["q1"].proc_data_dict["P2"][val_name_q1]
)
else:
raise NotImplementedError()
# Required for the plotting in super()
pdd["ncl"] = rdd["analyses"]["q0"].raw_data_dict["ncl"]
def run_fitting(self):
        # Call the fitting routine of the class above
fit_input_tag = "2Q"
super().run_fitting(fit_input_tag=fit_input_tag)
def prepare_plots(self):
# Call the prepare plots of the class above
fit_input_tag = "2Q"
super().prepare_plots(fit_input_tag=fit_input_tag)
class UnitarityBenchmarking_TwoQubit_Analysis(
RandomizedBenchmarking_SingleQubit_Analysis
):
def __init__(
self,
t_start: str = None,
t_stop: str = None,
label="",
options_dict: dict = None,
auto=True,
close_figs=True,
classification_method="rates",
rates_ch_idxs: list = [0, 2],
ignore_f_cal_pts: bool = False,
nseeds: int = None,
**kwargs
):
"""Analysis for unitarity benchmarking.
This analysis is based on
"""
log.error(
"[2020-07-12 Victor] This analysis requires to be "
"upgraded to the new version of the 1Q-RB analysis."
)
if nseeds is None:
raise TypeError("You must specify number of seeds!")
self.nseeds = nseeds
if options_dict is None:
options_dict = dict()
super(RandomizedBenchmarking_SingleQubit_Analysis, self).__init__(
t_start=t_start,
t_stop=t_stop,
label=label,
options_dict=options_dict,
close_figs=close_figs,
do_fitting=True,
**kwargs
)
self.d1 = 4
# used to determine how to determine 2nd excited state population
self.classification_method = classification_method
self.rates_ch_idxs = rates_ch_idxs
self.ignore_f_cal_pts = ignore_f_cal_pts
if auto:
self.run_analysis()
def extract_data(self):
"""Custom data extraction for Unitarity benchmarking.
        To determine the unitarity, data is acquired in different bases.
This method extracts that data and puts it in specific bins.
"""
self.raw_data_dict = OrderedDict()
self.timestamps = a_tools.get_timestamps_in_range(
self.t_start, self.t_stop, label=self.labels
)
a = ma_old.MeasurementAnalysis(
timestamp=self.timestamps[0], auto=False, close_file=False
)
a.get_naming_and_values()
if "bins" in a.data_file["Experimental Data"]["Experimental Metadata"].keys():
bins = a.data_file["Experimental Data"]["Experimental Metadata"]["bins"][()]
self.raw_data_dict["ncl"] = bins[:-7:10] # 7 calibration points
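            # The stride of 10 corresponds to the 10 measurement bases acquired
            # per number of Cliffords (ZZ, XZ, YZ, ZX, XX, YX, ZY, XY, YY, mZmZ),
            # see the binning further below.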
self.raw_data_dict["bins"] = bins
self.raw_data_dict["value_names"] = a.value_names
self.raw_data_dict["value_units"] = a.value_units
self.raw_data_dict["measurementstring"] = a.measurementstring
self.raw_data_dict["timestamp_string"] = a.timestamp_string
self.raw_data_dict["binned_vals"] = OrderedDict()
self.raw_data_dict["cal_pts_x0"] = OrderedDict()
self.raw_data_dict["cal_pts_x1"] = OrderedDict()
self.raw_data_dict["cal_pts_x2"] = OrderedDict()
self.raw_data_dict["cal_pts_0x"] = OrderedDict()
self.raw_data_dict["cal_pts_1x"] = OrderedDict()
self.raw_data_dict["cal_pts_2x"] = OrderedDict()
self.raw_data_dict["measured_values_ZZ"] = OrderedDict()
self.raw_data_dict["measured_values_XZ"] = OrderedDict()
self.raw_data_dict["measured_values_YZ"] = OrderedDict()
self.raw_data_dict["measured_values_ZX"] = OrderedDict()
self.raw_data_dict["measured_values_XX"] = OrderedDict()
self.raw_data_dict["measured_values_YX"] = OrderedDict()
self.raw_data_dict["measured_values_ZY"] = OrderedDict()
self.raw_data_dict["measured_values_XY"] = OrderedDict()
self.raw_data_dict["measured_values_YY"] = OrderedDict()
self.raw_data_dict["measured_values_mZmZ"] = OrderedDict()
for i, val_name in enumerate(a.value_names):
invalid_idxs = np.where(
(a.measured_values[0] == 0)
& (a.measured_values[1] == 0)
& (a.measured_values[2] == 0)
& (a.measured_values[3] == 0)
)[0]
a.measured_values[:, invalid_idxs] = np.array(
[[np.nan] * len(invalid_idxs)] * 4
)
binned_yvals = np.reshape(
a.measured_values[i], (len(bins), -1), order="F"
)
self.raw_data_dict["binned_vals"][val_name] = binned_yvals
# 7 cal points: [00, 01, 10, 11, 02, 20, 22]
# col_idx: [-7, -6, -5, -4, -3, -2, -1]
self.raw_data_dict["cal_pts_x0"][val_name] = binned_yvals[
(-7, -5), :
].flatten()
self.raw_data_dict["cal_pts_x1"][val_name] = binned_yvals[
(-6, -4), :
].flatten()
self.raw_data_dict["cal_pts_x2"][val_name] = binned_yvals[
(-3, -1), :
].flatten()
self.raw_data_dict["cal_pts_0x"][val_name] = binned_yvals[
(-7, -6), :
].flatten()
self.raw_data_dict["cal_pts_1x"][val_name] = binned_yvals[
(-5, -4), :
].flatten()
self.raw_data_dict["cal_pts_2x"][val_name] = binned_yvals[
(-2, -1), :
].flatten()
self.raw_data_dict["measured_values_ZZ"][val_name] = binned_yvals[
0:-7:10, :
]
self.raw_data_dict["measured_values_XZ"][val_name] = binned_yvals[
1:-7:10, :
]
self.raw_data_dict["measured_values_YZ"][val_name] = binned_yvals[
2:-7:10, :
]
self.raw_data_dict["measured_values_ZX"][val_name] = binned_yvals[
3:-7:10, :
]
self.raw_data_dict["measured_values_XX"][val_name] = binned_yvals[
4:-7:10, :
]
self.raw_data_dict["measured_values_YX"][val_name] = binned_yvals[
5:-7:10, :
]
self.raw_data_dict["measured_values_ZY"][val_name] = binned_yvals[
6:-7:10, :
]
self.raw_data_dict["measured_values_XY"][val_name] = binned_yvals[
7:-7:10, :
]
self.raw_data_dict["measured_values_YY"][val_name] = binned_yvals[
8:-7:10, :
]
self.raw_data_dict["measured_values_mZmZ"][val_name] = binned_yvals[
9:-7:10, :
]
else:
bins = None
self.raw_data_dict["folder"] = a.folder
self.raw_data_dict["timestamps"] = self.timestamps
a.finish() # closes data file
def process_data(self):
"""Averages shot data and calculates unitarity from raw_data_dict.
        Note: this does not correct the outcomes for leakage.
"""
self.proc_data_dict = deepcopy(self.raw_data_dict)
keys = [
"Vx0",
"V0x",
"Vx1",
"V1x",
"Vx2",
"V2x",
"SI",
"SX",
"Px0",
"P0x",
"Px1",
"P1x",
"Px2",
"P2x",
"M_inv_q0",
"M_inv_q1",
]
keys += [
"XX",
"XY",
"XZ",
"YX",
"YY",
"YZ",
"ZX",
"ZY",
"ZZ",
"XX_sq",
"XY_sq",
"XZ_sq",
"YX_sq",
"YY_sq",
"YZ_sq",
"ZX_sq",
"ZY_sq",
"ZZ_sq",
"unitarity_shots",
"unitarity",
]
keys += [
"XX_q0",
"XY_q0",
"XZ_q0",
"YX_q0",
"YY_q0",
"YZ_q0",
"ZX_q0",
"ZY_q0",
"ZZ_q0",
]
keys += [
"XX_q1",
"XY_q1",
"XZ_q1",
"YX_q1",
"YY_q1",
"YZ_q1",
"ZX_q1",
"ZY_q1",
"ZZ_q1",
]
for key in keys:
self.proc_data_dict[key] = OrderedDict()
for val_name in self.raw_data_dict["value_names"]:
for idx in ["x0", "x1", "x2", "0x", "1x", "2x"]:
self.proc_data_dict["V{}".format(idx)][val_name] = np.nanmean(
self.raw_data_dict["cal_pts_{}".format(idx)][val_name]
)
SI = np.nanmean(self.raw_data_dict["measured_values_ZZ"][val_name], axis=1)
SX = np.nanmean(
self.raw_data_dict["measured_values_mZmZ"][val_name], axis=1
)
self.proc_data_dict["SI"][val_name] = SI
self.proc_data_dict["SX"][val_name] = SX
Px0, Px1, Px2, M_inv_q0 = populations_using_rate_equations(
SI,
SX,
self.proc_data_dict["Vx0"][val_name],
self.proc_data_dict["Vx1"][val_name],
self.proc_data_dict["Vx2"][val_name],
)
P0x, P1x, P2x, M_inv_q1 = populations_using_rate_equations(
SI,
SX,
self.proc_data_dict["V0x"][val_name],
self.proc_data_dict["V1x"][val_name],
self.proc_data_dict["V2x"][val_name],
)
for key, val in [
("Px0", Px0),
("Px1", Px1),
("Px2", Px2),
("P0x", P0x),
("P1x", P1x),
("P2x", P2x),
("M_inv_q0", M_inv_q0),
("M_inv_q1", M_inv_q1),
]:
self.proc_data_dict[key][val_name] = val
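            # For every measurement basis: subtract the |1> calibration level,
            # normalise by (V0 - V1) to get a population, average the shots within
            # each seed (seeds are kept separate for the unitarity estimate below),
            # and map to an expectation value via 2*p - 1.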
for key in ["XX", "XY", "XZ", "YX", "YY", "YZ", "ZX", "ZY", "ZZ"]:
Vmeas = self.raw_data_dict["measured_values_" + key][val_name]
Px2 = self.proc_data_dict["Px2"][val_name]
V0 = self.proc_data_dict["Vx0"][val_name]
V1 = self.proc_data_dict["Vx1"][val_name]
V2 = self.proc_data_dict["Vx2"][val_name]
val = Vmeas + 0 # - (Px2*V2 - (1-Px2)*V1)[:,None]
val -= V1
val /= V0 - V1
val = np.mean(np.reshape(val, (val.shape[0], self.nseeds, -1)), axis=2)
self.proc_data_dict[key + "_q0"][val_name] = val * 2 - 1
P2x = self.proc_data_dict["P2x"][val_name]
V0 = self.proc_data_dict["V0x"][val_name]
V1 = self.proc_data_dict["V1x"][val_name]
# Leakage is ignored in this analysis.
# V2 = self.proc_data_dict['V2x'][val_name]
val = Vmeas + 0 # - (P2x*V2 - (1-P2x)*V1)[:,None]
val -= V1
val /= V0 - V1
val = np.mean(np.reshape(val, (val.shape[0], self.nseeds, -1)), axis=2)
self.proc_data_dict[key + "_q1"][val_name] = val * 2 - 1
if self.classification_method == "rates":
val_name_q0 = self.raw_data_dict["value_names"][self.rates_ch_idxs[0]]
val_name_q1 = self.raw_data_dict["value_names"][self.rates_ch_idxs[1]]
self.proc_data_dict["M0"] = (
self.proc_data_dict["Px0"][val_name_q0]
* self.proc_data_dict["P0x"][val_name_q1]
)
self.proc_data_dict["X1"] = (
1
- self.proc_data_dict["Px2"][val_name_q0]
- self.proc_data_dict["P2x"][val_name_q1]
)
# The unitarity is calculated here.
self.proc_data_dict["unitarity_shots"] = (
self.proc_data_dict["ZZ_q0"][val_name_q0] * 0
)
# Unitarity according to Eq. (10) Wallman et al. New J. Phys. 2015
# Pj = d/(d-1)*|n(rho_j)|^2
# Note that the dimensionality prefix is ignored here as it
# should drop out in the fits.
for key in ["XX", "XY", "XZ", "YX", "YY", "YZ", "ZX", "ZY", "ZZ"]:
self.proc_data_dict[key] = (
self.proc_data_dict[key + "_q0"][val_name_q0]
* self.proc_data_dict[key + "_q1"][val_name_q1]
)
self.proc_data_dict[key + "_sq"] = self.proc_data_dict[key] ** 2
self.proc_data_dict["unitarity_shots"] += self.proc_data_dict[
key + "_sq"
]
self.proc_data_dict["unitarity"] = np.mean(
self.proc_data_dict["unitarity_shots"], axis=1
)
else:
raise NotImplementedError()
def run_fitting(self):
super().run_fitting()
self.fit_res["unitarity_decay"] = self.fit_unitarity_decay()
unitarity_dec = self.fit_res["unitarity_decay"].params
text_msg = "Summary: \n"
text_msg += format_value_string(
"Unitarity\n" + r"$u$", unitarity_dec["u"], "\n"
)
text_msg += format_value_string(
"Error due to\nincoherent mechanisms\n" + r"$\epsilon$",
unitarity_dec["eps"],
)
self.proc_data_dict["unitarity_msg"] = text_msg
def fit_unitarity_decay(self):
"""Fits the data using the unitarity model."""
fit_mod_unitarity = lmfit.Model(unitarity_decay, independent_vars="m")
fit_mod_unitarity.set_param_hint("A", value=0.1, min=0, max=1, vary=True)
fit_mod_unitarity.set_param_hint("B", value=0.8, min=0, max=1, vary=True)
fit_mod_unitarity.set_param_hint("u", value=0.9, min=0, max=1, vary=True)
fit_mod_unitarity.set_param_hint("d1", value=self.d1, vary=False)
# Error due to incoherent sources
# Feng Phys. Rev. Lett. 117, 260501 (2016) eq. (4)
fit_mod_unitarity.set_param_hint("eps", expr="((d1-1)/d1)*(1-u**0.5)")
params = fit_mod_unitarity.make_params()
fit_mod_unitarity = fit_mod_unitarity.fit(
data=self.proc_data_dict["unitarity"],
m=self.proc_data_dict["ncl"],
params=params,
)
return fit_mod_unitarity
def prepare_plots(self):
val_names = self.proc_data_dict["value_names"]
for i, val_name in enumerate(val_names):
self.plot_dicts["binned_data_{}".format(val_name)] = {
"plotfn": self.plot_line,
"xvals": self.proc_data_dict["bins"],
"yvals": np.nanmean(
self.proc_data_dict["binned_vals"][val_name], axis=1
),
"yerr": sem(self.proc_data_dict["binned_vals"][val_name], axis=1),
"xlabel": "Number of Cliffords",
"xunit": "#",
"ylabel": val_name,
"yunit": self.proc_data_dict["value_units"][i],
"title": self.proc_data_dict["timestamp_string"]
+ "\n"
+ self.proc_data_dict["measurementstring"],
}
fs = plt.rcParams["figure.figsize"]
# define figure and axes here to have custom layout
self.figs["rb_populations_decay"], axs = plt.subplots(
ncols=2, sharex=True, sharey=True, figsize=(fs[0] * 1.5, fs[1])
)
self.figs["rb_populations_decay"].suptitle(
self.proc_data_dict["timestamp_string"]
+ "\n"
+ "Population using rate equations",
y=1.05,
)
self.figs["rb_populations_decay"].patch.set_alpha(0)
self.axs["rb_pops_q0"] = axs[0]
self.axs["rb_pops_q1"] = axs[1]
val_name_q0 = val_names[self.rates_ch_idxs[0]]
val_name_q1 = val_names[self.rates_ch_idxs[1]]
self.plot_dicts["rb_rate_eq_pops_{}".format(val_name_q0)] = {
"plotfn": plot_populations_RB_curve,
"ncl": self.proc_data_dict["ncl"],
"P0": self.proc_data_dict["Px0"][val_name_q0],
"P1": self.proc_data_dict["Px1"][val_name_q0],
"P2": self.proc_data_dict["Px2"][val_name_q0],
"title": " {}".format(val_name_q0),
"ax_id": "rb_pops_q0",
}
self.plot_dicts["rb_rate_eq_pops_{}".format(val_name_q1)] = {
"plotfn": plot_populations_RB_curve,
"ncl": self.proc_data_dict["ncl"],
"P0": self.proc_data_dict["P0x"][val_name_q1],
"P1": self.proc_data_dict["P1x"][val_name_q1],
"P2": self.proc_data_dict["P2x"][val_name_q1],
"title": " {}".format(val_name_q1),
"ax_id": "rb_pops_q1",
}
self.plot_dicts["cal_points_hexbin_q0"] = {
"plotfn": plot_cal_points_hexbin,
"shots_0": (
self.proc_data_dict["cal_pts_x0"][val_names[0]],
self.proc_data_dict["cal_pts_x0"][val_names[1]],
),
"shots_1": (
self.proc_data_dict["cal_pts_x1"][val_names[0]],
self.proc_data_dict["cal_pts_x1"][val_names[1]],
),
"shots_2": (
self.proc_data_dict["cal_pts_x2"][val_names[0]],
self.proc_data_dict["cal_pts_x2"][val_names[1]],
),
"xlabel": val_names[0],
"xunit": self.proc_data_dict["value_units"][0],
"ylabel": val_names[1],
"yunit": self.proc_data_dict["value_units"][1],
"common_clims": False,
"title": self.proc_data_dict["timestamp_string"]
+ "\n"
+ self.proc_data_dict["measurementstring"]
+ " hexbin plot q0",
"plotsize": (fs[0] * 1.5, fs[1]),
}
self.plot_dicts["cal_points_hexbin_q1"] = {
"plotfn": plot_cal_points_hexbin,
"shots_0": (
self.proc_data_dict["cal_pts_0x"][val_names[2]],
self.proc_data_dict["cal_pts_0x"][val_names[3]],
),
"shots_1": (
self.proc_data_dict["cal_pts_1x"][val_names[2]],
self.proc_data_dict["cal_pts_1x"][val_names[3]],
),
"shots_2": (
self.proc_data_dict["cal_pts_2x"][val_names[2]],
self.proc_data_dict["cal_pts_2x"][val_names[3]],
),
"xlabel": val_names[2],
"xunit": self.proc_data_dict["value_units"][2],
"ylabel": val_names[3],
"yunit": self.proc_data_dict["value_units"][3],
"common_clims": False,
"title": self.proc_data_dict["timestamp_string"]
+ "\n"
+ self.proc_data_dict["measurementstring"]
+ " hexbin plot q1",
"plotsize": (fs[0] * 1.5, fs[1]),
}
# define figure and axes here to have custom layout
self.figs["main_rb_decay"], axs = plt.subplots(
nrows=2, sharex=True, gridspec_kw={"height_ratios": (2, 1)}
)
self.figs["main_rb_decay"].patch.set_alpha(0)
self.axs["main_rb_decay"] = axs[0]
self.axs["leak_decay"] = axs[1]
self.plot_dicts["main_rb_decay"] = {
"plotfn": plot_rb_decay_woods_gambetta,
"ncl": self.proc_data_dict["ncl"],
"M0": self.proc_data_dict["M0"],
"X1": self.proc_data_dict["X1"],
"ax1": axs[1],
"title": self.proc_data_dict["timestamp_string"]
+ "\n"
+ self.proc_data_dict["measurementstring"],
}
self.plot_dicts["fit_leak"] = {
"plotfn": self.plot_fit,
"ax_id": "leak_decay",
"fit_res": self.fit_res["leakage_decay"],
"setlabel": "Leakage fit",
"do_legend": True,
"color": "C2",
}
self.plot_dicts["fit_rb_simple"] = {
"plotfn": self.plot_fit,
"ax_id": "main_rb_decay",
"fit_res": self.fit_res["rb_decay_simple"],
"setlabel": "Simple RB fit",
"do_legend": True,
}
self.plot_dicts["fit_rb"] = {
"plotfn": self.plot_fit,
"ax_id": "main_rb_decay",
"fit_res": self.fit_res["rb_decay"],
"setlabel": "Full RB fit",
"do_legend": True,
"color": "C2",
}
self.plot_dicts["rb_text"] = {
"plotfn": self.plot_text,
"text_string": self.proc_data_dict["rb_msg"],
"xpos": 1.05,
"ypos": 0.6,
"ax_id": "main_rb_decay",
"horizontalalignment": "left",
}
self.plot_dicts["correlated_readouts"] = {
"plotfn": plot_unitarity_shots,
"ncl": self.proc_data_dict["ncl"],
"unitarity_shots": self.proc_data_dict["unitarity_shots"],
"xlabel": "Number of Cliffords",
"xunit": "#",
"ylabel": "Unitarity",
"yunit": "",
"title": self.proc_data_dict["timestamp_string"]
+ "\n"
+ self.proc_data_dict["measurementstring"],
}
self.figs["unitarity"] = plt.subplots(nrows=1)
self.plot_dicts["unitarity"] = {
"plotfn": plot_unitarity,
"ax_id": "unitarity",
"ncl": self.proc_data_dict["ncl"],
"P": self.proc_data_dict["unitarity"],
"xlabel": "Number of Cliffords",
"xunit": "#",
"ylabel": "Unitarity",
"yunit": "frac",
"title": self.proc_data_dict["timestamp_string"]
+ "\n"
+ self.proc_data_dict["measurementstring"],
}
self.plot_dicts["fit_unitarity"] = {
"plotfn": self.plot_fit,
"ax_id": "unitarity",
"fit_res": self.fit_res["unitarity_decay"],
"setlabel": "Simple unitarity fit",
"do_legend": True,
}
self.plot_dicts["unitarity_text"] = {
"plotfn": self.plot_text,
"text_string": self.proc_data_dict["unitarity_msg"],
"xpos": 0.6,
"ypos": 0.8,
"ax_id": "unitarity",
"horizontalalignment": "left",
}
class InterleavedRandomizedBenchmarkingAnalysis(ba.BaseDataAnalysis):
"""
Analysis for two qubit interleaved randomized benchmarking of a CZ gate.
[2020-07-12 Victor] upgraded to allow for analysis of iRB for the
parked qubit during CZ on the other qubits
This is a meta-analysis. It runs
"RandomizedBenchmarking_TwoQubit_Analysis" for each of the individual
datasets in the "extract_data" method and uses the quantities of interest
to create the combined figure.
The figure as well as the quantities of interest are stored in
the interleaved data file.
"""
def __init__(
self,
ts_base: str = None,
ts_int: str = None,
ts_int_idle: str = None,
label_base: str = "",
label_int: str = "",
label_int_idle: str = "",
options_dict: dict = {},
auto=True,
close_figs=True,
rates_I_quad_ch_idxs: list = [0, 2],
ignore_f_cal_pts: bool = False,
plot_label="",
extract_only=False,
):
super().__init__(
do_fitting=True,
close_figs=close_figs,
options_dict=options_dict,
extract_only=extract_only,
)
self.ts_base = ts_base
self.ts_int = ts_int
self.ts_int_idle = ts_int_idle
self.label_base = label_base
self.label_int = label_int
self.label_int_idle = label_int_idle
self.include_idle = self.ts_int_idle or self.label_int_idle
assert ts_base or label_base
assert ts_int or label_int
self.rates_I_quad_ch_idxs = rates_I_quad_ch_idxs
self.options_dict = options_dict
self.close_figs = close_figs
self.ignore_f_cal_pts = ignore_f_cal_pts
self.plot_label = plot_label
# For other classes derived from this one this will change
self.fit_tag = "2Q"
self.int_name = "CZ"
if auto:
self.run_analysis()
def extract_data(self):
self.raw_data_dict = OrderedDict()
a_base = RandomizedBenchmarking_TwoQubit_Analysis(
t_start=self.ts_base,
label=self.label_base,
options_dict=self.options_dict,
auto=True,
close_figs=self.close_figs,
rates_I_quad_ch_idxs=self.rates_I_quad_ch_idxs,
extract_only=True,
ignore_f_cal_pts=self.ignore_f_cal_pts,
)
a_int = RandomizedBenchmarking_TwoQubit_Analysis(
t_start=self.ts_int,
label=self.label_int,
options_dict=self.options_dict,
auto=True,
close_figs=self.close_figs,
rates_I_quad_ch_idxs=self.rates_I_quad_ch_idxs,
extract_only=True,
ignore_f_cal_pts=self.ignore_f_cal_pts,
)
if self.include_idle:
a_int_idle = RandomizedBenchmarking_TwoQubit_Analysis(
t_start=self.ts_int_idle,
label=self.label_int_idle,
options_dict=self.options_dict,
auto=True,
close_figs=self.close_figs,
rates_I_quad_ch_idxs=self.rates_I_quad_ch_idxs,
extract_only=True,
ignore_f_cal_pts=self.ignore_f_cal_pts,
)
# order is such that any information (figures, quantities of interest)
        # is saved in the interleaved file.
self.timestamps = [a_int.timestamps[0], a_base.timestamps[0]]
self.raw_data_dict["timestamps"] = self.timestamps
self.raw_data_dict["timestamp_string"] = a_int.proc_data_dict[
"timestamp_string"
]
self.raw_data_dict["folder"] = a_int.proc_data_dict["folder"]
a_dict = {"base": a_base, "int": a_int}
if self.include_idle:
a_dict["int_idle"] = a_int_idle
self.raw_data_dict["analyses"] = a_dict
if not self.plot_label:
self.plot_label = a_int.proc_data_dict["measurementstring"]
def process_data(self):
self.proc_data_dict = OrderedDict()
self.proc_data_dict["quantities_of_interest"] = {}
qoi = self.proc_data_dict["quantities_of_interest"]
qoi_base = self.raw_data_dict["analyses"]["base"].proc_data_dict[
"quantities_of_interest"
]
qoi_int = self.raw_data_dict["analyses"]["int"].proc_data_dict[
"quantities_of_interest"
]
self.overwrite_qois = True
qoi.update({k + "_ref": v for k, v in qoi_base.items()})
qoi.update({k + "_int": v for k, v in qoi_int.items()})
# The functionality of this analysis was extended to make it usable for
        # an interleaved parking idle flux pulse
fit_tag = self.fit_tag
int_name = self.int_name
qoi["eps_%s_X1" % int_name] = interleaved_error(
eps_int=qoi_int["eps_X1_%s" % fit_tag],
eps_base=qoi_base["eps_X1_%s" % fit_tag],
)
qoi["eps_%s_simple" % int_name] = interleaved_error(
eps_int=qoi_int["eps_simple_%s" % fit_tag],
eps_base=qoi_base["eps_simple_%s" % fit_tag],
)
qoi["L1_%s" % int_name] = interleaved_error(
eps_int=qoi_int["L1_%s" % fit_tag], eps_base=qoi_base["L1_%s" % fit_tag]
)
if self.include_idle:
qoi_int_idle = self.raw_data_dict["analyses"]["int_idle"].proc_data_dict[
"quantities_of_interest"
]
qoi.update({k + "_int_idle": v for k, v in qoi_int_idle.items()})
qoi["eps_idle_X1"] = interleaved_error(
eps_int=qoi_int_idle["eps_X1_%s" % fit_tag],
eps_base=qoi_base["eps_X1_%s" % fit_tag],
)
qoi["eps_idle_simple"] = interleaved_error(
eps_int=qoi_int_idle["eps_simple_%s" % fit_tag],
eps_base=qoi_base["eps_simple_%s" % fit_tag],
)
qoi["L1_idle"] = interleaved_error(
eps_int=qoi_int_idle["L1_%s" % fit_tag],
eps_base=qoi_base["L1_%s" % fit_tag],
)
if int_name == "CZ":
# This is the naive estimate, when all observed error is assigned
# to the CZ gate
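            # The exponent 1/1.5 assumes an average of 1.5 CZ gates per
            # two-qubit Clifford in the decomposition used here.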
try:
qoi["L1_%s_naive" % int_name] = 1 - (
1 - qoi_base["L1_%s" % fit_tag]
) ** (1 / 1.5)
qoi["eps_%s_simple_naive" % int_name] = 1 - (
1 - qoi_base["eps_simple_%s" % fit_tag]
) ** (1 / 1.5)
qoi["eps_%s_X1_naive" % int_name] = 1 - (
1 - qoi_base["eps_X1_%s" % fit_tag]
) ** (1 / 1.5)
except ValueError:
# prevents the analysis from crashing if the fits are bad.
qoi["L1_%s_naive" % int_name] = ufloat(np.NaN, np.NaN)
qoi["eps_%s_simple_naive" % int_name] = ufloat(np.NaN, np.NaN)
qoi["eps_%s_X1_naive" % int_name] = ufloat(np.NaN, np.NaN)
def prepare_plots(self):
        # These might seem unused, but there is an `eval` below
dd_ref = self.raw_data_dict["analyses"]["base"].proc_data_dict
dd_int = self.raw_data_dict["analyses"]["int"].proc_data_dict
fr_ref = self.raw_data_dict["analyses"]["base"].fit_res
fr_int = self.raw_data_dict["analyses"]["int"].fit_res
dds = {
"int": dd_int,
"ref": dd_ref,
}
frs = {
"int": fr_int,
"ref": fr_ref,
}
if self.include_idle:
fr_int_idle = self.raw_data_dict["analyses"]["int_idle"].fit_res
dd_int_idle = self.raw_data_dict["analyses"]["int_idle"].proc_data_dict
dds["int_idle"] = dd_int_idle
frs["int_idle"] = fr_int_idle
fs = plt.rcParams["figure.figsize"]
self.figs["main_irb_decay"], axs = plt.subplots(
nrows=2,
sharex=True,
gridspec_kw={"height_ratios": (2, 1)},
figsize=(fs[0] * 1.3, fs[1] * 1.3),
)
self.figs["main_irb_decay"].patch.set_alpha(0)
self.axs["main_irb_decay"] = axs[0]
self.axs["leak_decay"] = axs[1]
self.plot_dicts["main_irb_decay"] = {
"plotfn": plot_irb_decay_woods_gambetta,
"ncl": dd_ref["ncl"],
"include_idle": self.include_idle,
"fit_tag": self.fit_tag,
"int_name": self.int_name,
"qoi": self.proc_data_dict["quantities_of_interest"],
"ax1": axs[1],
"title": "{} - {}\n{}".format(
self.timestamps[0], self.timestamps[1], self.plot_label
),
}
def add_to_plot_dict(
plot_dict: dict,
tag: str,
dd_quantities: list,
fit_quantities: list,
dds: dict,
frs: dict,
):
for dd_q in dd_quantities:
plot_dict[dd_q + "_" + tag] = dds[tag][dd_q][self.fit_tag]
for fit_q in fit_quantities:
trans = {
"rb_decay": "fr_M0",
"rb_decay_simple": "fr_M0_simple",
"leakage_decay": "fr_X1",
}
plot_dict[trans[fit_q] + "_" + tag] = frs[tag][
fit_q + "_{}".format(self.fit_tag)
]
tags = ["ref", "int"]
if self.include_idle:
tags.append("int_idle")
for tag in tags:
add_to_plot_dict(
self.plot_dicts["main_irb_decay"],
tag=tag,
dd_quantities=["M0", "X1"],
fit_quantities=["rb_decay", "rb_decay_simple", "leakage_decay"],
dds=dds,
frs=frs,
)
class InterleavedRandomizedBenchmarkingParkingAnalysis(
InterleavedRandomizedBenchmarkingAnalysis, ba.BaseDataAnalysis
):
"""
Analysis for single qubit interleaved randomized benchmarking where the
interleaved gate is a parking identity (with the corresponding CZ being
applied on the other two qubits)
This is a meta-analysis. It runs
"RandomizedBenchmarking_SingleQubit_Analysis" for each of the individual
datasets in the "extract_data" method and uses the quantities of interest
to create the combined figure.
The figure as well as the quantities of interest are stored in
the interleaved data file.
"""
def __init__(
self,
ts_base: str = None,
ts_int: str = None,
label_base: str = "",
label_int: str = "",
options_dict: dict = {},
auto=True,
close_figs=True,
rates_I_quad_ch_idx: int = -2,
rates_Q_quad_ch_idx: int = None,
ignore_f_cal_pts: bool = False,
plot_label="",
):
# Here we don't want to run the __init__ of the Interleaved analysis,
# only the __init__ of the base class
ba.BaseDataAnalysis.__init__(
self, do_fitting=True, close_figs=close_figs, options_dict=options_dict
)
self.ts_base = ts_base
self.ts_int = ts_int
self.label_base = label_base
self.label_int = label_int
assert ts_base or label_base
assert ts_int or label_int
self.rates_I_quad_ch_idx = rates_I_quad_ch_idx
self.rates_Q_quad_ch_idx = rates_Q_quad_ch_idx
if self.rates_Q_quad_ch_idx is None:
self.rates_Q_quad_ch_idx = rates_I_quad_ch_idx + 1
self.options_dict = options_dict
self.close_figs = close_figs
self.ignore_f_cal_pts = ignore_f_cal_pts
self.plot_label = plot_label
# For other classes derived from this one this will change
self.fit_tag = None # to be set in the extract data
self.int_name = "Idle flux"
self.include_idle = False
if auto:
self.run_analysis()
def extract_data(self):
self.raw_data_dict = OrderedDict()
a_base = RandomizedBenchmarking_SingleQubit_Analysis(
t_start=self.ts_base,
label=self.label_base,
options_dict=self.options_dict,
auto=True,
close_figs=self.close_figs,
rates_I_quad_ch_idx=self.rates_I_quad_ch_idx,
extract_only=True,
ignore_f_cal_pts=self.ignore_f_cal_pts,
)
a_int = RandomizedBenchmarking_SingleQubit_Analysis(
t_start=self.ts_int,
label=self.label_int,
options_dict=self.options_dict,
auto=True,
close_figs=self.close_figs,
rates_I_quad_ch_idx=self.rates_I_quad_ch_idx,
extract_only=True,
ignore_f_cal_pts=self.ignore_f_cal_pts,
)
self.fit_tag = a_base.raw_data_dict["value_names"][self.rates_I_quad_ch_idx]
# order is such that any information (figures, quantities of interest)
        # is saved in the interleaved file.
self.timestamps = [a_int.timestamps[0], a_base.timestamps[0]]
self.raw_data_dict["timestamps"] = self.timestamps
self.raw_data_dict["timestamp_string"] = a_int.proc_data_dict[
"timestamp_string"
]
self.raw_data_dict["folder"] = a_int.proc_data_dict["folder"]
self.raw_data_dict["analyses"] = {"base": a_base, "int": a_int}
if not self.plot_label:
self.plot_label = a_int.proc_data_dict["measurementstring"]
class CharacterBenchmarking_TwoQubit_Analysis(ba.BaseDataAnalysis):
"""
Analysis for character benchmarking.
"""
def __init__(
self,
t_start: str = None,
t_stop: str = None,
label="",
options_dict: dict = None,
auto=True,
close_figs=True,
ch_idxs: list = [0, 2],
):
if options_dict is None:
options_dict = dict()
super().__init__(
t_start=t_start,
t_stop=t_stop,
label=label,
options_dict=options_dict,
close_figs=close_figs,
do_fitting=True,
)
self.d1 = 4
self.ch_idxs = ch_idxs
if auto:
self.run_analysis()
def extract_data(self):
self.raw_data_dict = OrderedDict()
self.timestamps = a_tools.get_timestamps_in_range(
self.t_start, self.t_stop, label=self.labels
)
a = ma_old.MeasurementAnalysis(
timestamp=self.timestamps[0], auto=False, close_file=False
)
a.get_naming_and_values()
bins = a.data_file["Experimental Data"]["Experimental Metadata"]["bins"][()]
a.finish()
self.raw_data_dict["measurementstring"] = a.measurementstring
self.raw_data_dict["timestamp_string"] = a.timestamp_string
self.raw_data_dict["folder"] = a.folder
self.raw_data_dict["timestamps"] = self.timestamps
df = pd.DataFrame(
columns={"ncl", "pauli", "I_q0", "Q_q0", "I_q1", "Q_q1", "interleaving_cl"}
)
df["ncl"] = bins
# Assumptions on the structure of the datafile are made here.
        # For every Clifford, 4 random Paulis are sampled from the different
        # subsets:
paulis = [
"II", # 'IZ', 'ZI', 'ZZ', # P00
"IX", # 'IY', 'ZX', 'ZY', # P01
"XI", # 'XZ', 'YI', 'YZ', # P10
"XX",
] # 'XY', 'YX', 'YY'] # P11
paulis_df = np.tile(paulis, 34)[: len(bins)]
# The calibration points do not correspond to a Pauli
paulis_df[-7:] = np.nan
df["pauli"] = paulis_df
# The four different random Pauli's are performed both with
# and without the interleaving CZ gate.
df["interleaving_cl"] = np.tile([""] * 4 + ["CZ"] * 4, len(bins) // 8 + 1)[
: len(bins)
]
# Data is grouped and single shots are averaged.
for i, ch in enumerate(["I_q0", "Q_q0", "I_q1", "Q_q1"]):
binned_yvals = np.reshape(a.measured_values[i], (len(bins), -1), order="F")
yvals = np.mean(binned_yvals, axis=1)
df[ch] = yvals
self.raw_data_dict["df"] = df
def process_data(self):
self.proc_data_dict = OrderedDict()
df = self.raw_data_dict["df"]
cal_points = [
            # calibration point indices when ignoring the f-state cal pts
[[-7, -5], [-6, -4], [-3, -1]], # q0
[[-7, -5], [-6, -4], [-3, -1]], # q0
[[-7, -6], [-5, -4], [-2, -1]], # q1
[[-7, -6], [-5, -4], [-2, -1]], # q1
]
for ch, cal_pt in zip(["I_q0", "Q_q0", "I_q1", "Q_q1"], cal_points):
df[ch + "_normed"] = a_tools.normalize_data_v3(
df[ch].values, cal_zero_points=cal_pt[0], cal_one_points=cal_pt[1]
)
df["P_|00>"] = (1 - df["I_q0_normed"]) * (1 - df["Q_q1_normed"])
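        # The joint |00> probability is estimated as the product of the two
        # single-qubit ground-state probabilities; readout correlations between
        # the qubits are neglected here.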
P00 = (
df.loc[df["pauli"].isin(["II", "IZ", "ZI", "ZZ"])]
.loc[df["interleaving_cl"] == ""]
.groupby("ncl")
.mean()
)
P01 = (
df.loc[df["pauli"].isin(["IX", "IY", "ZX", "ZY"])]
.loc[df["interleaving_cl"] == ""]
.groupby("ncl")
.mean()
)
P10 = (
df.loc[df["pauli"].isin(["XI", "XZ", "YI", "YZ"])]
.loc[df["interleaving_cl"] == ""]
.groupby("ncl")
.mean()
)
P11 = (
df.loc[df["pauli"].isin(["XX", "XY", "YX", "YY"])]
.loc[df["interleaving_cl"] == ""]
.groupby("ncl")
.mean()
)
P00_CZ = (
df.loc[df["pauli"].isin(["II", "IZ", "ZI", "ZZ"])]
.loc[df["interleaving_cl"] == "CZ"]
.groupby("ncl")
.mean()
)
P01_CZ = (
df.loc[df["pauli"].isin(["IX", "IY", "ZX", "ZY"])]
.loc[df["interleaving_cl"] == "CZ"]
.groupby("ncl")
.mean()
)
P10_CZ = (
df.loc[df["pauli"].isin(["XI", "XZ", "YI", "YZ"])]
.loc[df["interleaving_cl"] == "CZ"]
.groupby("ncl")
.mean()
)
P11_CZ = (
df.loc[df["pauli"].isin(["XX", "XY", "YX", "YY"])]
.loc[df["interleaving_cl"] == "CZ"]
.groupby("ncl")
.mean()
)
# Calculate the character function
# Eq. 7 of Xue et al. ArXiv 1811.04002v1
C1 = P00["P_|00>"] - P01["P_|00>"] + P10["P_|00>"] - P11["P_|00>"]
C2 = P00["P_|00>"] + P01["P_|00>"] - P10["P_|00>"] - P11["P_|00>"]
C12 = P00["P_|00>"] - P01["P_|00>"] - P10["P_|00>"] + P11["P_|00>"]
C1_CZ = (
P00_CZ["P_|00>"] - P01_CZ["P_|00>"] + P10_CZ["P_|00>"] - P11_CZ["P_|00>"]
)
C2_CZ = (
P00_CZ["P_|00>"] + P01_CZ["P_|00>"] - P10_CZ["P_|00>"] - P11_CZ["P_|00>"]
)
C12_CZ = (
P00_CZ["P_|00>"] - P01_CZ["P_|00>"] - P10_CZ["P_|00>"] + P11_CZ["P_|00>"]
)
char_df = pd.DataFrame(
{
"P00": P00["P_|00>"],
"P01": P01["P_|00>"],
"P10": P10["P_|00>"],
"P11": P11["P_|00>"],
"P00_CZ": P00_CZ["P_|00>"],
"P01_CZ": P01_CZ["P_|00>"],
"P10_CZ": P10_CZ["P_|00>"],
"P11_CZ": P11_CZ["P_|00>"],
"C1": C1,
"C2": C2,
"C12": C12,
"C1_CZ": C1_CZ,
"C2_CZ": C2_CZ,
"C12_CZ": C12_CZ,
}
)
self.proc_data_dict["char_df"] = char_df
def run_fitting(self):
super().run_fitting()
char_df = self.proc_data_dict["char_df"]
# Eq. 8 of Xue et al. ArXiv 1811.04002v1
for char_key in ["C1", "C2", "C12", "C1_CZ", "C2_CZ", "C12_CZ"]:
char_mod = lmfit.Model(char_decay, independent_vars="m")
char_mod.set_param_hint("A", value=1, vary=True)
char_mod.set_param_hint("alpha", value=0.95)
params = char_mod.make_params()
self.fit_res[char_key] = char_mod.fit(
data=char_df[char_key].values, m=char_df.index, params=params
)
def analyze_fit_results(self):
fr = self.fit_res
self.proc_data_dict["quantities_of_interest"] = {}
qoi = self.proc_data_dict["quantities_of_interest"]
qoi["alpha1"] = ufloat(
fr["C1"].params["alpha"].value, fr["C1"].params["alpha"].stderr
)
qoi["alpha2"] = ufloat(
fr["C2"].params["alpha"].value, fr["C2"].params["alpha"].stderr
)
qoi["alpha12"] = ufloat(
fr["C12"].params["alpha"].value, fr["C12"].params["alpha"].stderr
)
# eq. 9 from Xue et al. ArXiv 1811.04002v1
qoi["alpha_char"] = (
3 / 15 * qoi["alpha1"] + 3 / 15 * qoi["alpha2"] + 9 / 15 * qoi["alpha12"]
)
qoi["alpha1_CZ_int"] = ufloat(
fr["C1_CZ"].params["alpha"].value, fr["C1_CZ"].params["alpha"].stderr
)
qoi["alpha2_CZ_int"] = ufloat(
fr["C2_CZ"].params["alpha"].value, fr["C2_CZ"].params["alpha"].stderr
)
qoi["alpha12_CZ_int"] = ufloat(
fr["C12_CZ"].params["alpha"].value, fr["C12_CZ"].params["alpha"].stderr
)
qoi["alpha_char_CZ_int"] = (
3 / 15 * qoi["alpha1_CZ_int"]
+ 3 / 15 * qoi["alpha2_CZ_int"]
+ 9 / 15 * qoi["alpha12_CZ_int"]
)
qoi["eps_ref"] = depolarizing_par_to_eps(qoi["alpha_char"], d=4)
qoi["eps_int"] = depolarizing_par_to_eps(qoi["alpha_char_CZ_int"], d=4)
# Interleaved error calculation Magesan et al. PRL 2012
qoi["eps_CZ"] = 1 - (1 - qoi["eps_int"]) / (1 - qoi["eps_ref"])
def prepare_plots(self):
char_df = self.proc_data_dict["char_df"]
# self.figs['puali_decays']
self.plot_dicts["pauli_decays"] = {
"plotfn": plot_char_RB_pauli_decays,
"ncl": char_df.index.values,
"P00": char_df["P00"].values,
"P01": char_df["P01"].values,
"P10": char_df["P10"].values,
"P11": char_df["P11"].values,
"P00_CZ": char_df["P00_CZ"].values,
"P01_CZ": char_df["P01_CZ"].values,
"P10_CZ": char_df["P10_CZ"].values,
"P11_CZ": char_df["P11_CZ"].values,
"title": self.raw_data_dict["measurementstring"]
+ "\n"
+ self.raw_data_dict["timestamp_string"]
+ "\nPauli decays",
}
self.plot_dicts["char_decay"] = {
"plotfn": plot_char_RB_decay,
"ncl": char_df.index.values,
"C1": char_df["C1"].values,
"C2": char_df["C2"].values,
"C12": char_df["C12"].values,
"C1_CZ": char_df["C1_CZ"].values,
"C2_CZ": char_df["C2_CZ"].values,
"C12_CZ": char_df["C12_CZ"].values,
"fr_C1": self.fit_res["C1"],
"fr_C2": self.fit_res["C2"],
"fr_C12": self.fit_res["C12"],
"fr_C1_CZ": self.fit_res["C1_CZ"],
"fr_C2_CZ": self.fit_res["C2_CZ"],
"fr_C12_CZ": self.fit_res["C12_CZ"],
"title": self.raw_data_dict["measurementstring"]
+ "\n"
+ self.raw_data_dict["timestamp_string"]
+ "\nCharacter decay",
}
self.plot_dicts["quantities_msg"] = {
"plotfn": plot_char_rb_quantities,
"ax_id": "char_decay",
"qoi": self.proc_data_dict["quantities_of_interest"],
}
def plot_cal_points_hexbin(
shots_0,
shots_1,
shots_2,
xlabel: str,
xunit: str,
ylabel: str,
yunit: str,
title: str,
ax,
common_clims: bool = True,
**kw
):
# Choose colormap
cmaps = [plt.cm.Blues, plt.cm.Reds, plt.cm.Greens]
alpha_cmaps = []
for cmap in cmaps:
my_cmap = cmap(np.arange(cmap.N))
my_cmap[:, -1] = np.linspace(0, 1, cmap.N)
my_cmap = ListedColormap(my_cmap)
alpha_cmaps.append(my_cmap)
f = plt.gcf()
mincnt = 1
hbs = []
shots_list = [shots_0, shots_1, shots_2]
for i, shots in enumerate(shots_list):
hb = ax.hexbin(
x=shots[0],
y=shots[1],
cmap=alpha_cmaps[i],
mincnt=mincnt,
norm=PowerNorm(gamma=0.25),
)
cb = f.colorbar(hb, ax=ax)
cb.set_label(r"Counts $|{}\rangle$".format(i))
hbs.append(hb)
if common_clims:
clims = [hb.get_clim() for hb in hbs]
clim = np.min(clims), np.max(clims)
for hb in hbs:
hb.set_clim(clim)
set_xlabel(ax, xlabel, xunit)
set_ylabel(ax, ylabel, yunit)
ax.set_title(title)
def plot_raw_RB_curve(
ncl, SI, SX, V0, V1, V2, title, ax, xlabel, xunit, ylabel, yunit, **kw
):
ax.plot(ncl, SI, label="SI", marker="o")
ax.plot(ncl, SX, label="SX", marker="o")
ax.plot(ncl[-1] + 0.5, V0, label="V0", marker="d", c="C0")
ax.plot(ncl[-1] + 1.5, V1, label="V1", marker="d", c="C1")
ax.plot(ncl[-1] + 2.5, V2, label="V2", marker="d", c="C2")
ax.set_title(title)
set_xlabel(ax, xlabel, xunit)
set_ylabel(ax, ylabel, yunit)
ax.legend()
def plot_populations_RB_curve(ncl, P0, P1, P2, title, ax, **kw):
ax.axhline(0.5, c="k", lw=0.5, ls="--")
ax.plot(ncl, P0, c="C0", label=r"P($|g\rangle$)", marker="v")
ax.plot(ncl, P1, c="C3", label=r"P($|e\rangle$)", marker="^")
ax.plot(ncl, P2, c="C2", label=r"P($|f\rangle$)", marker="d")
ax.set_xlabel("Number of Cliffords (#)")
ax.set_ylabel("Population")
ax.grid(axis="y")
ax.legend()
ax.set_ylim(-0.05, 1.05)
ax.set_title(title)
def plot_unitarity_shots(ncl, unitarity_shots, title, ax=None, **kw):
ax.axhline(0.5, c="k", lw=0.5, ls="--")
ax.plot(ncl, unitarity_shots, ".")
ax.set_xlabel("Number of Cliffords (#)")
ax.set_ylabel("unitarity")
ax.grid(axis="y")
ax.legend()
ax.set_ylim(-1.05, 1.05)
ax.set_title(title)
def plot_unitarity(ncl, P, title, ax=None, **kw):
ax.plot(ncl, P, "o")
ax.set_xlabel("Number of Cliffords (#)")
ax.set_ylabel("unitarity")
ax.grid(axis="y")
ax.legend()
ax.set_ylim(-0.05, 1.05)
ax.set_title(title)
def plot_char_RB_pauli_decays(
ncl, P00, P01, P10, P11, P00_CZ, P01_CZ, P10_CZ, P11_CZ, title, ax, **kw
):
"""
Plots the raw recovery probabilities for a character RB experiment.
"""
ax.plot(ncl, P00, c="C0", label=r"$P_{00}$", marker="o", ls="--")
ax.plot(ncl, P01, c="C1", label=r"$P_{01}$", marker="o", ls="--")
ax.plot(ncl, P10, c="C2", label=r"$P_{10}$", marker="o", ls="--")
ax.plot(ncl, P11, c="C3", label=r"$P_{11}$", marker="o", ls="--")
ax.plot(
ncl, P00_CZ, c="C0", label=r"$P_{00}$-int. CZ", marker="d", alpha=0.5, ls=":"
)
ax.plot(
ncl, P01_CZ, c="C1", label=r"$P_{01}$-int. CZ", marker="d", alpha=0.5, ls=":"
)
ax.plot(
ncl, P10_CZ, c="C2", label=r"$P_{10}$-int. CZ", marker="d", alpha=0.5, ls=":"
)
ax.plot(
ncl, P11_CZ, c="C3", label=r"$P_{11}$-int. CZ", marker="d", alpha=0.5, ls=":"
)
ax.set_xlabel("Number of Cliffords (#)")
ax.set_ylabel(r"$P |00\rangle$")
ax.legend(loc=(1.05, 0))
ax.set_ylim(-0.05, 1.05)
ax.set_title(title)
def plot_char_RB_decay(
ncl,
C1,
C2,
C12,
C1_CZ,
C2_CZ,
C12_CZ,
fr_C1,
fr_C2,
fr_C12,
fr_C1_CZ,
fr_C2_CZ,
fr_C12_CZ,
title,
ax,
**kw
):
ncl_fine = np.linspace(np.min(ncl), np.max(ncl), 101)
plot_fit(ncl_fine, fr_C1, ax, ls="-", c="C0")
ax.plot(
ncl, C1, c="C0", label=r"$C_1$: $A_1\cdot {\alpha_{1|2}}^m$", marker="o", ls=""
)
plot_fit(ncl_fine, fr_C2, ax, ls="-", c="C1")
ax.plot(
ncl, C2, c="C1", label=r"$C_2$: $A_1\cdot {\alpha_{2|1}}^m$", marker="o", ls=""
)
plot_fit(ncl_fine, fr_C12, ax, ls="-", c="C2")
ax.plot(
ncl,
C12,
c="C2",
label=r"$C_{12}$: $A_1\cdot {\alpha_{12}}^m$",
marker="o",
ls="",
)
plot_fit(ncl_fine, fr_C1_CZ, ax, ls="--", c="C0", alpha=0.5)
ax.plot(
ncl,
C1_CZ,
c="C0",
label=r"$C_1^{int.}$: $A_1' \cdot {\alpha_{1|2}'}^m$",
marker="d",
ls="",
alpha=0.5,
)
plot_fit(ncl_fine, fr_C2_CZ, ax, ls="--", c="C1", alpha=0.5)
ax.plot(
ncl,
C2_CZ,
c="C1",
label=r"$C_2^{int.}$: $A_2' \cdot {\alpha_{2|1}'}^m$",
marker="d",
ls="",
alpha=0.5,
)
plot_fit(ncl_fine, fr_C12_CZ, ax, ls="--", c="C2", alpha=0.5)
ax.plot(
ncl,
C12_CZ,
c="C2",
label=r"$C_{12}^{int.}$: $A_{12}' \cdot {\alpha_{12}'}^m$",
marker="d",
ls="",
alpha=0.5,
)
ax.set_xlabel("Number of Cliffords (#)")
ax.set_ylabel("Population")
ax.legend(title="Character decay", ncol=2, loc=(1.05, 0.6))
ax.set_title(title)
def plot_char_rb_quantities(ax, qoi, **kw):
"""
Plots a text message of the main quantities extracted from char rb
"""
def gen_val_str(alpha, alpha_p):
val_str = " {:.3f}$\pm${:.3f} {:.3f}$\pm${:.3f}"
return val_str.format(
alpha.nominal_value, alpha.std_dev, alpha_p.nominal_value, alpha_p.std_dev
)
alpha_msg = " Reference Interleaved"
alpha_msg += "\n" r"$\alpha_{1|2}$" + "\t"
alpha_msg += gen_val_str(qoi["alpha1"], qoi["alpha1_CZ_int"])
alpha_msg += "\n" r"$\alpha_{2|1}$" + "\t"
alpha_msg += gen_val_str(qoi["alpha2"], qoi["alpha2_CZ_int"])
alpha_msg += "\n" r"$\alpha_{12}$" + "\t"
alpha_msg += gen_val_str(qoi["alpha12"], qoi["alpha12_CZ_int"])
alpha_msg += "\n" + "_" * 40 + "\n"
alpha_msg += "\n" r"$\epsilon_{Ref.}$" + "\t"
alpha_msg += "{:.3f}$\pm${:.3f}%".format(
qoi["eps_ref"].nominal_value * 100, qoi["eps_ref"].std_dev * 100
)
alpha_msg += "\n" r"$\epsilon_{Int.}$" + "\t"
alpha_msg += "{:.3f}$\pm${:.3f}%".format(
qoi["eps_int"].nominal_value * 100, qoi["eps_int"].std_dev * 100
)
alpha_msg += "\n" r"$\epsilon_{CZ.}$" + "\t"
alpha_msg += "{:.3f}$\pm${:.3f}%".format(
qoi["eps_CZ"].nominal_value * 100, qoi["eps_CZ"].std_dev * 100
)
ax.text(1.05, 0.0, alpha_msg, transform=ax.transAxes)
def logisticreg_classifier_machinelearning(shots_0, shots_1, shots_2):
    """
    Train a logistic-regression classifier on the calibration shots of the
    three states (0, 1 and 2) and return the fitted classifier.
    """
# reshaping of the entries in proc_data_dict
shots_0 = np.array(list(zip(list(shots_0.values())[0], list(shots_0.values())[1])))
shots_1 = np.array(list(zip(list(shots_1.values())[0], list(shots_1.values())[1])))
shots_2 = np.array(list(zip(list(shots_2.values())[0], list(shots_2.values())[1])))
shots_0 = shots_0[~np.isnan(shots_0[:, 0])]
shots_1 = shots_1[~np.isnan(shots_1[:, 0])]
shots_2 = shots_2[~np.isnan(shots_2[:, 0])]
X = np.concatenate([shots_0, shots_1, shots_2])
Y = np.concatenate(
[
0 * np.ones(shots_0.shape[0]),
1 * np.ones(shots_1.shape[0]),
2 * np.ones(shots_2.shape[0]),
]
)
logreg = linear_model.LogisticRegression(C=1e5)
logreg.fit(X, Y)
return logreg
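# Illustrative use of the classifier returned above (variable names are only
# examples): train on the three sets of calibration shots and assign new
# (I, Q) points to states 0, 1 or 2, e.g.
#   clf = logisticreg_classifier_machinelearning(shots_0, shots_1, shots_2)
#   states = clf.predict(np.c_[i_vals, q_vals])
# plot_classifier_decission_boundary below uses the same predict call on a grid.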
def plot_classifier_decission_boundary(
shots_0,
shots_1,
shots_2,
classifier,
xlabel: str,
xunit: str,
ylabel: str,
yunit: str,
title: str,
ax,
**kw
):
"""
Plot decision boundary on top of the hexbin plot of the training dataset.
"""
grid_points = 200
x_min = np.nanmin([shots_0[0], shots_1[0], shots_2[0]])
x_max = np.nanmax([shots_0[0], shots_1[0], shots_2[0]])
y_min = np.nanmin([shots_0[1], shots_1[1], shots_2[1]])
y_max = np.nanmax([shots_0[1], shots_1[1], shots_2[1]])
xx, yy = np.meshgrid(
np.linspace(x_min, x_max, grid_points), np.linspace(y_min, y_max, grid_points)
)
Z = classifier.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plot_cal_points_hexbin(
shots_0=shots_0,
shots_1=shots_1,
shots_2=shots_2,
xlabel=xlabel,
xunit=xunit,
ylabel=ylabel,
yunit=yunit,
title=title,
ax=ax,
)
ax.pcolormesh(xx, yy, Z, cmap=c.ListedColormap(["C0", "C3", "C2"]), alpha=0.2)
def plot_rb_decay_woods_gambetta(ncl, M0, X1, ax, ax1, title="", **kw):
ax.plot(ncl, M0, marker="o", linestyle="")
ax1.plot(ncl, X1, marker="d", linestyle="")
ax.grid(axis="y")
ax1.grid(axis="y")
ax.set_ylim(-0.05, 1.05)
ax1.set_ylim(min(min(0.97 * X1), 0.92), 1.01)
ax.set_ylabel(r"$M_0$ probability")
ax1.set_ylabel(r"$\chi_1$ population")
ax1.set_xlabel("Number of Cliffords")
ax.set_title(title)
def plot_irb_decay_woods_gambetta(
ncl,
M0_ref,
M0_int,
X1_ref,
X1_int,
fr_M0_ref,
fr_M0_int,
fr_M0_simple_ref,
fr_M0_simple_int,
fr_X1_ref,
fr_X1_int,
qoi,
ax,
ax1,
fit_tag,
int_name,
title="",
include_idle=False,
M0_int_idle=None,
X1_int_idle=None,
fr_M0_int_idle=None,
fr_M0_simple_int_idle=None,
fr_X1_int_idle=None,
**kw
):
ncl_fine = np.linspace(ncl[0], ncl[-1], 1001)
ax.plot(ncl, M0_ref, marker="o", linestyle="", c="C0", label="Reference")
plot_fit(ncl_fine, fr_M0_ref, ax=ax, c="C0")
ax.plot(
ncl,
M0_int,
marker="d",
linestyle="",
c="C1",
label="Interleaved {}".format(int_name),
)
plot_fit(ncl_fine, fr_M0_int, ax=ax, c="C1")
if include_idle:
ax.plot(
ncl, M0_int_idle, marker="^", linestyle="", c="C2", label="Interleaved Idle"
)
plot_fit(ncl_fine, fr_M0_int_idle, ax=ax, c="C2")
ax.grid(axis="y")
ax.set_ylim(-0.05, 1.05)
ax.set_ylabel(r"$M_0$ probability")
ax1.plot(ncl, X1_ref, marker="o", linestyle="", c="C0")
ax1.plot(ncl, X1_int, marker="d", linestyle="", c="C1")
plot_fit(ncl_fine, fr_X1_ref, ax=ax1, c="C0")
plot_fit(ncl_fine, fr_X1_int, ax=ax1, c="C1")
if include_idle:
ax1.plot(ncl, X1_int_idle, marker="^", linestyle="", c="C2")
plot_fit(ncl_fine, fr_X1_int_idle, ax=ax1, c="C2")
ax1.grid(axis="y")
ax1.set_ylim(min(min(0.97 * X1_int), 0.92), 1.01)
ax1.set_ylabel(r"$\chi_1$ population")
ax1.set_xlabel("Number of Cliffords")
ax.set_title(title)
ax.legend(loc="best")
collabels = [r"$\epsilon_{\chi1}~(\%)$", r"$\epsilon~(\%)$", r"$L_1~(\%)$"]
idle_r_labels0 = ["Interl. Idle curve"] if include_idle else []
idle_r_labels1 = ["Idle-interleaved"] if include_idle else []
rowlabels = (
["Ref. curve"]
+ idle_r_labels0
+ ["Interl. {} curve".format(int_name)]
+ idle_r_labels1
+ ["{}-interleaved".format(int_name)]
)
if int_name == "CZ":
rowlabels += ["{}-naive".format(int_name)]
idle_r_extracted = (
[[qoi["eps_idle_X1"] * 100, qoi["eps_idle_simple"] * 100, qoi["L1_idle"] * 100]]
if include_idle
else []
)
idle_r_fit = (
[
[
qoi["eps_X1_{}_int_idle".format(fit_tag)] * 100,
qoi["eps_simple_{}_int_idle".format(fit_tag)] * 100,
qoi["L1_{}_int_idle".format(fit_tag)] * 100,
]
]
if include_idle
else []
)
table_data = (
[
[
qoi["eps_X1_{}_ref".format(fit_tag)] * 100,
qoi["eps_simple_{}_ref".format(fit_tag)] * 100,
qoi["L1_{}_ref".format(fit_tag)] * 100,
]
]
+ idle_r_fit
+ [
[
qoi["eps_X1_{}_int".format(fit_tag)] * 100,
qoi["eps_simple_{}_int".format(fit_tag)] * 100,
qoi["L1_{}_int".format(fit_tag)] * 100,
]
]
+ idle_r_extracted
+ [
[
qoi["eps_{}_X1".format(int_name)] * 100,
qoi["eps_{}_simple".format(int_name)] * 100,
qoi["L1_{}".format(int_name)] * 100,
]
]
)
if int_name == "CZ":
table_data += [
[
qoi["eps_{}_X1_naive".format(int_name)] * 100,
qoi["eps_{}_simple_naive".format(int_name)] * 100,
qoi["L1_{}_naive".format(int_name)] * 100,
]
]
# Avoid too many digits when the uncertainty is np.nan
for i, row in enumerate(table_data):
for j, u_val in enumerate(row):
if np.isnan(u_val.n) and np.isnan(u_val.s):
table_data[i][j] = "nan+/-nan"
elif np.isnan(u_val.s):
# Keep 3 significant digits only
table_data[i][j] = "{:.3g}+/-nan".format(u_val.n)
ax1.table(
cellText=table_data,
colLabels=collabels,
rowLabels=rowlabels,
transform=ax1.transAxes,
cellLoc="center",
rowLoc="center",
bbox=(0.1, -2.5, 1, 2),
)
def interleaved_error(eps_int, eps_base):
# Interleaved error calculation Magesan et al. PRL 2012
eps = 1 - (1 - eps_int) / (1 - eps_base)
return eps
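# Example with illustrative numbers: eps_int = 0.02 and eps_base = 0.01 give
# eps = 1 - (1 - 0.02) / (1 - 0.01) ≈ 0.0101.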
def leak_decay(A, B, lambda_1, m):
"""
Eq. (9) of Wood Gambetta 2018.
A ~= L2/ (L1+L2)
B ~= L1/ (L1+L2) + eps_m
lambda_1 = 1 - L1 - L2
"""
return A + B * lambda_1 ** m
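# Example with illustrative numbers: A = 0.1, B = 0.9, lambda_1 = 0.99 gives
# leak_decay(0.1, 0.9, 0.99, m=50) = 0.1 + 0.9 * 0.99**50 ≈ 0.64.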
def full_rb_decay(A, B, C, lambda_1, lambda_2, m):
"""Eq. (15) of Wood Gambetta 2018."""
return A + B * lambda_1 ** m + C * lambda_2 ** m
def unitarity_decay(A, B, u, m):
"""Eq. (8) of Wallman et al. New J. Phys. 2015."""
return A + B * u ** m
def char_decay(A, alpha, m):
"""
From Helsen et al. A new class of efficient RB protocols.
Theory in Helsen et al. arXiv:1806.02048
Eq. 8 of Xue et al. ArXiv 1811.04002v1 (experimental implementation)
Parameters
----------
A (float):
Scaling factor of the decay
alpha (float):
depolarizing parameter to be estimated
m (array)
number of cliffords
returns:
A * α**m
"""
return A * alpha ** m
def depolarizing_par_to_eps(alpha, d):
"""
Convert depolarizing parameter to infidelity.
Dugas et al. arXiv:1610.05296v2 contains a nice overview table of
    common RB parameter conversions.
Parameters
----------
alpha (float):
depolarizing parameter, also commonly referred to as lambda or p.
d (int):
dimension of the system, 2 for a single qubit, 4 for two-qubits.
Returns
-------
eps = (1-alpha)*(d-1)/d
"""
return (1 - alpha) * (d - 1) / d
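# Example: alpha = 0.99 gives eps = 0.005 for a single qubit (d = 2) and
# eps = 0.0075 for two qubits (d = 4).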
|
mit
| 681,477,638,738,316,000
| 35.15968
| 88
| 0.494314
| false
| 3.157294
| false
| false
| false
|
mathstuf/ranger
|
ranger/gui/widgets/taskview.py
|
1
|
2838
|
# Copyright (C) 2009-2013 Roman Zimbelmann <hut@hut.pm>
# This software is distributed under the terms of the GNU GPL version 3.
"""The TaskView allows you to modify what the loader is doing."""
from . import Widget
from ranger.ext.accumulator import Accumulator
class TaskView(Widget, Accumulator):
old_lst = None
def __init__(self, win):
Widget.__init__(self, win)
Accumulator.__init__(self)
self.scroll_begin = 0
def draw(self):
base_clr = []
base_clr.append('in_taskview')
lst = self.get_list()
if self.old_lst != lst:
self.old_lst = lst
self.need_redraw = True
if self.need_redraw:
self.win.erase()
if not self.pointer_is_synced():
self.sync_index()
if self.hei <= 0:
return
self.addstr(0, 0, "Task View")
self.color_at(0, 0, self.wid, tuple(base_clr), 'title')
if lst:
for i in range(self.hei - 1):
i += self.scroll_begin
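                    # offset the visible row index by the scroll position to
                    # index into the full task list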
try:
obj = lst[i]
except IndexError:
break
y = i + 1
clr = list(base_clr)
if self.pointer == i:
clr.append('selected')
descr = obj.get_description()
if obj.progressbar_supported and obj.percent >= 0 \
and obj.percent <= 100:
self.addstr(y, 0, "%3.2f%% - %s" % \
(obj.percent, descr), self.wid)
wid = int(self.wid / 100.0 * obj.percent)
self.color_at(y, 0, self.wid, tuple(clr))
self.color_at(y, 0, wid, tuple(clr), 'loaded')
else:
self.addstr(y, 0, descr, self.wid)
self.color_at(y, 0, self.wid, tuple(clr))
else:
if self.hei > 1:
self.addstr(1, 0, "No task in the queue.")
self.color_at(1, 0, self.wid, tuple(base_clr), 'error')
self.color_reset()
def finalize(self):
y = self.y + 1 + self.pointer - self.scroll_begin
self.fm.ui.win.move(y, self.x)
def task_remove(self, i=None):
if i is None:
i = self.pointer
if self.fm.loader.queue:
self.fm.loader.remove(index=i)
def task_move(self, to, i=None):
if i is None:
i = self.pointer
self.fm.loader.move(_from=i, to=to)
def press(self, key):
self.fm.ui.keymaps.use_keymap('taskview')
self.fm.ui.press(key)
def get_list(self):
return self.fm.loader.queue
|
gpl-3.0
| 602,735,406,371,060,500
| 29.516129
| 75
| 0.471811
| false
| 3.789052
| false
| false
| false
|
altsen/diandiyun-platform
|
common/lib/xmodule/xmodule/html_module.py
|
1
|
11807
|
import copy
from fs.errors import ResourceNotFoundError
import logging
import os
import sys
from lxml import etree
from path import path
from pkg_resources import resource_string
from xblock.fields import Scope, String, Boolean, List
from xmodule.editing_module import EditingDescriptor
from xmodule.html_checker import check_html
from xmodule.stringify import stringify_children
from xmodule.x_module import XModule
from xmodule.xml_module import XmlDescriptor, name_to_pathname
import textwrap
from xmodule.contentstore.content import StaticContent
from xblock.core import XBlock
log = logging.getLogger("edx.courseware")
class HtmlFields(object):
display_name = String(
display_name="Display Name",
help="This name appears in the horizontal navigation at the top of the page.",
scope=Scope.settings,
# it'd be nice to have a useful default but it screws up other things; so,
# use display_name_with_default for those
default="Text"
)
data = String(help="Html contents to display for this module", default=u"", scope=Scope.content)
source_code = String(help="Source code for LaTeX documents. This feature is not well-supported.", scope=Scope.settings)
use_latex_compiler = Boolean(
help="Enable LaTeX templates?",
default=False,
scope=Scope.settings
)
class HtmlModule(HtmlFields, XModule):
js = {
'coffee': [
resource_string(__name__, 'js/src/javascript_loader.coffee'),
resource_string(__name__, 'js/src/collapsible.coffee'),
resource_string(__name__, 'js/src/html/display.coffee')
],
'js': [
resource_string(__name__, 'js/src/html/imageModal.js'),
resource_string(__name__, 'js/common_static/js/vendor/draggabilly.pkgd.js')
]
}
js_module_name = "HTMLModule"
css = {'scss': [resource_string(__name__, 'css/html/display.scss')]}
def get_html(self):
if self.system.anonymous_student_id:
return self.data.replace("%%USER_ID%%", self.system.anonymous_student_id)
return self.data
class HtmlDescriptor(HtmlFields, XmlDescriptor, EditingDescriptor):
"""
Module for putting raw html in a course
"""
mako_template = "widgets/html-edit.html"
module_class = HtmlModule
filename_extension = "xml"
template_dir_name = "html"
js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]}
js_module_name = "HTMLEditingDescriptor"
css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/html/edit.scss')]}
    # VS[compat] TODO (cpennington): Delete this method once all fall 2012 courses
# are being edited in the cms
@classmethod
def backcompat_paths(cls, path):
if path.endswith('.html.xml'):
path = path[:-9] + '.html' # backcompat--look for html instead of xml
if path.endswith('.html.html'):
path = path[:-5] # some people like to include .html in filenames..
candidates = []
while os.sep in path:
candidates.append(path)
_, _, path = path.partition(os.sep)
# also look for .html versions instead of .xml
nc = []
for candidate in candidates:
if candidate.endswith('.xml'):
nc.append(candidate[:-4] + '.html')
return candidates + nc
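        # For example (POSIX separators), 'course/html/a.html.xml' becomes
        # 'course/html/a.html' and yields ['course/html/a.html', 'html/a.html'].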
@classmethod
def filter_templates(cls, template, course):
"""
        Filter templates that contain 'latex' from templates.
Show them only if use_latex_compiler is set to True in
course settings.
"""
return (not 'latex' in template['template_id'] or course.use_latex_compiler)
def get_context(self):
"""
        An override to add specific rendering context; in this case we need to
        add a base path for our c4x content addressing scheme.
"""
_context = EditingDescriptor.get_context(self)
# Add some specific HTML rendering context when editing HTML modules where we pass
# the root /c4x/ url for assets. This allows client-side substitutions to occur.
_context.update({
'base_asset_url': StaticContent.get_base_url_path_for_course_assets(self.location) + '/',
'enable_latex_compiler': self.use_latex_compiler,
})
return _context
# NOTE: html descriptors are special. We do not want to parse and
# export them ourselves, because that can break things (e.g. lxml
# adds body tags when it exports, but they should just be html
    # snippets that will be included in the middle of pages).
@classmethod
def load_definition(cls, xml_object, system, location):
'''Load a descriptor from the specified xml_object:
If there is a filename attribute, load it as a string, and
log a warning if it is not parseable by etree.HTMLParser.
If there is not a filename attribute, the definition is the body
of the xml_object, without the root tag (do not want <html> in the
middle of a page)
'''
filename = xml_object.get('filename')
if filename is None:
definition_xml = copy.deepcopy(xml_object)
cls.clean_metadata_from_xml(definition_xml)
return {'data': stringify_children(definition_xml)}, []
else:
# html is special. cls.filename_extension is 'xml', but
# if 'filename' is in the definition, that means to load
# from .html
# 'filename' in html pointers is a relative path
# (not same as 'html/blah.html' when the pointer is in a directory itself)
pointer_path = "{category}/{url_path}".format(
category='html',
url_path=name_to_pathname(location.name)
)
base = path(pointer_path).dirname()
# log.debug("base = {0}, base.dirname={1}, filename={2}".format(base, base.dirname(), filename))
filepath = "{base}/{name}.html".format(base=base, name=filename)
# log.debug("looking for html file for {0} at {1}".format(location, filepath))
# VS[compat]
# TODO (cpennington): If the file doesn't exist at the right path,
# give the class a chance to fix it up. The file will be written out
# again in the correct format. This should go away once the CMS is
# online and has imported all current (fall 2012) courses from xml
if not system.resources_fs.exists(filepath):
candidates = cls.backcompat_paths(filepath)
# log.debug("candidates = {0}".format(candidates))
for candidate in candidates:
if system.resources_fs.exists(candidate):
filepath = candidate
break
try:
with system.resources_fs.open(filepath) as file:
html = file.read().decode('utf-8')
# Log a warning if we can't parse the file, but don't error
if not check_html(html) and len(html) > 0:
msg = "Couldn't parse html in {0}, content = {1}".format(filepath, html)
log.warning(msg)
system.error_tracker("Warning: " + msg)
definition = {'data': html}
# TODO (ichuang): remove this after migration
# for Fall 2012 LMS migration: keep filename (and unmangled filename)
definition['filename'] = [filepath, filename]
return definition, []
except (ResourceNotFoundError) as err:
msg = 'Unable to load file contents at path {0}: {1} '.format(
filepath, err)
# add more info and re-raise
raise Exception(msg), None, sys.exc_info()[2]
# TODO (vshnayder): make export put things in the right places.
def definition_to_xml(self, resource_fs):
''' Write <html filename="" [meta-attrs="..."]> to filename.xml, and the html
string to filename.html.
'''
# Write html to file, return an empty tag
pathname = name_to_pathname(self.url_name)
filepath = u'{category}/{pathname}.html'.format(
category=self.category,
pathname=pathname
)
resource_fs.makedir(os.path.dirname(filepath), recursive=True, allow_recreate=True)
with resource_fs.open(filepath, 'w') as filestream:
html_data = self.data.encode('utf-8')
filestream.write(html_data)
# write out the relative name
relname = path(pathname).basename()
elt = etree.Element('html')
elt.set("filename", relname)
return elt
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(HtmlDescriptor, self).non_editable_metadata_fields
non_editable_fields.append(HtmlDescriptor.use_latex_compiler)
return non_editable_fields
class AboutFields(object):
display_name = String(
help="Display name for this module",
scope=Scope.settings,
default="overview",
)
data = String(
help="Html contents to display for this module",
default="",
scope=Scope.content
)
@XBlock.tag("detached")
class AboutModule(AboutFields, HtmlModule):
"""
Overriding defaults but otherwise treated as HtmlModule.
"""
pass
@XBlock.tag("detached")
class AboutDescriptor(AboutFields, HtmlDescriptor):
"""
These pieces of course content are treated as HtmlModules but we need to overload where the templates are located
in order to be able to create new ones
"""
template_dir_name = "about"
module_class = AboutModule
class StaticTabFields(object):
"""
The overrides for Static Tabs
"""
display_name = String(
display_name="Display Name",
help="This name appears in the horizontal navigation at the top of the page.",
scope=Scope.settings,
default="Empty",
)
data = String(
default=textwrap.dedent("""\
<p>This is where you can add additional pages to your courseware. Click the 'edit' button to begin editing.</p>
"""),
scope=Scope.content,
help="HTML for the additional pages"
)
@XBlock.tag("detached")
class StaticTabModule(StaticTabFields, HtmlModule):
"""
Supports the field overrides
"""
pass
@XBlock.tag("detached")
class StaticTabDescriptor(StaticTabFields, HtmlDescriptor):
"""
These pieces of course content are treated as HtmlModules but we need to overload where the templates are located
in order to be able to create new ones
"""
template_dir_name = None
module_class = StaticTabModule
class CourseInfoFields(object):
"""
Field overrides
"""
items = List(
help="List of course update items",
default=[],
scope=Scope.content
)
data = String(
help="Html contents to display for this module",
default="<ol></ol>",
scope=Scope.content
)
@XBlock.tag("detached")
class CourseInfoModule(CourseInfoFields, HtmlModule):
"""
Just to support xblock field overrides
"""
# statuses
STATUS_VISIBLE = 'visible'
STATUS_DELETED = 'deleted'
@XBlock.tag("detached")
class CourseInfoDescriptor(CourseInfoFields, HtmlDescriptor):
"""
These pieces of course content are treated as HtmlModules but we need to overload where the templates are located
in order to be able to create new ones
"""
template_dir_name = None
module_class = CourseInfoModule
|
agpl-3.0
| -4,910,680,462,322,662,000
| 35.329231
| 123
| 0.621665
| false
| 4.247122
| false
| false
| false
|
euphi/homie-esp8266
|
scripts/ota_updater/ota_updater.py
|
1
|
6619
|
#!/usr/bin/env python
from __future__ import division, print_function
import paho.mqtt.client as mqtt
import base64, sys, math
from hashlib import md5
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
if rc != 0:
print("Connection Failed with result code {}".format(rc))
client.disconnect()
else:
print("Connected with result code {}".format(rc))
        # calculate firmware md5
firmware_md5 = md5(userdata['firmware']).hexdigest()
userdata.update({'md5': firmware_md5})
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("{base_topic}{device_id}/$implementation/ota/status".format(**userdata))
client.subscribe("{base_topic}{device_id}/$implementation/ota/enabled".format(**userdata))
client.subscribe("{base_topic}{device_id}/$fw/#".format(**userdata))
# Wait for device info to come in and invoke the on_message callback where update will continue
print("Waiting for device info...")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    # decode string for python2/3 compatibility
msg.payload = msg.payload.decode()
if msg.topic.endswith('$implementation/ota/status'):
status = int(msg.payload.split()[0])
if userdata.get("published"):
if status == 206: # in progress
# state in progress, print progress bar
progress, total = [int(x) for x in msg.payload.split()[1].split('/')]
bar_width = 30
bar = int(bar_width*(progress/total))
print("\r[", '+'*bar, ' '*(bar_width-bar), "] ", msg.payload.split()[1], end='', sep='')
if (progress == total):
print()
sys.stdout.flush()
elif status == 304: # not modified
print("Device firmware already up to date with md5 checksum: {}".format(userdata.get('md5')))
client.disconnect()
elif status == 403: # forbidden
print("Device ota disabled, aborting...")
client.disconnect()
elif msg.topic.endswith('$fw/checksum'):
checksum = msg.payload
if userdata.get("published"):
if checksum == userdata.get('md5'):
print("Device back online. Update Successful!")
else:
print("Expecting checksum {}, got {}, update failed!".format(userdata.get('md5'), checksum))
client.disconnect()
else:
if checksum != userdata.get('md5'): # save old md5 for comparison with new firmware
userdata.update({'old_md5': checksum})
else:
print("Device firmware already up to date with md5 checksum: {}".format(checksum))
client.disconnect()
elif msg.topic.endswith('ota/enabled'):
if msg.payload == 'true':
userdata.update({'ota_enabled': True})
else:
print("Device ota disabled, aborting...")
client.disconnect()
if ( not userdata.get("published") ) and ( userdata.get('ota_enabled') ) and \
( 'old_md5' in userdata.keys() ) and ( userdata.get('md5') != userdata.get('old_md5') ):
# push the firmware binary
userdata.update({"published": True})
topic = "{base_topic}{device_id}/$implementation/ota/firmware/{md5}".format(**userdata)
print("Publishing new firmware with checksum {}".format(userdata.get('md5')))
client.publish(topic, userdata['firmware'])
def main(broker_host, broker_port, broker_username, broker_password, base_topic, device_id, firmware):
# initialise mqtt client and register callbacks
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
# set username and password if given
if broker_username and broker_password:
client.username_pw_set(broker_username, broker_password)
# save data to be used in the callbacks
client.user_data_set({
"base_topic": base_topic,
"device_id": device_id,
"firmware": firmware
})
# start connection
print("Connecting to mqtt broker {} on port {}".format(broker_host, broker_port))
client.connect(broker_host, broker_port, 60)
# Blocking call that processes network traffic, dispatches callbacks and handles reconnecting.
client.loop_forever()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
        description='OTA firmware update script for the ESP8266 implementation of the Homie MQTT IoT convention.')
# ensure base topic always ends with a '/'
def base_topic_arg(s):
s = str(s)
if not s.endswith('/'):
s = s + '/'
return s
# specify arguments
parser.add_argument('-l', '--broker-host', type=str, required=False,
help='host name or ip address of the mqtt broker', default="127.0.0.1")
parser.add_argument('-p', '--broker-port', type=int, required=False,
help='port of the mqtt broker', default=1883)
parser.add_argument('-u', '--broker-username', type=str, required=False,
help='username used to authenticate with the mqtt broker')
parser.add_argument('-d', '--broker-password', type=str, required=False,
help='password used to authenticate with the mqtt broker')
parser.add_argument('-t', '--base-topic', type=base_topic_arg, required=False,
help='base topic of the homie devices on the broker', default="homie/")
parser.add_argument('-i', '--device-id', type=str, required=True,
help='homie device id')
parser.add_argument('firmware', type=argparse.FileType('rb'),
help='path to the firmware to be sent to the device')
# workaround for http://bugs.python.org/issue9694
parser._optionals.title = "arguments"
# get and validate arguments
args = parser.parse_args()
# read the contents of firmware into buffer
fw_buffer = args.firmware.read()
args.firmware.close()
firmware = bytearray()
firmware.extend(fw_buffer)
# Invoke the business logic
main(args.broker_host, args.broker_port, args.broker_username,
args.broker_password, args.base_topic, args.device_id, firmware)
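# Example invocation (illustrative only; the broker address, base topic and device
# id below are placeholders, not values taken from this repository):
#   python ota_updater.py -l 127.0.0.1 -p 1883 -t "homie/" -i my-device firmware.bin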
|
mit
| 6,659,864,722,361,300,000
| 41.703226
| 109
| 0.610666
| false
| 4.162893
| false
| false
| false
|
Saluev/cocos2d-gui
|
cocosgui/css/__init__.py
|
1
|
2523
|
__all__ = [
'styles', 'Style', 'CSSNode',
'evaluate'
]
# importing basic names to publish them
from .style import styles, Style
from .node import CSSNode
# importing extensions
import border, borderimage, background, font
import rendering
def evaluate(window, element = None):
if element is None:
element = window
element.evaluate_style()
children = element.get_nodes()
for child in children:
assert(child.parent is element)
evaluate(window, child)
_evaluate_node(element)
def _evaluate_node(node):
parent, style = node.parent, node.evaluated_style
left, bottom = style['left'], style['top']
left = 0 if left == 'auto' else left
bottom = 0 if bottom == 'auto' else bottom
position = style['position']
if position == 'absolute':
raise NotImplementedError
# TODO fixed?
margin_offset = [left, bottom]
border_offset = [margin_offset[0] + style['margin-left'],
margin_offset[1] + style['margin-bottom' ]]
padding_offset = [border_offset[0] + style['border-left-width'],
border_offset[1] + style['border-bottom-width' ]]
content_offset = [padding_offset[0] + style['padding-left'],
padding_offset[1] + style['padding-bottom' ]]
content_box = content_offset + list(node.get_content_size())
padding_box = padding_offset + [sum((
content_box[2],
style['padding-left' ],
style['padding-right' ],
)), sum((
content_box[3],
style['padding-top' ],
style['padding-bottom'],
))]
border_box = border_offset + [sum((
padding_box[2],
style['border-left-width' ],
style['border-right-width' ],
)), sum((
padding_box[3],
style['border-top-width' ],
style['border-bottom-width'],
))]
margin_box = margin_offset + [sum((
border_box[2],
style['margin-left' ],
style['margin-right' ],
)), sum((
border_box[3],
style['margin-top' ],
style['margin-bottom'],
))]
#width, height = style['width'], style['height'] # TODO percentages?
#width = margin_box[2] if width == 'auto' else width
#height = margin_box[3] if height == 'auto' else height
#dw, dh = width - margin_box[2], height - margin_box[3]
#if dw != 0 or dh != 0:
#for box in [margin_box, border_box, padding_box, content_box]:
#box[2] += dw
#box[3] += dh
info = {
'node': node,
'margin_box' : margin_box,
'border_box' : border_box,
'padding_box': padding_box,
'content_box': content_box,
}
node.apply_style(**info)
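# Worked example of the offset arithmetic above (hypothetical style values, not
# taken from any real stylesheet): with left=10, margin-left=5,
# border-left-width=2 and padding-left=3, the horizontal offsets become
# margin=10, border=15, padding=17 and content=20, and each enclosing box width
# adds back the corresponding left+right style values.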
|
mit
| -5,700,135,919,683,113,000
| 29.768293
| 70
| 0.613555
| false
| 3.337302
| false
| false
| false
|
pclubuiet/website
|
home/views.py
|
1
|
3396
|
from django import views
from django.shortcuts import render, get_object_or_404
from django.views.generic import TemplateView
from django.views.generic.edit import CreateView
from .models import *
from .forms import *
import requests
import http
from django.urls import reverse_lazy
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
class Template404(TemplateView):
template_name = "404.html"
class Home(TemplateView):
template_name = 'home/home.html'
class Topics(views.View):
def get(self, request, *args, **kwargs):
return render(request, "home/resources/topics.html", {'topics': Topic.objects.all()})
class Resources(views.View):
def get(self, request, pk, *args, **kwargs):
topic = get_object_or_404(Topic, pk=pk)
return render(request, "home/resources/resources.html", {'resources': topic.resource_set.all(), 'topic' : topic})
class BlogPostList(views.View):
def get(self, request, *args, **kwargs):
posts = BlogPost.objects.all()
return render(request, "home/blog/index.html", {'posts': posts})
class BlogPostView(views.View):
def get(self, request, pk, *args, **kwargs):
post = get_object_or_404(BlogPost, pk=pk)
return render(request, "home/blog/blog_post.html", {'post': post})
class Leaderboard(views.View):
def get(self, request, *args, **kwargs):
users = Users.objects.all()
for user in users:
connected = False
while not connected:
try:
user_name = user.github_handle
response = requests.get('https://api.github.com/search/issues?sort=created&q=author:{}&type:pr&per_page=100'.format(user_name), verify = False).json()
pr_count = 0
print(response)
for obj in response['items']:
if('pull_request' in obj):
if('2018-09-30T00:00:00Z'<obj['created_at']<'2018-10-31T23:59:59Z'):
pr_count += 1
user.pr_count = pr_count
user.save()
connected = True
except:
pass
return render(request, 'home/leaderboard.html', {'users': users})
class RegisterUser(CreateView):
form_class = RegisterUserForm
template_name = "home/registeruser.html"
success_url = reverse_lazy('home:home')
@csrf_exempt
def GithubEmailCheck(request):
github_handle = request.POST.get('github_handle')
email = request.POST.get('email')
print("Received ", github_handle)
users = Users.objects.all()
for user in users:
if user.github_handle == github_handle:
return JsonResponse({'message' : 'Duplicate Github Handle'})
if user.email == email:
return JsonResponse({'message' : 'Duplicate Email'})
return JsonResponse({'message' : 'New'})
@csrf_exempt
def GithubCheck(request):
github_handle = request.POST.get('github_handle')
response = requests.get("https://api.github.com/users/{}".format(github_handle), verify = False).json()
print("https://api.github.com/users/{}".format(github_handle))
if ('login' in response):
print("Found")
return JsonResponse({'message' : 'Found'})
else:
return JsonResponse({'message' : 'Not Found'})
|
gpl-3.0
| -1,718,221,211,592,258,300
| 38.045977
| 170
| 0.620436
| false
| 3.876712
| false
| false
| false
|
brain-research/acai
|
lib/eval.py
|
1
|
4490
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lib import data
import numpy as np
import scipy.spatial
def closest_line(query_lines, metric='cosine'):
"""Compute the distance to, and parameters for, the closest line to each
line in query_lines.
Args:
- query_lines: Array of lines to compute closest matches for, shape
(n_lines, width, height, 1)
- metric: String to pass to scipy.spatial.distance.cdist to choose
which distance metric to use
Returns:
        - min_dist, angles: Arrays of shape (n_lines,) denoting the
          distance to the nearest ``true'' line and the angle of that line.
"""
h, w = query_lines.shape[1:-1]
# Construct 10000 lines with these dimensions
angles = np.linspace(0, 2*np.pi - 2*np.pi/10000, 10000)
all_lines = np.array(
[(data.draw_line(angle, h, w)) for angle in angles])
# Produce vectorized versions of both for use with scipy.spatial
flat_query = query_lines.reshape(query_lines.shape[0], -1)
flat_all = all_lines.reshape(all_lines.shape[0], -1)
# Compute pairwise distance matrix of query lines with all valid lines
distances = scipy.spatial.distance.cdist(flat_query, flat_all, metric)
min_dist_idx = np.argmin(distances, axis=-1)
min_dist = distances[np.arange(distances.shape[0]), min_dist_idx]
angles = np.array([angles[n] for n in min_dist_idx])
return min_dist, angles
def smoothness_score(angles):
"""Computes the smoothness score of a line interpolation according to the
angles of each line.
Args:
- angles: Array of shape (n_interpolations, n_lines_per_interpolation)
giving the angle of each line in each interpolation.
Returns:
- smoothness_scores: Array of shape (n_interpolations,) giving the
average smoothness score for all of the provided interpolations.
"""
angles = np.atleast_2d(angles)
# Remove discontinuities larger than np.pi
angles = np.unwrap(angles)
diffs = np.abs(np.diff(angles, axis=-1))
# Compute the angle difference from the first and last point
total_diff = np.abs(angles[:, :1] - angles[:, -1:])
# When total_diff is zero, there's no way to compute this score
zero_diff = (total_diff < 1e-4).flatten()
normalized_diffs = diffs/total_diff
deviation = np.max(normalized_diffs, axis=-1) - 1./(angles.shape[1] - 1)
# Set score to NaN when we aren't able to compute it
deviation[zero_diff] = np.nan
return deviation
def line_eval(interpolated_lines):
"""Given a group of line interpolations, compute mean nearest line distance
and mean smoothness score for all of the interpolations.
This version of this metric is meant for vertical lines only.
Args:
- interpolated_lines: Collection of line interpolation images, shape
(n_interpolations, n_lines_per_interpolation, height, width, 1)
Returns:
- mean_distance: Average distance to closest ``real'' line.
- mean_smoothness: Average interpolation smoothness
"""
original_shape = interpolated_lines.shape
min_dist, angles = closest_line(
interpolated_lines.reshape((-1,) + original_shape[2:]))
mean_distance = np.mean(min_dist)
smoothness_scores = smoothness_score(
angles.reshape(original_shape[0], original_shape[1]))
nan_scores = np.isnan(smoothness_scores)
# If all scores were NaN, set the mean score to NaN
if np.all(nan_scores):
mean_smoothness = np.nan
# Otherwise only compute mean for non-NaN scores
else:
sum_smoothness = np.sum(smoothness_scores[np.logical_not(nan_scores)])
mean_smoothness = sum_smoothness/float(len(nan_scores))
return np.float32(mean_distance), np.float32(mean_smoothness)
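# Usage sketch (shapes only; the array below is a placeholder, not real data):
#   interps = np.zeros((4, 8, 32, 32, 1))      # 4 interpolations of 8 line images
#   mean_dist, mean_smooth = line_eval(interps)
# Note that closest_line() compares against 10000 reference lines generated with
# data.draw_line(angle, h, w), so the image height/width must match that helper.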
|
apache-2.0
| 3,405,448,565,287,546,400
| 39.089286
| 79
| 0.688641
| false
| 3.824532
| false
| false
| false
|
bklakew/OpenAgClassifier
|
src/model/server.py
|
1
|
6199
|
"""
# Copyright 2017 Foundation Center. All Rights Reserved.
#
# Licensed under the Foundation Center Public License, Version 1.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://gis.foundationcenter.org/licenses/LICENSE-1.0.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
from base.prediction import Predictor
from base.model import TextClassifier
from nltk.data import load
from base.database import MySqlDataBase
from base.graph import run, bfs
from base import config as c
import json
import os
import time
import warnings
import itertools
from concurrent.futures import ThreadPoolExecutor, as_completed
from flask import Flask, request, Response
from flask_cors import cross_origin
app = Flask(__name__)
warnings.simplefilter("ignore", UserWarning)
def _load_to_memory(name, level):
clf = TextClassifier()
clf.load(path='model/clf_data/', name=name, in_db=False)
del clf.doc_ids
return Predictor(classifier=clf, high_t=c.high_threshold[level], low_t=c.low_threshold[level])
def _get_lookup():
db = MySqlDataBase(c.db)
query = """
SELECT Code, ifnull(ifnull(ifnull(ifnull(ifnull(L7, L6), L5), L4), L3), L2) AS `description`
FROM (
SELECT Code, nullif(L7, '') AS L7, nullif(L6, '') AS L6, nullif(L5, '') AS L5
, nullif(L4, '') AS L4, nullif(L3, '') AS L3, nullif(L2, '') AS L2
, nullif(L1, '') AS L1
FROM agrovoc_autocode.agrovoc_terms
WHERE `Use?` = 'Y'
) as a
"""
db.execute(query)
d = {}
for row in db.cursor:
code = row["Code"].strip()
description = row["description"].strip()
d[code] = description
db.teardown()
return d
def _validate(js, k):
return isinstance(js, dict) and k in js
print("[INFO] Loading AGROVOC classifiers")
p1 = _load_to_memory(name='hierarchy_1_76021167-b4ce-463d-bab0-bc7fb044b74b', level=1)
p2 = _load_to_memory(name='hierarchy_2_2fd8b6a0-6786-42ef-9eea-66ea02a1dfdd', level=2)
p3 = _load_to_memory(name='hierarchy_3_2b946288-5eeb-4d35-a1fe-6987c118c3b5', level=3)
p4 = _load_to_memory(name='hierarchy_4_3e787d47-5183-4df2-ba4b-509926f029d3', level=4)
lookup = _get_lookup()
graph = run(MySqlDataBase(c.db))
sentence_detector = load("tokenizers/punkt/english.pickle")
def taxonomy_rollup(results):
"""
Does the taxonomy rollup using a graph breadth-first-search
algorithm
:param results: (list of dictionaries)
:return: (list of dictionaries)
"""
all_codes = set([r["code"] for r in results])
to_keep = set()
node_check = all_codes - to_keep
for n in node_check:
to_keep.add(n)
k = bfs(graph=graph, start=n, to_check=node_check, keep=to_keep)
to_keep.add(k)
return [r for r in results if r["code"] in to_keep if r["code"] is not None]
@app.route('/predict', methods=['POST', 'GET'])
@cross_origin(origin='*', headers=['Content-Type', 'Authorization'])
def predict():
"""
Single text predictions
:return: (JSON)
"""
j = request.get_json()
if j is None:
j = request.args
if not j:
j = request.form
if _validate(j, 'text'):
st = time.time()
text = j['text']
threshold = 0
chunk = False
if 'chunk' in j and j['chunk'].lower() == 'true':
text = [sub for sent in sentence_detector.tokenize(text) for sub in sent.split(';')]
chunk = True
if 'threshold' in j and j['threshold'] == 'high':
threshold = 1
# get all predictions, for every hierarchy asynchronously
results = []
with ThreadPoolExecutor(max_workers=4) as executor:
future_results = {executor.submit(func, (text, lookup, threshold)): idx + 1
for idx, func in enumerate([p1.predict,
p2.predict,
p3.predict,
p4.predict
])}
for future in as_completed(future_results):
results.extend(future.result())
# resolve duplication that arises due to chunking (accept the result with the maximum confidence per class)
if chunk:
results_sort = sorted(results, key=lambda x: (x["code"], x["confidence"]))
grouped = itertools.groupby(results_sort, lambda s: s["code"])
results = [max(v, key=lambda x: x["confidence"]) for k, v in grouped]
# add logic to toggle the agrovoc graph roll up on and off
if 'roll_up' in j and j['roll_up'].lower() == 'false':
agg = [r for r in results if r["code"] is not None]
else:
agg = taxonomy_rollup(results)
if not agg:
agg = [{"code": None, "description": None, "confidence": 0.0}]
agg = sorted(agg, key=lambda s: s["confidence"], reverse=True)
return Response(response=json.dumps({"success": True, "duration": time.time() - st, "data": agg}, indent=4),
status=200,
mimetype='application/json')
return Response(response=json.dumps({"success": False, "status": "Incorrect parameters"}, indent=4),
status=404,
mimetype='application/json')
if __name__ == '__main__':
debug = os.environ.get('DEBUG', False)
port = os.environ.get('PORT', 9091)
testing = os.environ.get('TESTING', False)
app.run(host='0.0.0.0', port=port, debug=debug)
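# Example request against the /predict endpoint (illustrative only; host, port and
# text below are placeholders):
#   curl -X POST http://localhost:9091/predict \
#        -H "Content-Type: application/json" \
#        -d '{"text": "maize drought tolerance", "chunk": "true", "roll_up": "true"}'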
|
mpl-2.0
| 8,984,252,178,927,677,000
| 33.786127
| 116
| 0.581812
| false
| 3.591067
| false
| false
| false
|
snipsco/snipsskills
|
snipsmanager/commands/setup/systemd/snipsmanager.py
|
1
|
1887
|
# -*- coding: utf-8 -*-
import os
import time
from ...base import Base
from ....utils.os_helpers import is_raspi_os, which
from ....utils.systemd import Systemd
from .... import DEFAULT_SNIPSFILE_PATH
from snipsmanagercore import pretty_printer as pp
class SystemdSnipsManagerException(Exception):
pass
class SystemdSnipsManager(Base):
SNIPSMANAGER_SERVICE_NAME = "snipsmanager"
SNIPSMANAGER_COMMAND = "snipsmanager"
def run(self):
snipsfile_path = self.options['--snipsfile_path'] or os.getcwd()
try:
SystemdSnipsManager.setup(snipsfile_path=snipsfile_path)
except Exception as e:
pp.perror(str(e))
@staticmethod
def setup(snipsfile_path=None):
pp.pcommand("Setting up Snips Manager as a Systemd service")
snipsfile_path = snipsfile_path or DEFAULT_SNIPSFILE_PATH
working_directory = os.path.dirname(snipsfile_path)
if not is_raspi_os():
raise SystemdSnipsManagerException("Snips Systemd configuration is only available on Raspberry Pi. Skipping Systemd setup")
snipsmanager_path = which('snipsmanager')
if snipsmanager_path is None:
raise SystemdSnipsManagerException("Error: cannot find command 'snipsmanager' on the system. Make sure the Snips Manager CLI is correctly installed. Skipping Systemd setup")
contents = Systemd.get_template(SystemdSnipsManager.SNIPSMANAGER_SERVICE_NAME)
contents = contents.replace("{{SNIPSMANAGER_COMMAND}}", snipsmanager_path)
contents = contents.replace("{{WORKING_DIRECTORY}}", working_directory)
Systemd.write_systemd_file(SystemdSnipsManager.SNIPSMANAGER_SERVICE_NAME, None, contents)
Systemd.enable_service(None, SystemdSnipsManager.SNIPSMANAGER_SERVICE_NAME)
pp.psuccess("Successfully set up Snips Manager as a Systemd service")
|
mit
| 9,124,192,898,552,893,000
| 36
| 185
| 0.711182
| false
| 3.614943
| false
| false
| false
|
haricot/djangocms-bs4forcascade
|
cmsplugin_bs4forcascade/bootstrap4/utils.py
|
1
|
11099
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from collections import OrderedDict
from django.forms import widgets
from cmsplugin_cascade import app_settings
from cmsplugin_cascade.plugin_base import CascadePluginBase
from cmsplugin_cascade.utils import compute_aspect_ratio, get_image_size, parse_responsive_length
__all__ = ['reduce_breakpoints', 'compute_media_queries', 'get_image_tags', 'get_picture_elements',
'get_widget_choices']
logger = logging.getLogger('cascade')
BS4_BREAKPOINTS = OrderedDict(app_settings.CMSPLUGIN_CASCADE['bootstrap4']['breakpoints'])
BS4_BREAKPOINT_KEYS = list(tp[0] for tp in app_settings.CMSPLUGIN_CASCADE['bootstrap4']['breakpoints'])
def get_widget_choices():
breakpoints = list(BS4_BREAKPOINTS)
i = 0
widget_choices = []
for br, br_options in BS4_BREAKPOINTS.items():
if i == 0:
widget_choices.append((br, '{} (<{}px)'.format(br_options[2], br_options[0])))
elif i == len(breakpoints[:-1]):
widget_choices.append((br, '{} (≥{}px)'.format(br_options[2], br_options[0])))
else:
widget_choices.append((br, '{} (≥{}px and <{}px)'.format(br_options[2], br_options[0], BS4_BREAKPOINTS[breakpoints[(i + 1)]][0])))
i += 1
return widget_choices
def reduce_breakpoints(plugin, field_name, request=None, obj=None):
"""
Narrow down the number of breakpoints in the widget of the named glossary_field. This is useful
in case the container was defined with a subset of these breakpoints: xs, sm, md, lg.
"""
if not isinstance(plugin, CascadePluginBase):
raise ValueError('Plugin is not of type CascadePluginBase')
parent_instance = plugin.get_parent_instance(request, obj)
if not parent_instance:
return
complete_glossary = parent_instance.get_complete_glossary()
if 'breakpoints' not in complete_glossary:
return
try:
# find the glossary_field named field_name and restrict its breakpoint to the available ones
widget = [f for f in plugin.glossary_fields if f.name == field_name][0].widget
except IndexError:
return
if not isinstance(widget, widgets.MultiWidget):
raise ValueError('Widget for glossary_field {0} is not a multiple value field')
temp = [(l, widget.widgets[k]) for k, l in enumerate(widget.labels) if l in complete_glossary['breakpoints']]
widget.labels, widget.widgets = (list(t) for t in zip(*temp))
def compute_media_queries(element):
"""
    For a given Cascade element, compute the current media queries for each breakpoint,
even for nested containers, rows and columns.
"""
parent_glossary = element.get_parent_glossary()
# compute the max width and the required media queries for each chosen breakpoint
element.glossary['container_max_widths'] = max_widths = {}
element.glossary['media_queries'] = media_queries = {}
breakpoints = element.glossary.get('breakpoints', parent_glossary.get('breakpoints', []))
last_index = len(breakpoints) - 1
fluid = element.glossary.get('fluid')
for index, bp in enumerate(breakpoints):
try:
key = 'container_fluid_max_widths' if fluid else 'container_max_widths'
max_widths[bp] = parent_glossary[key][bp]
except KeyError:
max_widths[bp] = BS4_BREAKPOINTS[bp][4 if fluid else 3]
if last_index > 0:
if index == 0:
next_bp = breakpoints[1]
media_queries[bp] = ['(max-width: {0}px)'.format(BS4_BREAKPOINTS[next_bp][0])]
elif index == last_index:
media_queries[bp] = ['(min-width: {0}px)'.format(BS4_BREAKPOINTS[bp][0])]
else:
next_bp = breakpoints[index + 1]
media_queries[bp] = ['(min-width: {0}px)'.format(BS4_BREAKPOINTS[bp][0]),
'(max-width: {0}px)'.format(BS4_BREAKPOINTS[next_bp][0])]
def get_image_tags(context, instance, options):
"""
Create a context returning the tags to render an <img ...> element:
``sizes``, ``srcset``, a fallback ``src`` and if required inline styles.
"""
try:
aspect_ratio = compute_aspect_ratio(instance.image)
except Exception as e:
# if accessing the image file fails, abort here
return
is_responsive = options.get('is_responsive', False)
resize_options = options.get('resize_options', {})
crop = 'crop' in resize_options
upscale = 'upscale' in resize_options
subject_location = instance.image.subject_location if 'subject_location' in resize_options else False
resolutions = (False, True) if 'high_resolution' in resize_options else (False,)
tags = {'sizes': [], 'srcsets': {}, 'is_responsive': is_responsive, 'extra_styles': {}}
if is_responsive:
image_width = parse_responsive_length(options.get('image_width_responsive') or '100%')
assert(image_width[1]), "The given image has no valid width"
if image_width[1] != 1.0:
tags['extra_styles'].update({'max-width': '{:.0f}%'.format(100 * image_width[1])})
else:
image_width = parse_responsive_length(options['image_width_fixed'])
if not image_width[0]:
image_width = (instance.image.width, image_width[1])
try:
image_height = parse_responsive_length(options['image_height'])
except KeyError:
image_height = (None, None)
set_defaults(options)
if is_responsive:
max_width = 0
for bp in options['breakpoints']:
if bp not in options['container_max_widths']:
continue
width = int(image_width[1] * options['container_max_widths'][bp])
max_width = max(max_width, width)
size = get_image_size(width, image_height, aspect_ratio)
if bp in options['media_queries']:
tags['sizes'].append('{0} {1}px'.format(' and '.join(options['media_queries'][bp]), width))
for high_res in resolutions:
if high_res:
size = (size[0] * 2, size[1] * 2)
key = '{0}w'.format(size[0])
tags['srcsets'][key] = {'size': size, 'crop': crop, 'upscale': upscale,
'subject_location': subject_location}
# use an existing image as fallback for the <img ...> element
if not max_width > 0:
logger.warning('image tags: image max width is zero')
size = (int(round(max_width)), int(round(max_width * aspect_ratio)))
else:
size = get_image_size(image_width[0], image_height, aspect_ratio)
if len(resolutions) > 1:
for high_res in resolutions:
if high_res:
tags['srcsets']['2x'] = {'size': (size[0] * 2, size[1] * 2), 'crop': crop,
'upscale': upscale, 'subject_location': subject_location}
else:
tags['srcsets']['1x'] = {'size': size, 'crop': crop,
'upscale': upscale, 'subject_location': subject_location}
tags['src'] = {'size': size, 'crop': crop, 'upscale': upscale,
'subject_location': subject_location}
return tags
def set_defaults(options):
options.setdefault('breakpoints', ['xs', 'sm', 'md', 'lg', 'xl'])
options.setdefault('container_max_widths', {'xs': 576, 'sm': 767, 'md': 991, 'lg': 1199, 'xl': 1980})
options.setdefault('fluid', False)
options.setdefault('media_queries', {
'xs': ['(max-width: 576px)'],
'sm': ['(min-width: 576px)', '(max-width: 767px)'],
'md': ['(min-width: 768px)', '(max-width: 991px)'],
'lg': ['(min-width: 992px)','(max-width: 1199px)'],
'xl': ['(min-width: 1200px)'],
})
def get_picture_elements(context, instance):
"""
Create a context, used to render a <picture> together with all its ``<source>`` elements:
It returns a list of HTML elements, each containing the information to render a ``<source>``
element.
The purpose of this HTML entity is to display images with art directions. For normal images use
the ``<img>`` element.
"""
if not instance.image:
return
complete_glossary = instance.get_complete_glossary()
aspect_ratio = compute_aspect_ratio(instance.image)
container_max_heights = complete_glossary.get('container_max_heights', {})
resize_options = instance.glossary.get('resize_options', {})
crop = 'crop' in resize_options
upscale = 'upscale' in resize_options
subject_location = instance.image.subject_location if 'subject_location' in resize_options else False
max_width = 0
max_zoom = 0
elements = []
for bp in complete_glossary['breakpoints']:
try:
width = float(complete_glossary['container_max_widths'][bp])
except KeyError:
width = 0
max_width = max(max_width, round(width))
size = None
try:
image_height = parse_responsive_length(instance.glossary['responsive_heights'][bp])
except KeyError:
image_height = (None, None)
if image_height[0]: # height was given in px
size = (int(width), image_height[0])
elif image_height[1]: # height was given in %
size = (int(width), int(round(width * aspect_ratio * image_height[1])))
elif bp in container_max_heights:
container_height = parse_responsive_length(container_max_heights[bp])
if container_height[0]:
size = (int(width), container_height[0])
elif container_height[1]:
size = (int(width), int(round(width * aspect_ratio * container_height[1])))
try:
zoom = int(
instance.glossary['responsive_zoom'][bp].strip().rstrip('%')
)
except (AttributeError, KeyError, ValueError):
zoom = 0
max_zoom = max(max_zoom, zoom)
if size is None:
# as fallback, adopt height to current width
size = (int(width), int(round(width * aspect_ratio)))
try:
media_queries = complete_glossary['media_queries'][bp][:]
except KeyError:
media_queries = []
media = ' and '.join(media_queries)
elem = {'tag': 'source', 'size': size, 'zoom': zoom, 'crop': crop,
'upscale': upscale, 'subject_location': subject_location, 'media': media}
if 'high_resolution' in resize_options:
elem['size2'] = (size[0] * 2, size[1] * 2)
elements.append(elem)
# add a fallback image for old browsers which can't handle the <picture> element
if image_height[1]:
size = (int(max_width), int(round(max_width * aspect_ratio * image_height[1])))
else:
size = (int(max_width), int(round(max_width * aspect_ratio)))
elements.append({'tag': 'img', 'size': size, 'zoom': max_zoom, 'crop': crop,
'upscale': upscale, 'subject_location': subject_location})
return elements
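# Each dict returned above is intended to be rendered by the plugin template as a
# single <source media="..."> element (or the trailing fallback <img>); 'size' and
# the optional 'size2' carry the 1x and 2x thumbnail dimensions respectively.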
|
mit
| -605,115,729,172,687,700
| 45.422594
| 142
| 0.605588
| false
| 3.688497
| false
| false
| false
|
lcpt/xc
|
verif/tests/elements/shell/test_shell_mitc4_11.py
|
1
|
3573
|
# -*- coding: utf-8 -*-
''' Taken from example 2-005 of the SAP 2000 verification manual.'''
# The obtained error is near 1.8%; it may be due to the aspect ratio
# of the element. See comments on page EXAMPLE 2-005 - 7
# in the SAP 2000 manual.
__author__= "Luis C. Pérez Tato (LCPT) and Ana Ortega (AOO)"
__copyright__= "Copyright 2015, LCPT and AOO"
__license__= "GPL"
__version__= "3.0"
__email__= "l.pereztato@gmail.com"
# feProblem.setVerbosityLevel(0)
NumDivI= 32
NumDivJ= 32
CooMaxX= 10
CooMaxY= 2
E= 17472000 # Elastic modulus in lb/in2
nu= 0.3 # Poisson's ratio
G= 6720000
thickness= 0.0001 # Cross section depth expressed in inches.
unifLoad= 0.0001 # Uniform load in lb/in2.
ptLoad= 0.0004 # Point load in lb.
import xc_base
import geom
import xc
from solution import predefined_solutions
from model import predefined_spaces
from materials import typical_materials
# Problem type
feProblem= xc.FEProblem()
preprocessor= feProblem.getPreprocessor
nodes= preprocessor.getNodeHandler
modelSpace= predefined_spaces.StructuralMechanics3D(nodes)
# Define materials
elast= typical_materials.defElasticMaterial(preprocessor, "elast",E)
nodes.newSeedNode()
# Define materials
nmb1= typical_materials.defElasticMembranePlateSection(preprocessor, "memb1",E,nu,0.0,thickness)
seedElemHandler= preprocessor.getElementHandler.seedElemHandler
seedElemHandler.defaultMaterial= "memb1"
seedElemHandler.defaultTag= 1
elem= seedElemHandler.newElement("ShellMITC4",xc.ID([0,0,0,0]))
points= preprocessor.getMultiBlockTopology.getPoints
pt= points.newPntIDPos3d(1,geom.Pos3d(0.0,0.0,0.0))
pt= points.newPntIDPos3d(2,geom.Pos3d(CooMaxX,0.0,0.0))
pt= points.newPntIDPos3d(3,geom.Pos3d(CooMaxX,CooMaxY,0.0))
pt= points.newPntIDPos3d(4,geom.Pos3d(0.0,CooMaxY,0.0))
surfaces= preprocessor.getMultiBlockTopology.getSurfaces
surfaces.defaultTag= 1
s= surfaces.newQuadSurfacePts(1,2,3,4)
s.nDivI= NumDivI
s.nDivJ= NumDivJ
# Constraints
f1= preprocessor.getSets.getSet("f1")
f1.genMesh(xc.meshDir.I)
sides= s.getEdges
#Edge iterator
for l in sides:
for i in l.getEdge.getNodeTags():
modelSpace.fixNode000_000(i)
# Loads definition
loadHandler= preprocessor.getLoadHandler
lPatterns= loadHandler.getLoadPatterns
#Load modulation.
ts= lPatterns.newTimeSeries("constant_ts","ts")
lPatterns.currentTimeSeries= "ts"
#Load case definition
lp0= lPatterns.newLoadPattern("default","0")
#lPatterns.currentLoadPattern= "0"
f1= preprocessor.getSets.getSet("f1")
nNodes= f1.getNumNodes
node= f1.getNodeIJK(1,NumDivI/2+1,NumDivJ/2+1)
# print "Central node: ", node.tag
# print "Central node coordinates: ", node.getCoo
lp0.newNodalLoad(node.tag,xc.Vector([0,0,-ptLoad,0,0,0])) # Concentrated load
nElems= f1.getNumElements
#We add the load case to domain.
lPatterns.addToDomain("0")
# Solution procedure
analisis= predefined_solutions.simple_static_linear(feProblem)
analOk= analisis.analyze(1)
f1= preprocessor.getSets.getSet("f1")
nodes= preprocessor.getNodeHandler
node= f1.getNodeIJK(1,NumDivI/2+1,NumDivJ/2+1)
# print "Central node: ", node.tag
# print "Central node coordinates: ", node.getCoo
# print "Central node displacements: ", node.getDisp
UZ= node.getDisp[2]
UZTeor= -7.25
ratio1= (abs((UZ-UZTeor)/UZTeor))
ratio2= (abs((nElems-1024)/1024))
'''
print "UZ= ",UZ
print "Number of nodes: ",nNodes
print "Number of elements: ",nElems
print "ratio1: ",ratio1
'''
import os
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
if (abs(ratio1)<2e-2) & (abs(ratio2)<1e-9):
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
|
gpl-3.0
| -1,432,100,207,234,984,400
| 26.060606
| 96
| 0.755879
| false
| 2.710167
| false
| false
| false
|
pdelsante/thug
|
thug/Analysis/virustotal/VirusTotal.py
|
1
|
3786
|
#!/usr/bin/env python
#
# VirusTotal.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import os
import json
import tempfile
import logging
import requests
import six.moves.configparser as ConfigParser
log = logging.getLogger("Thug")
class VirusTotal(object):
def __init__(self):
self.enabled = True
self.opts = dict()
self.__init_config()
def __init_config(self):
conf_file = os.path.join(log.configuration_path, 'thug.conf')
if not os.path.exists(conf_file):
log.warning("[WARNING] VirusTotal disabled (no configuration file found)")
self.enabled = False
return
config = ConfigParser.ConfigParser()
config.read(conf_file)
for option in config.options('virustotal'):
self.opts[option] = config.get('virustotal', option)
runtime_apikey = log.ThugOpts.get_vt_runtime_apikey()
if runtime_apikey:
self.opts['apikey'] = runtime_apikey
if not self.opts.get('apikey', None):
self.enabled = False
def save_report(self, response_dict, basedir, sample):
log_dir = os.path.join(basedir, 'analysis', 'virustotal')
content = json.dumps(response_dict)
log.ThugLogging.log_virustotal(log_dir, sample, content)
positives = str(response_dict.get("positives", {}))
total = str(response_dict.get("total", {}))
log.warning("[VirusTotal] Sample %s analysis ratio: %s/%s", response_dict['md5'], positives, total)
def get_report(self, report):
params = { "resource": report,
"allinfo" : 1,
"apikey" : self.opts['apikey']}
response = requests.get(self.opts["reporturl"], params = params)
return response
def query(self, sample, basedir):
md5 = sample['md5']
response = self.get_report(md5)
response_dict = response.json()
response_code = response_dict.get(u"response_code")
if response.ok:
if response_code == 1:
self.save_report(response_dict, basedir, sample)
return True
log.warning("[VirusTotal] %s", response_dict['verbose_msg'])
return False
def submit(self, data, sample):
md5 = sample['md5']
        fd, s = tempfile.mkstemp()
        os.close(fd)  # close the raw descriptor from mkstemp(); the path is reopened just below
with open(s, "wb") as fd:
fd.write(data)
params = {'apikey': self.opts['apikey']}
files = {'file' : (md5, open(s, "rb"))}
response = requests.post(self.opts["scanurl"], files = files, params = params)
if response.ok:
log.warning("[VirusTotal] Sample %s submitted", md5)
os.remove(s)
def analyze(self, data, sample, basedir):
if not self.enabled:
return
if not self.opts['apikey']:
return
if sample.get('md5', None) and log.ThugOpts.vt_query and self.query(sample, basedir):
return
if log.ThugOpts.vt_submit:
self.submit(data, sample)
|
gpl-2.0
| 2,766,726,902,379,941,000
| 30.084746
| 107
| 0.591125
| false
| 3.935551
| true
| false
| false
|
vidartf/hyperspyUI
|
hyperspyui/uiprogressbar.py
|
1
|
10235
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 The HyperSpyUI developers
#
# This file is part of HyperSpyUI.
#
# HyperSpyUI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpyUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Wed Nov 26 19:11:19 2014
@author: Vidar Tonaas Fauske
"""
from __future__ import division, absolute_import
# future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
# import compatibility functions and utilities
import sys
from time import time
from QtCore import QObject, Signal, SIGNAL
import hyperspy.external.progressbar
from tqdm import tqdm
from hyperspyui.exceptions import ProcessCanceled
# Create signal object which will handle all events
signaler = QObject()
signaler.created = Signal(object)
signaler.progress = Signal((object, int), (object, int, str))
signaler.finished = Signal(int)
signaler.cancel = Signal(int)
# This is necessary as it bugs out if not (it's a daisy chained event)
def _on_cancel(pid):
signaler.emit(SIGNAL('cancel(int)'), pid)
signaler.on_cancel = _on_cancel
# Hook function
def _wrap(*args, **kwargs):
"""
Replacement function for hyperspy.external.progressbar.progressbar().
Causes a UIProgressBar() to be made, which the MainWindow can connect to
in order to create a progress indicator. It is important that the
connection is made with QtCore.Signals, as they are thread aware, and the
signal is processed on the GUI main event loop, i.e. the main thread. This
is necessary as all UI operations have to happen on the main thread, and
the hyperspy processing might be pushed to a worker thread "threaded.py".
"""
return UIProgressBar(*args, **kwargs)
# Override hyperspy progressbar implementation
orig = hyperspy.external.progressbar.progressbar
def takeover_progressbar():
"""
    Replace hyperspy.external.progressbar.progressbar() with uiprogressbar._wrap().
The main_window will be connected to all the events whenever a progressbar
is created.
"""
hyperspy.external.progressbar.progressbar = _wrap
def reset_progressbar():
hyperspy.external.progressbar.progressbar = orig
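# Usage sketch: call takeover_progressbar() once at application start-up so that
# any hyperspy routine which creates a progressbar goes through UIProgressBar and
# emits the Qt signals above; call reset_progressbar() to restore the original
# implementation.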
class UIProgressBar(tqdm):
"""
    Connector between a hyperspy process that uses a progressbar and the UI.
    See also the doc for _wrap() for more details.
"""
uid = 1
@classmethod
def write(cls, s, file=sys.stdout, end="\n"):
"""
Print a message via tqdm_gui (just an alias for print)
"""
# TODO: print text on GUI?
file.write(s)
file.write(end)
def __init__(self, *args, **kwargs):
self.id = self.uid
self.uid += 1
kwargs['gui'] = True
self.cancelled = False
super().__init__(*args, **kwargs)
# Initialize the GUI display
if self.disable or not kwargs['gui']:
return
self.mininterval = max(self.mininterval, 0.5)
# assert maxval >= 0
# self.maxval = maxval
self.signal_set = False
global signaler
signaler.connect(signaler, SIGNAL('cancel(int)'),
self.cancel)
self.currval = 0
self.finished = False
self.start_time = None
self.seconds_elapsed = 0
signaler.emit(SIGNAL('created(int, int, QString)'), self.id,
self.total, "")
def cancel(self, pid):
"""
Slot for the UI to call if it wants to cancel the process. Thread safe.
"""
if pid == self.id:
self.cancelled = True
@staticmethod
def format_string(n, total, elapsed, rate=None):
return "ETA: " + (tqdm.format_interval((total - n) / rate)
if rate else '?')
def __iter__(self):
iterable = self.iterable
if self.disable:
for obj in iterable:
if self.cancelled is True:
raise ProcessCanceled("User cancelled operation")
yield obj
return
# ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
# dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_time = self.avg_time
for obj in iterable:
if self.cancelled is True:
raise ProcessCanceled("User cancelled operation")
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
delta_it = n - last_print_n
# check the counter first (avoid calls to time())
if delta_it >= miniters:
cur_t = time()
delta_t = cur_t - last_print_t
if delta_t >= mininterval:
elapsed = cur_t - start_t
# EMA (not just overall average)
if smoothing and delta_t:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
txt = self.format_string(
n, self.total, elapsed,
1 / avg_time if avg_time else None)
global signaler
signaler.emit(SIGNAL('progress(int, int, QString)'),
self.id, n, txt)
# If no `miniters` was specified, adjust automatically
# to the maximum iteration rate seen so far.
if dynamic_miniters:
if maxinterval and delta_t > maxinterval:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif mininterval and delta_t:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * mininterval \
/ delta_t + (1 - smoothing) * miniters
else:
miniters = smoothing * delta_it + \
(1 - smoothing) * miniters
# Store old values for next call
last_print_n = n
last_print_t = cur_t
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.close()
def update(self, n=1):
"""
Updates the progress bar to a new value. Called by the hyperspy side.
Not safe to call from UI.
"""
if self.disable:
return
if self.cancelled is True:
raise ProcessCanceled("User cancelled operation")
if n < 0:
n = 1
self.n += n
delta_it = self.n - self.last_print_n # should be n?
if delta_it >= self.miniters:
# We check the counter first, to reduce the overhead of time()
cur_t = time()
delta_t = cur_t - self.last_print_t
if delta_t >= self.mininterval:
elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t:
self.avg_time = delta_t / delta_it \
if self.avg_time is None \
else self.smoothing * delta_t / delta_it + \
(1 - self.smoothing) * self.avg_time
txt = self.format_string(
self.n, self.total, elapsed,
1 / self.avg_time if self.avg_time else None)
global signaler
signaler.emit(SIGNAL('progress(int, int, QString)'),
self.id, self.n, txt)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t > self.maxinterval:
self.miniters = self.miniters * self.maxinterval \
/ delta_t
elif self.mininterval and delta_t:
self.miniters = self.smoothing * delta_it \
* self.mininterval / delta_t + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = self.smoothing * delta_it + \
(1 - self.smoothing) * self.miniters
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
if self.disable:
return
self.disable = True
self.finish()
self._instances.remove(self)
def finish(self):
"""
        Used to signal that the progress is finished. Called by the hyperspy side.
"""
global signaler
signaler.emit(SIGNAL('finished(int)'), self.id)
|
gpl-3.0
| -3,904,834,961,505,757,700
| 34.538194
| 82
| 0.557792
| false
| 4.311289
| false
| false
| false
|
umax/diabetto2
|
category/views.py
|
1
|
1429
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse_lazy
from django.views.generic import (ListView, DetailView, CreateView,
DeleteView, UpdateView)
from . import forms
from . import models
__all__ = (
'CategoryIndexView',
'CategoryDetailView',
'CategoryCreateView',
'CategoryDeleteView',
'CategoryUpdateView',
)
class CategoryIndexView(ListView):
context_object_name = 'categories'
template_name = 'category/index.html'
def get_queryset(self):
return models.Category.objects.all().prefetch_related('products')
class CategoryDetailView(DetailView):
context_object_name = 'category'
template_name = 'category/detail.html'
def get_queryset(self):
return models.Category.objects.all().prefetch_related('products')
class CategoryCreateView(CreateView):
form_class = forms.CategoryForm
template_name = 'category/create.html'
success_url = reverse_lazy('index_category')
class CategoryUpdateView(UpdateView):
model = models.Category
form_class = forms.CategoryForm
context_object_name = 'category'
template_name = 'category/update.html'
success_url = reverse_lazy('index_category')
class CategoryDeleteView(DeleteView):
model = models.Category
context_object_name = 'category'
template_name = 'category/delete.html'
success_url = reverse_lazy('index_category')
|
gpl-2.0
| 5,028,435,370,511,008,000
| 25.962264
| 73
| 0.69909
| false
| 3.99162
| false
| false
| false
|
papedaniel/oioioi
|
oioioi/contests/handlers.py
|
1
|
4452
|
import json
import logging
import traceback
import pprint
import socket
import time
from smtplib import SMTPException
from django.core.mail import mail_admins
from django.db import transaction
from oioioi.contests.models import Contest, ProblemInstance, Submission, \
SubmissionReport, FailureReport
logger = logging.getLogger(__name__)
WAIT_FOR_SUBMISSION_RETRIES = 9
WAIT_FOR_SUBMISSION_SLEEP_SECONDS = 1
def wait_for_submission_in_db(env, **kwargs):
"""Celery may start handling a submission before it is actually saved
in the DB. This is a workaround for this.
"""
for _i in xrange(WAIT_FOR_SUBMISSION_RETRIES):
with transaction.atomic():
if bool(Submission.objects.filter(id=env['submission_id'])):
break
time.sleep(WAIT_FOR_SUBMISSION_SLEEP_SECONDS)
return env
@transaction.atomic
def update_report_statuses(env, **kwargs):
submission = Submission.objects.get(id=env['submission_id'])
problem_instance = submission.problem_instance
reports = SubmissionReport.objects.filter(submission=submission)
problem_instance.controller.update_report_statuses(submission, reports)
return env
@transaction.atomic
def update_submission_score(env, **kwargs):
submission = Submission.objects.get(id=env['submission_id'])
problem_instance = submission.problem_instance
problem_instance.controller.update_submission_score(submission)
return env
def update_user_results(env, **kwargs):
with transaction.atomic():
submission = Submission.objects.get(id=env['submission_id'])
user = submission.user
if not user:
return env
problem_instance = \
ProblemInstance.objects.get(id=env['problem_instance_id'])
round = problem_instance.round
contest = None
if round is not None:
assert round.id == env['round_id']
contest = round.contest
assert contest.id == env['contest_id']
else:
assert 'round_id' not in env
assert 'contest_id' not in env
problem_instance.controller.update_user_results(user, problem_instance)
return env
@transaction.atomic
def call_submission_judged(env, **kwargs):
submission = Submission.objects.get(id=env['submission_id'])
contest = submission.problem_instance.contest
if contest is None:
assert 'contest_id' not in env
return env
assert contest.id == env['contest_id']
contest.controller.submission_judged(submission,
rejudged=env['is_rejudge'])
contest.controller.submission_unqueued(submission, env['job_id'])
return env
@transaction.atomic
def create_error_report(env, exc_info, **kwargs):
"""Builds a :class:`oioioi.contests.models.SubmissionReport` for
an evaulation which have failed.
USES
* `env['submission_id']`
"""
logger.error("System Error evaluating submission #%s:\n%s",
env.get('submission_id', '???'),
pprint.pformat(env, indent=4), exc_info=exc_info)
if 'submission_id' not in env:
return env
try:
submission = Submission.objects.get(id=env['submission_id'])
except Submission.DoesNotExist:
return env
submission_report = SubmissionReport(submission=submission)
submission_report.kind = 'FAILURE'
submission_report.save()
failure_report = FailureReport(submission_report=submission_report)
failure_report.json_environ = json.dumps(env)
failure_report.message = traceback.format_exc(exc_info)
failure_report.save()
return env
def mail_admins_on_error(env, exc_info, **kwargs):
"""Sends email to all admins defined in settings.ADMINS on each
grading error occurrence.
USES
* `env['submission_id']`
"""
# We don't want to spam admins when the evaluation of a deleted
# submission fails. See also SIO-1254.
try:
if 'submission_id' in env:
Submission.objects.get(id=env['submission_id'])
except Submission.DoesNotExist:
return env
try:
mail_admins("System Error evaluating submission #%s" %
env.get('submission_id', '???'),
traceback.format_exc(exc_info))
except (socket.error, SMTPException), e:
logger.error("An error occurred while sending email: %s",
e.message)
return env
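# Note: these handlers are designed to be chained by the evaluation machinery --
# each one receives the evaluation environment dict ``env``, may update the
# database as a side effect, and returns ``env`` for the next handler in the chain.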
|
gpl-3.0
| 4,431,240,814,176,651,300
| 29.703448
| 75
| 0.66442
| false
| 3.964381
| true
| false
| false
|
joaormatos/anaconda
|
mmfparser/data/checksum.py
|
1
|
2357
|
# Copyright (c) Mathias Kaerlev 2012.
# This file is part of Anaconda.
# Anaconda is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Anaconda is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Anaconda. If not, see <http://www.gnu.org/licenses/>.
from mmfparser.bytereader import ByteReader
import struct
def wrap(value):
return value & 0xFFFFFFFF
def wrap_signed_char(value):
value = value & 0xFF
if value > 127:
value -= 256
return value
def make_checksum(data):
result = 0
bufferOffset = 0
numberOfBytes = len(data)
numberOfReads = numberOfBytes >> 2
for _ in xrange(numberOfReads):
newInt, = struct.unpack_from('<I', data, bufferOffset)
result = newInt + (wrap(result) >> 31) + 2 * result
result = wrap(result)
bufferOffset += 4
for _ in xrange(numberOfBytes & 3):
v7 = (wrap(result) >> 31) + struct.unpack_from('<B', data, bufferOffset)[0]
bufferOffset += 1
result = wrap(v7 + 2*result)
return wrap(result)
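# Small worked examples, computed by hand from the loop above (illustrative only):
#   make_checksum('\x01\x00\x00\x00') -> 1   # one 4-byte read: 1 + (0 >> 31) + 2*0
#   make_checksum('\x02')             -> 2   # one tail byte:  (0 >> 31) + 2, then wrap(2 + 2*0)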
GROUP_WORDS = list('mqojhm:qskjhdsmkjsmkdjhq\x63clkcdhdlkjhd')
def make_group_checksum(password, group_name):
v4 = 57
for c in group_name:
v4 += ord(c) ^ 0x7F
v5 = 0
for c in password:
v4 += wrap_signed_char(ord(GROUP_WORDS[v5]) + (ord(c) ^ 0xC3)) ^ 0xF3
v5 += 1
        if v5 >= len(GROUP_WORDS):  # wrap before indexing to avoid an IndexError
v5 = 0
return v4
def make_pame_checksum(data):
checksum = make_checksum(data)
lastByte = checksum & 0x000000FF # get last byte
xorByte = lastByte ^ 13
checksum = checksum & 0xFFFFFF00 | xorByte
return int(checksum)
class Checksum(object):
data = None
def __init__(self, data = None):
if data:
self.data = data
def getChecksum(self):
return make_pame_checksum(self.data)
if __name__ == '__main__':
print hex(make_group_checksum('klonoafan', 'yay'))
|
gpl-3.0
| 1,744,356,720,394,358,800
| 29.230769
| 83
| 0.647857
| false
| 3.481536
| false
| false
| false
|
mdraeger/gmapcatcher
|
gmapcatcher/widgets/widComboBoxEntry.py
|
1
|
4319
|
# -*- coding: utf-8 -*-
## @package gmapcatcher.widgets.widComboBoxEntry
# ComboBoxEntry widget used to collect data to search
import gtk
import re
from gmapcatcher.mapConst import *
## This widget is where we collect data to search
class ComboBoxEntry(gtk.ComboBoxEntry):
DEFAULT_TEXT = "Enter location here!"
def __init__(self, confirm_clicked, conf):
super(ComboBoxEntry, self).__init__()
self.connect('changed', self.changed_combo, confirm_clicked)
self.connect('key-press-event', self.key_press_combo)
# Launch clean_entry for all the signals/events below
self.child.connect("button-press-event", self.clean_entry)
self.child.connect("cut-clipboard", self.clean_entry)
self.child.connect("copy-clipboard", self.clean_entry)
self.child.connect("paste-clipboard", self.clean_entry)
self.child.connect("move-cursor", self.clean_entry)
self.child.connect("populate-popup", self.populate_popup, conf)
# Launch the default_entry on the focus out
self.child.connect("focus-out-event", self.default_entry)
# Start search after hit 'ENTER'
self.child.connect('activate', confirm_clicked)
## Clean out the entry box if text = default
def clean_entry(self, *args):
if (self.child.get_text() == self.DEFAULT_TEXT):
self.child.set_text("")
self.child.grab_focus()
## Reset the default text if entry is empty
def default_entry(self, *args):
if (self.child.get_text().strip() == ''):
self.child.set_text(self.DEFAULT_TEXT)
## Add a new item to the menu of the EntryBox
def populate_popup(self, w, menu, conf):
def menuitem_response(w, string, conf):
conf.match_func = string
subMenu = gtk.Menu()
for item in ENTRY_SUB_MENU:
iMenuItem = gtk.RadioMenuItem(None, item)
iMenuItem.set_active(item == conf.match_func)
iMenuItem.connect("activate", menuitem_response, item, conf)
subMenu.append(iMenuItem)
menuItem = gtk.MenuItem()
menu.append(menuItem)
menuItem = gtk.MenuItem('Auto-Completion Method')
menuItem.set_submenu(subMenu)
menu.append(menuItem)
menu.show_all()
## Show the combo list if is not empty
def combo_popup(self):
if self.get_model().get_iter_root() is not None:
self.popup()
## Handles the pressing of arrow keys
def key_press_combo(self, w, event):
        if event.keyval in [65362, 65364]:  # GDK keyvals for the Up and Down arrow keys
self.combo_popup()
return True
## Handles the change event of the ComboBox
def changed_combo(self, w, confirm_clicked):
str = self.child.get_text()
if (str.endswith(SEPARATOR)):
self.child.set_text(str.strip())
confirm_clicked(None)
## Set the auto-completion for the entry box
def set_completion(self, ctx_map, confirm_clicked, conf):
completion = gtk.EntryCompletion()
completion.connect('match-selected', self.on_completion_match, confirm_clicked)
self.child.set_completion(completion)
completion.set_model(ctx_map.completion_model())
completion.set_text_column(0)
completion.set_minimum_key_length(3)
completion.set_match_func(self.match_func, conf)
# Populate the dropdownlist
self.set_model(ctx_map.completion_model(SEPARATOR))
self.set_text_column(0)
## Automatically display after selecting
def on_completion_match(self, completion, model, iter, confirm_clicked):
self.child.set_text(model[iter][0])
confirm_clicked(None)
## Match function for the auto-completion
def match_func(self, completion, key, iter, conf):
model = completion.get_model()
key = key.lower()
text = model.get_value(iter, 0).lower()
if conf.match_func == ENTRY_SUB_MENU[STARTS_WITH]:
return text.startswith(key)
elif conf.match_func == ENTRY_SUB_MENU[ENDS_WITH]:
return text.endswith(key)
elif conf.match_func == ENTRY_SUB_MENU[REGULAR_EXPRESSION]:
p = re.compile(key, re.IGNORECASE)
return (p.search(text) is not None)
else:
return (text.find(key) != -1)
|
gpl-2.0
| -1,651,217,596,343,355,100
| 38.623853
| 87
| 0.634869
| false
| 3.772052
| false
| false
| false
|
Som-Energia/somenergia-tomatic
|
tomatic_sandbox.py
|
1
|
2204
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
import re
from consolemsg import warn, step, error, u
from datetime import datetime, timedelta
from shutil import copyfile
from pathlib import Path
from slugify import slugify
@click.command()
@click.help_option()
@click.option('-d', '--description',
help="Description tagline to add to the schedule",
)
@click.option('--fromdate',
default=datetime.today().strftime("%Y-%m-%d"),
help="Choose a monday for computing schedules. Format: YYYY-MM-DD",
)
@click.option('--linenumber',
default=7,
help="Choose the numer of lines to attend calls",
)
def tomatic_sandbox(fromdate, description, linenumber):
try:
step("Generating graella sandbox for week {}",fromdate)
fromdate = datetime.strptime(fromdate, '%Y-%m-%d')
if not fromdate.weekday() == 0:
fromdate = fromdate + timedelta(days=-fromdate.weekday(), weeks=1)
graellaFolder = fromdate.strftime("%Y-%m-%d")
if description:
graellaFolder = '{}-{}'.format(graellaFolder, slugify(description))
step("Generating directory {}", graellaFolder)
Path(graellaFolder).mkdir()
linkCertificate = Path(graellaFolder+'/drive-certificate.json')
step("Creating certificate link {}", linkCertificate)
linkCertificate.symlink_to('../drive-certificate.json')
source = Path('config.yaml')
destination = Path(graellaFolder+'/config.yaml')
step("Creating file {}", source)
copyfile(u(source), u(destination))
if linenumber:
step("Adding number of lines {} to file {}", linenumber, source)
text = destination.read_text()
text2fix = re.compile(r'nTelefons: \d+')
text = text.replace(text2fix.findall(text)[0], "nTelefons: "+str(linenumber))
destination.write_text(text)
source = Path('holidays.conf')
destination = Path(graellaFolder+'/holidays.conf')
step("Creating {} file", source)
copyfile(u(source), u(destination))
except Exception as e:
error(e)
raise
if __name__ == '__main__':
tomatic_sandbox()
# vim: et ts=4 sw=4
|
gpl-3.0
| 4,193,262,625,800,079,000
| 31.411765
| 89
| 0.635662
| false
| 3.8
| false
| false
| false
|
kain88-de/mdanalysis
|
testsuite/MDAnalysisTests/test_failure.py
|
1
|
1352
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import absolute_import
import os
def test_failure():
"""Fail if the MDA_FAILURE_TEST environment variable is set.
"""
# Have a file open to trigger an output from the open_files plugin.
f = open('./failure.txt', 'w')
if u'MDA_FAILURE_TEST' in os.environ:
assert False
|
gpl-2.0
| 7,582,456,156,020,400,000
| 39.969697
| 79
| 0.701923
| false
| 2.913793
| false
| false
| false
|
hooram/ownphotos-backend
|
densecap/webcam/server2.py
|
1
|
3086
|
import argparse, random, os, time, json
from PIL import Image
from io import BytesIO
import base64
from flask import Flask, request
from flask.ext.cors import CORS
from flask_restful import Resource, Api
import ipdb
app = Flask(__name__)
app.config['DEBUG'] = True
ext2conttype2 = {
"jpg": "JPEG",
"jpeg": "JPEG",
"png": "PNG",
"gif": "GIF",
"image/jpeg": "JPEG",
"image/png": "PNG",
"image/gif": "GIF"
}
ext2conttype = {
"jpg": "image/jpeg",
"jpeg": "image/jpeg",
"png": "image/png",
"gif": "image/gif"
}
input_dir = 'webcam/inputs'
output_dir = 'webcam/outputs'
@app.route('/media/upload',methods=['POST','GET'])
def densecap():
if request.method=='POST':
ipdb.set_trace()
file = request.files['file']
if file and file.filename:
img_id = random.randint(1,1000000)
img_path = os.path.join(input_dir, '%d.jpg' % img_id)
filename = file.filename
extension = filename[filename.rfind(".")+1:].lower()
content_type = ext2conttype[extension]
image = Image.open(file)
image.save(img_path)
            json_name = os.path.join(output_dir, '%d.json' % img_id)
while not os.path.isfile(json_name):
time.sleep(0.05)
with open(json_name, 'r') as f:
ann = json.load(f)
os.remove(json_name)
return ann
else:
return 'error 2'
else:
return 'running'
class DenseCap(Resource):
def get(self):
return 'The DenseCap server seems to be running!'
def post(self):
img_id = random.randint(1, 1000000)
img_name = os.path.join(input_dir, '%d.jpg' % img_id)
# Get the base64 image data out of the request.
        # for some reason Flask doesn't parse this out at all for us, so we'll just
# do it manually. There is a prefix telling us that this is an image and the
# type of the image, then a comma, then the raw base64 data for the image.
# We just grab the part after the comma and decode it.
idx = request.data.find(',') + 1
img_data = request.data[idx:]
im = Image.open(BytesIO(base64.b64decode(img_data)))
im.save(img_name)
# request.files['image'].save(img_name)
json_name = os.path.join(output_dir, '%d.json' % img_id)
while not os.path.isfile(json_name):
time.sleep(0.05)
with open(json_name, 'r') as f:
ann = json.load(f)
os.remove(json_name)
return ann
if __name__ == '__main__':
app.run(debug=True)
# from tornado.wsgi import WSGIContainer
# from tornado.httpserver import HTTPServer
# from tornado.ioloop import IOLoop
#
# http_server = HTTPServer(WSGIContainer(app), ssl_options={
# 'certfile': 'webcam/ssl/server.crt',
# 'keyfile': 'webcam/ssl/server.key'
# })
#
# http_server.listen(5000)
#
# # We have to do a little weirdness to make the server actually die
# # when we hit CTRL+C
# try:
# IOLoop.instance().start()
# except KeyboardInterrupt:
# IOLoop.instance().stop()
|
mit
| 3,539,640,382,857,798,700
| 25.152542
| 80
| 0.602722
| false
| 3.269068
| false
| false
| false
|
asntech/jaspar
|
portal/migrations/0002_auto_20170617_1217.py
|
1
|
1491
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-17 12:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('portal', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='NewsAndUpdate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=25)),
('body', models.TextField()),
('category', models.CharField(choices=[('realese', 'New release'), ('bug', 'Bug fix'), ('announcement', 'Announcement')], max_length=150)),
('date', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
# migrations.AlterModelOptions(
# name='matrixannotation',
# options={'managed': False},
# ),
# migrations.AlterModelOptions(
# name='matrixprotein',
# options={'managed': False},
# ),
# migrations.AlterModelOptions(
# name='matrixspecies',
# options={'managed': False},
# ),
]
|
bsd-3-clause
| 3,353,032,720,810,922,000
| 35.365854
| 155
| 0.574782
| false
| 4.26
| false
| false
| false
|
gwind/YWeb
|
yweb/yweb/utils/translation/trans_real.py
|
1
|
25606
|
"""Translation helper functions."""
from __future__ import unicode_literals
import locale
import os
import re
import sys
import gettext as gettext_module
from threading import local
import warnings
from yweb.utils.importlib import import_module
from yweb.utils.datastructures import SortedDict
from yweb.utils.encoding import force_str, force_text
from yweb.utils.functional import memoize
from yweb.utils._os import upath
from yweb.utils.safestring import mark_safe, SafeData
from yweb.utils import six
from yweb.utils.six import StringIO
from yweb.utils.translation import TranslatorCommentWarning
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
_checked_languages = {}
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
    (?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))?   # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_prefix_re = re.compile(r'^/([\w-]+)(/|$)')
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower()+'_'+language[p+1:].lower()
else:
# Get correct locale for sr-latn
if len(language[p+1:]) > 2:
return language[:p].lower()+'_'+language[p+1].upper()+language[p+2:].lower()
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
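# For example: to_locale('en-us') -> 'en_US'; to_locale('sr-latn') -> 'sr_Latn'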
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower()+'-'+locale[p+1:].lower()
else:
return locale.lower()
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset.
"""
def __init__(self, *args, **kw):
gettext_module.GNUTranslations.__init__(self, *args, **kw)
self.set_output_charset('utf-8')
self.__language = '??'
def merge(self, other):
self._catalog.update(other._catalog)
def set_language(self, language):
self.__language = language
self.__to_language = to_language(language)
def language(self):
return self.__language
def to_language(self):
return self.__to_language
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def translation(language):
"""
Returns a translation object.
This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
global _translations
t = _translations.get(language, None)
if t is not None:
return t
from yweb.conf import settings
globalpath = os.path.join(os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
def _fetch(lang, fallback=None):
global _translations
res = _translations.get(lang, None)
if res is not None:
return res
loc = to_locale(lang)
def _translation(path):
try:
t = gettext_module.translation('yweb', path, [loc], DjangoTranslation)
t.set_language(lang)
return t
except IOError:
return None
res = _translation(globalpath)
# We want to ensure that, for example, "en-gb" and "en-us" don't share
# the same translation object (thus, merging en-us with a local update
# doesn't affect en-gb), even though they will both use the core "en"
# translation. So we have to subvert Python's internal gettext caching.
base_lang = lambda x: x.split('-', 1)[0]
if base_lang(lang) in [base_lang(trans) for trans in list(_translations)]:
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if t is not None:
if res is None:
return t
else:
res.merge(t)
return res
for appname in reversed(settings.INSTALLED_APPS):
app = import_module(appname)
apppath = os.path.join(os.path.dirname(upath(app.__file__)), 'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
for localepath in reversed(settings.LOCALE_PATHS):
if os.path.isdir(localepath):
res = _merge(localepath)
if res is None:
if fallback is not None:
res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
def activate(language):
"""
Fetches the translation object for a given tuple of application name and
language and installs it as the current translation object for the current
thread.
"""
_active.value = translation(language)
def deactivate():
"""
Deinstalls the currently active translation object so that further _ calls
will resolve against the default translation object, again.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
def get_language():
"""Returns the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
from yweb.conf import settings
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
from yweb.conf import settings
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
from yweb.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
# str() is allowing a bytestring message to remain bytestring on Python 2
eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
t = getattr(_active, "value", None)
if t is not None:
result = getattr(t, translation_function)(eol_message)
else:
if _default is None:
from yweb.conf import settings
_default = translation(settings.LANGUAGE_CODE)
result = getattr(_default, translation_function)(eol_message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
"""
Returns a string of the translation of the message.
Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
"""
return do_translate(message, 'gettext')
if six.PY3:
ugettext = gettext
else:
def ugettext(message):
return do_translate(message, 'ugettext')
def pgettext(context, message):
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
result = ugettext(msg_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
# force unicode, because lazy version expects unicode
result = force_text(message)
return result
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
from yweb.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a string of the translation of either the singular or plural,
based on the number.
Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
if six.PY3:
ungettext = ngettext
else:
def ungettext(singular, plural, number):
"""
        Returns a unicode string of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number)
result = ungettext(*msgs_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = ungettext(singular, plural, number)
return result
def all_locale_paths():
"""
    Returns a list of paths to user-provided language files.
"""
from yweb.conf import settings
globalpath = os.path.join(
os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
return [globalpath] + list(settings.LOCALE_PATHS)
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available. This is only used for language codes from either the cookies
or session and during format localization.
"""
for path in all_locale_paths():
if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
return True
return False
check_for_language = memoize(check_for_language, _checked_languages, 1)
def get_supported_language_variant(lang_code, supported=None, strict=False):
"""
Returns the language-code that's listed in supported languages, possibly
selecting a more generic variant. Raises LookupError if nothing found.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the currently checked is not found.
"""
if supported is None:
from yweb.conf import settings
supported = SortedDict(settings.LANGUAGES)
if lang_code:
# if fr-CA is not supported, try fr-ca; if that fails, fallback to fr.
generic_lang_code = lang_code.split('-')[0]
variants = (lang_code, lang_code.lower(), generic_lang_code,
generic_lang_code.lower())
for code in variants:
if code in supported and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported:
if supported_code.startswith((generic_lang_code + '-',
generic_lang_code.lower() + '-')):
return supported_code
raise LookupError(lang_code)
def get_language_from_path(path, supported=None, strict=False):
"""
Returns the language-code if there is a valid language-code
found in the `path`.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the currently checked is not found.
"""
if supported is None:
from yweb.conf import settings
supported = SortedDict(settings.LANGUAGES)
regex_match = language_code_prefix_re.match(path)
if not regex_match:
return None
lang_code = regex_match.group(1)
try:
return get_supported_language_variant(lang_code, supported, strict=strict)
except LookupError:
return None
def get_language_from_request(request, check_path=False):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
global _accepted
from yweb.conf import settings
supported = SortedDict(settings.LANGUAGES)
if check_path:
lang_code = get_language_from_path(request.path_info, supported)
if lang_code is not None:
return lang_code
if hasattr(request, 'session'):
lang_code = request.session.get('django_language', None)
if lang_code in supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
try:
return get_supported_language_variant(lang_code, supported)
except LookupError:
pass
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
# 'normalized' is the root name of the locale in POSIX format (which is
# the format used for the directories holding the MO files).
normalized = locale.locale_alias.get(to_locale(accept_lang, True))
if not normalized:
continue
# Remove the default encoding from locale_alias.
normalized = normalized.split('.')[0]
if normalized in _accepted:
# We've seen this locale before and have an MO file for it, so no
# need to check again.
return _accepted[normalized]
try:
accept_lang = get_supported_language_variant(accept_lang, supported)
except LookupError:
continue
else:
_accepted[normalized] = accept_lang
return accept_lang
try:
return get_supported_language_variant(settings.LANGUAGE_CODE, supported)
except LookupError:
return settings.LANGUAGE_CODE
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*""")
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
one_percent_re = re.compile(r"""(?<!%)%(?!%)""")
def templatize(src, origin=None):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from yweb.conf import settings
from yweb.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
src = force_text(src, settings.FILE_CHARSET)
out = StringIO()
message_context = None
intrans = False
inplural = False
singular = []
plural = []
incomment = False
comment = []
lineno_comment_map = {}
comment_lineno_cache = None
for t in Lexer(src, origin).tokenize():
if incomment:
if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
content = ''.join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
translators_comment_start = lineno
for lineno, line in enumerate(content.splitlines(True)):
if translators_comment_start is not None and lineno >= translators_comment_start:
out.write(' # %s' % line)
else:
out.write(' #\n')
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
if message_context:
out.write(' npgettext(%r, %r, %r,count) ' % (message_context, ''.join(singular), ''.join(plural)))
else:
out.write(' ngettext(%r, %r, count) ' % (''.join(singular), ''.join(plural)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
if message_context:
out.write(' pgettext(%r, %r) ' % (message_context, ''.join(singular)))
else:
out.write(' gettext(%r) ' % ''.join(singular))
for part in singular:
out.write(blankout(part, 'S'))
message_context = None
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
contents = one_percent_re.sub('%%', t.contents)
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
# Handle comment tokens (`{# ... #}`) plus other constructs on
# the same line:
if comment_lineno_cache is not None:
cur_lineno = t.lineno + t.contents.count('\n')
if comment_lineno_cache == cur_lineno:
if t.token_type != TOKEN_COMMENT:
for c in lineno_comment_map[comment_lineno_cache]:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
warn_msg = ("The translator-targeted comment '%s' "
"(%sline %d) was ignored, because it wasn't the last item "
"on the line.") % (c, filemsg, comment_lineno_cache)
warnings.warn(warn_msg, TranslatorCommentWarning)
lineno_comment_map[comment_lineno_cache] = []
else:
out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
comment_lineno_cache = None
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"':
g = g.strip('"')
elif g[0] == "'":
g = g.strip("'")
g = one_percent_re.sub('%%', g)
if imatch.group(2):
# A context is provided
context_match = context_re.match(imatch.group(2))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
out.write(' pgettext(%r, %r) ' % (message_context, g))
message_context = None
else:
out.write(' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
if bmatch.group(1):
# A context is provided
context_match = context_re.match(bmatch.group(1))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
intrans = True
inplural = False
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
elif t.contents == 'comment':
incomment = True
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':',1)[1])
else:
out.write(blankout(p, 'F'))
elif t.token_type == TOKEN_COMMENT:
if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
lineno_comment_map.setdefault(t.lineno,
[]).append(t.contents)
comment_lineno_cache = t.lineno
else:
out.write(blankout(t.contents, 'X'))
return force_str(out.getvalue())
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
    Any format errors in lang_string result in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string)
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i : i + 3]
if first:
return []
if priority:
priority = float(priority)
if not priority: # if priority is 0.0 at this point make it 1.0
priority = 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return result
|
mit
| 615,606,380,099,209,600
| 36.878698
| 143
| 0.573772
| false
| 4.257732
| false
| false
| false
|
john123951/SmartQQBot
|
MsgHandler.py
|
1
|
6793
|
# -*- coding: utf-8 -*-
# Code by Yinzo: https://github.com/Yinzo
# Origin repository: https://github.com/Yinzo/SmartQQBot
from Group import *
from Pm import *
from Sess import *
import threading
logging.basicConfig(
filename='smartqq.log',
level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
)
class MsgHandler:
def __init__(self, operator):
if not isinstance(operator, QQ):
raise TypeError("Operator must be a logined QQ instance")
self.__operator = operator
self.process_threads = {}
self.__group_list = {}
self.__pm_list = {}
self.__sess_list = {}
def handle(self, msg_list):
assert isinstance(msg_list, list), "msg_list is NOT a LIST"
for msg in msg_list:
            # Only handle program-management-level operations here
if not isinstance(msg, (Msg, Notify)):
logging.error("Handler received a not a Msg or Notify instance.")
return
elif isinstance(msg, MsgWithContent):
logging.info(str(self.__get_account(msg)) + ":" + msg.content)
            if isinstance(msg, GroupMsg):  # handle group chat messages
                # Check whether the group object already exists; info_seq is actually the group number
if msg.info_seq not in self.__group_list:
self.__group_list[msg.info_seq] = Group(self.__operator, msg)
                    # Maintain a queue of worker threads; each thread handles its own messages
self.process_threads[msg.info_seq] = MsgHandleQueue(self.__group_list[msg.info_seq])
self.process_threads[msg.info_seq].start()
logging.debug("Now group list: " + str(self.__group_list))
tgt_group = self.__group_list[msg.info_seq]
if len(tgt_group.msg_list) >= 1 and msg.seq == tgt_group.msg_list[-1].seq:
                    # Discard this message if its seq duplicates the previous one
                    logging.info("Duplicate message, discarding")
return
tgt_group.msg_id = msg.msg_id
self.process_threads[msg.info_seq].append(msg)
            elif isinstance(msg, PmMsg):  # handle private (PM) messages
tid = self.__get_account(msg)
if tid not in self.__pm_list:
self.__pm_list[tid] = Pm(self.__operator, msg)
                    # Maintain a queue of worker threads; each thread handles its own messages
self.process_threads[tid] = MsgHandleQueue(self.__pm_list[tid])
self.process_threads[tid].start()
logging.debug("Now pm thread list: " + str(self.__pm_list))
tgt_pm = self.__pm_list[tid]
if len(tgt_pm.msg_list) >= 1 and msg.time == tgt_pm.msg_list[-1].time \
and msg.from_uin == tgt_pm.msg_list[-1].from_uin \
and msg.content == tgt_pm.msg_list[-1].content:
                    # PMs carry no seq for duplicate detection, so only messages with the same sender, timestamp and content are discarded
                    logging.info("Duplicate message, discarding")
return
tgt_pm.msg_id = msg.msg_id
self.process_threads[tid].append(msg)
            elif isinstance(msg, SessMsg):  # handle temporary session messages
tid = self.__get_account(msg)
if tid not in self.__sess_list:
self.__sess_list[tid] = Sess(self.__operator, msg)
self.process_threads[tid] = MsgHandleQueue(self.__sess_list[tid])
self.process_threads[tid].start()
logging.debug("Now sess thread list: " + str(self.__sess_list))
tgt_sess = self.__sess_list[tid]
if len(tgt_sess.msg_list) >= 1 and msg.time == tgt_sess.msg_list[-1].time \
and msg.from_uin == tgt_sess.msg_list[-1].from_uin \
and msg.content == tgt_sess.msg_list[-1].content:
                    # Temporary sessions carry no seq for duplicate detection, so only messages with the same sender, timestamp and content are discarded
                    logging.info("Duplicate message, discarding")
return
tgt_sess.msg_id = msg.msg_id
self.process_threads[tid].append(msg)
elif isinstance(msg, InputNotify):
self.__input_notify_handler(msg)
elif isinstance(msg, BuddiesStatusChange):
self.__buddies_status_change_handler(msg)
elif isinstance(msg, KickMessage):
self.__kick_message(msg)
else:
logging.warning("Unsolved Msg type :" + str(msg.poll_type))
return
def __get_account(self, msg):
assert isinstance(msg, (Msg, Notify)), "function get_account received a not Msg or Notify parameter."
if isinstance(msg, (PmMsg, SessMsg, InputNotify)):
            # If the sender's real QQ number is not in the friend list, fetch it and cache it
tuin = msg.from_uin
account = self.__operator.uin_to_account(tuin)
return account
elif isinstance(msg, GroupMsg):
return str(msg.info_seq).join("[]") + str(self.__operator.uin_to_account(msg.send_uin))
def __input_notify_handler(self, inputNotify):
logging.info(str(self.__get_account(inputNotify)) + " is typing...")
if isinstance(inputNotify, GroupAddMessage):
pass
return
def __buddies_status_change_handler(self, buddiesStatusChange):
pass
def __kick_message(self, kickMessage):
logging.warning(str(kickMessage.to_uin) + " is kicked. Reason: " + str(kickMessage.reason))
logging.warning("[{0}]{1} is kicked. Reason: {2}".format(
str(kickMessage.to_uin),
self.__operator.username,
str(kickMessage.reason),
))
raise KeyboardInterrupt("Kicked")
# Multithreading is used to speed up message processing
class MsgHandleQueue(threading.Thread):
def __init__(self, handler):
super(MsgHandleQueue, self).__init__()
self.handler = handler
self.msg_queue = []
self.setDaemon(True)
def run(self):
while 1:
if len(self.msg_queue):
self.handler.handle(self.msg_queue.pop(0))
logging.debug("queue handling.Now queue length:" + str(len(self.msg_queue)))
else:
time.sleep(1)
def append(self, msg):
self.msg_queue.append(msg)
|
gpl-3.0
| -3,575,861,079,491,298,300
| 37.962733
| 109
| 0.545194
| false
| 3.211982
| false
| false
| false
|
zemon1/CrawfoSys
|
weather.py
|
1
|
2138
|
#!/usr/bin/env python2
#weather.py
#Original author: Josh McSavaney (mcsaucy@csh.rit.edu)
#Current maintainer: Jeff Haak (zemon1@csh.rit.edu)
#A script used to scrape and parse weather information
import urllib, re, argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Gets weather info from weather.gov')
    parser.add_argument('--noTroll'
                        , help='Display temp in Fahrenheit instead of Kelvin'
                        , action='store_true'
                        , default=False
                        , required=False)
args = vars(parser.parse_args())
#print args
# get the file from the site
file = urllib.urlopen('http://www.weather.gov/data/current_obs/KROC.xml')
# make the file into a string
data = file.read()
weather = "N/A"
temp = "N/A"
windchill = "N/A"
# search the file for the weather and store the string
try:
re2 = re.search(r'<weather>(.*?)</weather>', data)
weather = re2.group(1)
except (AttributeError):
pass
# search the file for the temp and store the string
try:
re3 = re.search(r'<temperature_string>(.*?)</temperature_string>', data)
temp = re3.group(1)
except (AttributeError):
pass
# search the file for the windchill and store the string
try:
re4 = re.search(r'<windchill_string>(.*?)</windchill_string>', data)
windchill = re4.group(1)
except (AttributeError):
pass
#use Kelvin
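    # temperature_string is assumed to look like "36.0 F (2.2 C)"; split()[2][1:]
    # therefore picks out the Celsius value, which is then shifted to Kelvin.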
if not args['noTroll']:
windchill = float(windchill.split()[2][1:]) + 273.15
temp = float(temp.split()[2][1:]) + 273.15
windchill = "Windchill:" + str(windchill) + "K"
temp = "Temp:" + str(temp) + "K"
res = temp + " " + windchill + " " + weather
else:
windchill = int(windchill.split()[0].split(".")[0])
temp = int(temp.split()[0].split(".")[0])
windchill = "Windchill:" + str(windchill) + "F"
temp = "Temp:" + str(temp) + "F"
res = temp + " " + windchill + " " + weather
print res
|
apache-2.0
| -8,801,125,068,099,630,000
| 25.395062
| 86
| 0.546305
| false
| 3.522241
| false
| false
| false
|
EachenKuang/PythonRepository
|
MedicineSCI/Tools/Dao.py
|
1
|
1155
|
# -*- coding: utf-8 -*-
import pymssql
class Dao:
def __init__(self):
self.conn = None
self.cur = None
def connect(self):
        # Database connection settings
self.conn = pymssql.connect(host="localhost:59318", user="eachen", password="123456", database="mydata",
charset="utf8")
# host = "localhost:59318", user = "eachen", pwd = "123456", db = "mydata"
self.cur = self.conn.cursor()
if not self.cur:
            raise NameError("Database connection failed")
else:
print("数据库连接成功")
def create(self, sql):
# print(sql)
try:
self.cur.execute(sql)
self.conn.commit()
except:
print('create failed')
else:
print('create succeed')
def insert(self, sql):
# print(sql)
self.cur.execute(sql)
self.conn.commit()
def select(self, sql):
# print(sql)
self.cur.execute(sql)
        # fetchall() retrieves all returned result rows
return self.cur.fetchall()
def close(self):
self.conn.close()
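# Minimal usage sketch (the table name below is illustrative only):
#   dao = Dao()
#   dao.connect()
#   rows = dao.select("SELECT * FROM some_table")
#   dao.close()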
|
apache-2.0
| -7,258,135,829,876,773,000
| 23.795455
| 112
| 0.507791
| false
| 3.420063
| false
| false
| false
|
skycucumber/Messaging-Gateway
|
src/Command/HeartBeat.py
|
1
|
1072
|
'''
Created on 2013-8-12
@author: E525649
'''
from BaseCommand import CBaseCommand
from twisted.internet import threads
import BaseCommand
from DB import SBDB
class CHeartBeat(CBaseCommand):
'''
classdocs
'''
command_id=0x00000002
def __init__(self,data=None,protocol=None):
'''
Constructor
'''
CBaseCommand.__init__(self, data, protocol)
def Run(self):
with self.protocol.lockCmd:
if self.Authorized():
CBaseCommand.Run(self)
self.SendResp()
if self.protocol.role==BaseCommand.PV_ROLE_HUMAN:
threads.deferToThread(SBDB.UpdateActiveTime,self.protocol.role,self.protocol.client_id,id(self.protocol.transport))
elif self.protocol.role==BaseCommand.PV_ROLE_SUPERBOX:
threads.deferToThread(SBDB.UpdateActiveTime,self.protocol.role,self.protocol.superbox_id,id(self.protocol.transport))
else:
self.SendUnauthorizedResp()
|
gpl-2.0
| -7,245,791,321,416,922,000
| 29.529412
| 137
| 0.60541
| false
| 4.060606
| false
| false
| false
|
martinggww/lucasenlights
|
MachineLearning/sklearn/mrjbq7-ta-lib-c553531/setup.py
|
1
|
3712
|
#!/usr/bin/env python
import sys
import os
import warnings
from distutils.dist import Distribution
display_option_names = Distribution.display_option_names + ['help', 'help-commands']
query_only = any('--' + opt in sys.argv for opt in display_option_names) or len(sys.argv) < 2 or sys.argv[1] == 'egg_info'
# Use setuptools for querying the package, normal builds use distutils
if query_only:
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
else:
from distutils.core import setup
from distutils.extension import Extension
lib_talib_name = 'ta_lib' # the underlying C library's name
platform_supported = False
for prefix in ['darwin', 'linux', 'bsd', 'sunos']:
if prefix in sys.platform:
platform_supported = True
include_dirs = [
'/usr/include',
'/usr/local/include',
'/opt/include',
'/opt/local/include',
]
if 'TA_INCLUDE_PATH' in os.environ:
include_dirs.append(os.environ['TA_INCLUDE_PATH'])
lib_talib_dirs = [
'/usr/lib',
'/usr/local/lib',
'/usr/lib64',
'/usr/local/lib64',
'/opt/lib',
'/opt/local/lib',
]
if 'TA_LIBRARY_PATH' in os.environ:
lib_talib_dirs.append(os.environ['TA_LIBRARY_PATH'])
break
if sys.platform == "win32":
platform_supported = True
lib_talib_name = 'ta_libc_cdr'
include_dirs = [r"c:\ta-lib\c\include"]
lib_talib_dirs = [r"c:\ta-lib\c\lib"]
if not platform_supported:
raise NotImplementedError(sys.platform)
# Do not require numpy or cython for just querying the package
if not query_only:
import numpy
include_dirs.insert(0, numpy.get_include())
try:
from Cython.Distutils import build_ext
has_cython = True
except ImportError:
has_cython = False
for lib_talib_dir in lib_talib_dirs:
try:
files = os.listdir(lib_talib_dir)
if any(lib_talib_name in f for f in files):
break
except OSError:
pass
else:
warnings.warn('Cannot find ta-lib library, installation may fail.')
cmdclass = {}
if has_cython:
cmdclass['build_ext'] = build_ext
ext_modules = [
Extension(
'talib._ta_lib',
['talib/_ta_lib.pyx' if has_cython else 'talib/_ta_lib.c'],
include_dirs=include_dirs,
library_dirs=lib_talib_dirs,
libraries=[lib_talib_name]
)
]
setup(
name = 'TA-Lib',
version = '0.4.10',
description = 'Python wrapper for TA-Lib',
author = 'John Benediktsson',
author_email = 'mrjbq7@gmail.com',
url = 'http://github.com/mrjbq7/ta-lib',
download_url = 'https://github.com/mrjbq7/ta-lib/releases',
classifiers = [
"License :: OSI Approved :: BSD License",
"Development Status :: 4 - Beta",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Cython",
"Topic :: Office/Business :: Financial",
"Topic :: Scientific/Engineering :: Mathematics",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Intended Audience :: Financial and Insurance Industry",
],
packages = ['talib'],
ext_modules = ext_modules,
cmdclass = cmdclass,
requires = ['numpy'],
)
|
cc0-1.0
| -1,881,101,097,156,988,400
| 28.935484
| 122
| 0.60695
| false
| 3.632094
| false
| false
| false
|
endreszabo/pdnsapp
|
dns.py
|
1
|
6190
|
#!/usr/bin/env python
from sys import exit, stdin, stderr, argv, stdout
from inspect import stack
from config import *
import os
import csv
CONT=0
FINAL=1
default_ttl=60
loglevel=3
class istr(str):
def __eq__(self, text):
return str.__eq__(self.lower(), text.lower())
class qname(istr):
def __new__(cls, value, *args, **kwargs):
return istr.__new__(cls, value)
def _domain_parts(self,request):
return map(lambda x: istr(x), filter(lambda x: x!='', self.split('.')))
def _domain_parts_len(self,request):
        return len(self._domain_parts(request))
def _tld(self, count=2):
return istr('.'.join(self.domain_parts[-count:]))
def __init__(self, value, minlen=None, maxlen=None):
self.domain_parts=self._domain_parts(value)
self.domain_parts_count=len(self.domain_parts)
self.tld=self._tld()
def host_part(self, substring):
try:
if self.lower().index(substring+'.')==0:
return True
except ValueError:
return False
return False
def is_subdomain(string, substring):
try:
return (string.lower().rindex('.'+substring)+len(substring)+1 == len(string))
except ValueError:
return False
return False
def logd(level=loglevel, message=None, kwargs={}):
if level>=loglevel:
print("LOG\t%s(): %s" % (stack()[1][3],'; '.join([message,', '.join(map(lambda (k,v): "%s='%s'" % (k,v), kwargs.iteritems()))])))
def log(level=loglevel, message=None, **kwargs):
if level>=loglevel:
print(
"LOG\t%s(): %s" % (
stack()[1][3],
'; '.join(
[
message,
', '.join(
map(lambda (k,v): "%s='%s'" % (k,v), kwargs.iteritems())
)
]
)
)
)
def MX(priority=0, data=None, ttl=default_ttl):
if data:
return {
'qtype': 'MX',
'data':"%s\t%s" % (priority, data),
'ttl': ttl
}
else:
return {}
def LOG(msg):
pass
def A(data=None, ttl=default_ttl):
if data:
return {
'qtype': 'A',
'data': data,
'ttl': ttl
}
else:
return {}
def match_domain(name, domain):
if name[-len(domain):] == domain or name[-len(domain)-1:] == '.'+domain:
return True
return False
matches=[]
def match(host=None, fqdn=None, domain=None, dns_class=None, type=None, remote_ip=None, local_ip=None, cache=True):
params=locals()
def wrapper(f):
        matches.append([f, params])
        return f
return wrapper
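# Illustrative registration only (real handlers are expected to come from the
# star-imported config module); a handler returns (CONT or FINAL, record(s)):
#
#     @match(domain='example.com', type='A')
#     def example_a(request):
#         return (FINAL, A(data='192.0.2.1'))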
def represent(response):
return "\t".join([
'DATA',
response['qname'],
response['qclass'],
response['qtype'],
str(response['ttl']),
response['id'],
response['data']
])
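# represent() emits one tab-separated PowerDNS pipe-backend response line, e.g.:
#   DATA  www.example.com  IN  A  60  -1  192.0.2.1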
def route(request):
retval=[]
if request['qname'] in skip_zones:
retval.append("LOG\tqname '%s' is in skipped zones list, skipping" % request['qname'])
return retval
for f, conditions in matches:
if (conditions['fqdn'] is None or conditions['fqdn'] == request['qname']) and \
(conditions['domain'] is None or match_domain(request['qname'], conditions['domain'])) and \
(conditions['type'] is None or conditions['type'] == request['qtype'] or request['qtype'] == 'ANY') and \
(conditions['dns_class'] is None or conditions['dns_class'] == request['qclass']) and \
(conditions['remote_ip'] is None or conditions['remote_ip'] == request['remote-ip']) and \
(conditions['local_ip'] is None or conditions['local_ip'] == request['local-ip']):
returned=f(request)
if returned:
if returned[1]:
if type(returned[1]) is list:
for item in returned[1]:
retval.append(
represent(
dict(request.items() + item.items())
)
)
else:
retval.append(
represent(
dict(request.items() + returned[1].items())
)
)
if returned[0] == FINAL:
break
return retval
def run(f_in=stdin, f_out=stdout):
line = f_in.readline().strip()
if not line.startswith('HELO'):
print >>f_out, 'FAIL'
f_out.flush()
f_in.readline()
else:
print >>f_out, "OK\tapp firing up"
f_out.flush()
while True:
line = f_in.readline().strip()
if not line:
break
#request = line.split('\t')
request = dict(
zip(
['cmd','qname','qclass','qtype','id','remote-ip','local-ip','edns-subnet-address'],
line.split('\t')
)
)
request['qname']=qname(request['qname'])
#request['id']=1
#logd(3, 'Processing request', request)
if request['cmd'] == 'Q':
if request['qname'] != '':
datas=route(request)
if datas:
print >>f_out, "\n".join(datas)
#print >>f_out, "LOG\t"+"\nLOG\t".join(datas)
print >>f_out, "END"
f_out.flush()
elif request['cmd'] == 'PING':
print >>f_out, "LOG\tPONG"
f_out.flush()
continue
elif request['cmd'] == 'HELO':
print >>f_out, "OK\trunning"
f_out.flush()
continue
elif request['cmd'] == 'AXFR':
print >>f_out, "END"
f_out.flush()
continue
else:
print >>f_out, "LOG\tUnprocessed"
def acme_b64encode(acme_challenge):
return acme_challenge.replace('_','_u').replace('-','_h')
def acme_b64decode(acme_challenge):
return acme_challenge.replace('_h','-').replace('_u','_')
|
gpl-2.0
| 1,141,596,529,849,517,300
| 30.907216
| 137
| 0.485784
| false
| 3.912769
| false
| false
| false
|
nixingyang/Kaggle-Competitions
|
Face Verification/Extra/Cross Validation/Cross_Validation.py
|
1
|
5595
|
from joblib import Parallel, delayed
from sklearn.cross_validation import KFold
import numpy as np
import prepare_data
import pylab
import solution_basic
def inspect_final_data_set_without_labels(image_index_list, seed):
np.random.seed(seed)
image_index_array = np.array(image_index_list)
# Cross Validation
fold_num = 5
label_kfold = KFold(image_index_array.size, n_folds=fold_num, shuffle=True)
true_records_num_list = []
false_records_num_list = []
for _, fold_item in enumerate(label_kfold):
# Generate final data set
selected_index_array = image_index_array[fold_item[0]]
_, Y_train = solution_basic.get_record_map(selected_index_array, None)
true_records = Y_train == 1
true_records_num = np.sum(true_records)
false_records_num = Y_train.size - true_records_num
true_records_num_list.append(true_records_num)
false_records_num_list.append(false_records_num)
return (true_records_num_list, false_records_num_list)
def inspect_final_data_set_with_labels(image_index_list, seed):
np.random.seed(seed)
# Cross Validation
fold_num = 5
unique_label_values = np.unique(image_index_list)
selected_label_values = np.random.choice(unique_label_values, \
size=np.ceil(unique_label_values.size * (fold_num - 1) / fold_num), \
replace=False)
selected_index_list = []
for single_image_index in image_index_list:
if single_image_index in selected_label_values:
selected_index_list.append(single_image_index)
selected_index_array = np.array(selected_index_list)
_, Y_train = solution_basic.get_record_map(selected_index_array, None)
true_records = Y_train == 1
true_records_num = np.sum(true_records)
false_records_num = Y_train.size - true_records_num
return ([true_records_num], [false_records_num])
def inspect_number_of_occurrences():
# Get image paths in the training and testing datasets
_, training_image_index_list = prepare_data.get_image_paths_in_training_dataset(
)
repeated_num = 20
seed_array = np.random.choice(range(repeated_num),
size=repeated_num,
replace=False)
records_list = (Parallel(n_jobs=-1)(delayed(
inspect_final_data_set_without_labels)(training_image_index_list, seed)
for seed in seed_array))
# repeated_num = 100
# seed_array = np.random.choice(range(repeated_num), size=repeated_num, replace=False)
# records_list = (Parallel(n_jobs=-1)(delayed(inspect_final_data_set_with_labels)(training_image_index_list, seed) for seed in seed_array))
true_records_num_list = []
false_records_num_list = []
for single_true_records_num_list, single_false_records_num_list in records_list:
for value in single_true_records_num_list:
true_records_num_list.append(value)
for value in single_false_records_num_list:
false_records_num_list.append(value)
for single_list in [true_records_num_list, false_records_num_list]:
repeated_times_list = []
min_value_list = []
max_value_list = []
mean_value_list = []
for end_index in range(len(single_list)):
current_list = single_list[0:end_index + 1]
repeated_times_list.append(len(current_list))
min_value_list.append(np.min(current_list))
max_value_list.append(np.max(current_list))
mean_value_list.append(np.mean(current_list))
pylab.figure()
pylab.plot(repeated_times_list,
min_value_list,
color="yellowgreen",
label="Minimum")
pylab.plot(repeated_times_list,
max_value_list,
color="lightskyblue",
label="Maximum")
pylab.plot(repeated_times_list,
mean_value_list,
color="darkorange",
label="Mean")
pylab.legend(bbox_to_anchor=(0., 1.02, 1., .102),
loc=3,
ncol=3,
mode="expand",
borderaxespad=0.)
pylab.xlabel("Repeated Times", fontsize="large")
pylab.ylabel("Number of Occurrences", fontsize="large")
pylab.grid()
pylab.show()
def inspect_number_of_images():
# Get image paths in the training and testing datasets
_, training_image_index_list = prepare_data.get_image_paths_in_training_dataset(
)
images_number_list = []
for current_image_index in np.unique(training_image_index_list):
images_number_list.append(
np.sum(np.array(training_image_index_list) == current_image_index))
# the histogram of the data with histtype="step"
bins = np.arange(np.min(images_number_list),
np.max(images_number_list) + 2) - 0.5
_, _, patches = pylab.hist(images_number_list, bins=bins)
pylab.setp(patches, "facecolor", "yellowgreen", "alpha", 0.75)
pylab.xlim([bins[0], bins[-1]])
pylab.xticks(
np.arange(np.min(images_number_list),
np.max(images_number_list) + 1))
pylab.xlabel("Number of Images from the Same Person", fontsize="large")
pylab.ylabel("Number of Occurrences", fontsize="large")
pylab.title("Histogram of Number of Images from the Same Person")
pylab.show()
inspect_number_of_occurrences()
|
mit
| -2,729,778,496,074,134,000
| 36.3
| 143
| 0.611796
| false
| 3.67367
| false
| false
| false
|
DMSalesman/Nemris
|
modules/pkgutils.py
|
1
|
3330
|
"""Module with functions for management of installed APK lists."""
import glob
import re
import subprocess
import apkutils # needed for AndroidManifest.xml dump
import utils # needed for sudo
# Creates an APK/path dictionary to avoid the sluggish "pm path"
def create_pkgdict():
"""Creates a dict for fast path lookup from /data/system/packages.xml; returns dict."""
(out, err) = utils.sudo("cat /data/system/packages.xml")
if err: return False
xml_dump = [i for i in out.decode("utf-8").split("\n") if "<package name=" in i]
pkgdict = {}
for i in xml_dump:
pkgname = re.findall("<package name=\"(.*?)\"", i)[0]
pkgpath = re.findall("codePath=\"(.*?)\"", i)[0]
# Normalizes each entry
if not pkgpath.endswith(".apk"):
try:
pkgpath = glob.glob(pkgpath + "/*.apk")[0]
except:
continue
pkgdict[pkgname] = pkgpath
return pkgdict
def list_installed_pkgs(args):
"""Lists the members of a given category of packages; returns list."""
prefix = "pm list packages"
if args.user:
suffix = "-3"
elif args.system:
suffix = "-s"
elif args.disabled:
suffix = "-d"
else:
suffix = ""
pkgs = [i[8:] for i in subprocess.Popen("{0} {1}".format(prefix, suffix), stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = True).communicate()[0].decode("utf-8").split("\n") if i]
return pkgs
def list_installed_pkgs_nougat(args):
"""Uses Nougat's cmd command to query the package service (faster); returns list."""
prefix = "cmd package list packages"
if args.user:
suffix = "-3"
elif args.system:
suffix = "-s"
elif args.disabled:
suffix = "-d"
else:
suffix = ""
pkgs = [i[8:] for i in utils.sudo("{0} {1}".format(prefix, suffix))[0].decode("utf-8").split("\n") if i]
return pkgs
def check_substratum(nougat):
"""Checks if the Substratum engine is installed; returns bool."""
if nougat:
user_pkgs = [i[8:] for i in utils.sudo("cmd package list packages -3")[0].decode("utf-8").split("\n") if i]
else:
user_pkgs = [i[8:] for i in subprocess.Popen("pm list packages -3", stdout = subprocess.PIPE, shell = True).communicate()[0].decode("utf-8").split("\n") if i]
substratum_installed = True if "projekt.substratum" in user_pkgs else False
return substratum_installed
def exclude_overlays(aapt, pkgdict, pkgs):
"""Excludes Substratum overlays from the packages to extract; returns nothing."""
    for i in list(pkgs):  # iterate over a copy so entries can be removed safely
pkgpath = pkgdict.get(i)
out = apkutils.get_pkgxml(aapt, pkgpath)[0].decode("utf-8")
if "Substratum_Parent" in out: pkgs.remove(i)
def exclude_arcus_variants(pkgs):
"""Excludes Arcus theme variants from the packages to extract; returns nothing."""
    for i in list(pkgs):  # iterate over a copy so entries can be removed safely
if "pixkart.arcus.user" in i: pkgs.remove(i)
def check_already_extracted(pkgpath, md5sums):
"""Checks if an APK has already been extracted; returns bool, str."""
pkgsum = utils.compute_md5sum(pkgpath)
already_extracted = True if pkgsum in md5sums else False
return already_extracted, pkgsum
|
unlicense
| -4,136,320,089,613,756,400
| 30.714286
| 194
| 0.606607
| false
| 3.671444
| false
| false
| false
|
zhongwcool/Muzei
|
web/handlers/backroomarthelper.py
|
1
|
6229
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import sys
import webapp2
from google.appengine.api import images
from google.appengine.api import urlfetch
sys.path.append(os.path.join(os.path.dirname(__file__),'../lib'))
from bs4 import BeautifulSoup
import cloudstorage as gcs
from handlers.common import *
from models import FeaturedArtwork
THUMB_HEIGHT=600
NO_CROP_TUPLE=(0, 0, 1, 1)
def add_art_from_external_details_url(publish_date, url):
if FeaturedArtwork.all().filter('publish_date =', publish_date).get() != None:
webapp2.abort(409, message='Artwork already exists for this date.')
result = urlfetch.fetch(url)
if result.status_code < 200 or result.status_code >= 300:
webapp2.abort(400, message='Error processing URL: HTTP %d. Content: %s'
% (result.status_code, result.content))
soup = BeautifulSoup(result.content, 'html.parser')
attribution = None
if re.search(r'wikiart.org', url, re.I) or re.search(r'wikipaintings.org', url, re.I):
attribution = 'wikiart.org'
details_url = re.sub(r'#.+', '', url, re.I | re.S) + '?utm_source=Muzei&utm_campaign=Muzei'
title = soup.find('h1').get_text()
author = soup.find('a', class_='artist-name').get_text()
completion_year = None
try:
completion_year = unicode(soup
.find(text='Date:')
.parent
.find_next_sibling('span')
.text).strip()
except:
pass
byline = author + ((', ' + completion_year) if completion_year else '')
image_url = get_wikiart_image_url(soup)
elif re.search(r'metmuseum.org', url, re.I):
attribution = 'metmuseum.org'
details_url = re.sub(r'[#?].+', '', url, re.I | re.S) + '?utm_source=Muzei&utm_campaign=Muzei'
title = soup.find('h2').get_text()
author = ''
try:
author = unicode(soup.find(text='Artist:').parent.next_sibling).strip()
except:
pass
author = re.sub(r'\s*\(.*', '', author)
completion_year = None
try:
completion_year = unicode(soup.find(text='Date:').parent.next_sibling).strip()
except:
pass
byline = author + ((', ' + completion_year) if completion_year else '')
image_url = soup.find('a', class_='download').attrs['href']
else:
webapp2.abort(400, message='Unrecognized URL')
if not title or not author or not image_url:
webapp2.abort(500, message='Could not parse HTML')
image_url, thumb_url = maybe_process_image(image_url,
NO_CROP_TUPLE,
publish_date.strftime('%Y%m%d') + ' ' + title + ' ' + byline)
# create the artwork entry
new_artwork = FeaturedArtwork(
title=title.strip(),
byline=byline.strip(),
attribution=attribution,
image_url=image_url,
thumb_url=thumb_url,
details_url=details_url,
publish_date=publish_date)
new_artwork.save()
return new_artwork
def get_wikiart_image_url(soup):
# TODO: use a cleaner method :(
tmp = soup.find(class_='thumbnails_ref')['onclick']
thumb_html_url = re.search(r'(/en.+?)\'', tmp).group(1)
thumb_html_url = "http://www.wikiart.org%s" % thumb_html_url
result = urlfetch.fetch(thumb_html_url)
if result.status_code < 200 or result.status_code >= 300:
webapp2.abort(400, message='Error processing URL: HTTP %d. Content: %s'
% (result.status_code, result.content))
thumb_html = json.loads(result.content)
thumb_soup = BeautifulSoup(thumb_html, 'html.parser')
max_thumb_width = 0
max_thumb_url = None
for thumb_title_el in thumb_soup.select('.thumbnail_title'):
thumb_width = int(re.search(r'(\d+)x\d+', thumb_title_el.get_text()).group(1))
if thumb_width > max_thumb_width:
max_thumb_width = thumb_width
max_thumb_url = thumb_title_el.parent.find('a')['href']
return max_thumb_url
def maybe_process_image(image_url, crop_tuple, base_name):
if CLOUD_STORAGE_ROOT_URL in image_url and crop_tuple == NO_CROP_TUPLE:
return (image_url, None)
image_result = urlfetch.fetch(image_url, deadline=20)
if image_result.status_code < 200 or image_result.status_code >= 300:
raise IOError('Error downloading image: HTTP %d.' % image_result.status_code)
filename = re.sub(r'[^\w]+', '-', base_name.strip().lower()) + '.jpg'
# main image
image_gcs_path = CLOUD_STORAGE_BASE_PATH + '/fullres/' + filename
# resize to max width 4000 or max height 2000
image_contents = image_result.content
image = images.Image(image_contents)
edited = False
if image.height > 2000:
image.resize(width=(image.width * 2000 / image.height), height=2000)
edited = True
elif image.width > 4000:
image.resize(width=4000, height=(image.height * 4000 / image.width))
edited = True
if crop_tuple != NO_CROP_TUPLE:
image.crop(*crop_tuple)
edited = True
if edited:
image_contents = image.execute_transforms(output_encoding=images.JPEG, quality=80)
  # upload with default ACLs set on the bucket (or use options={'x-goog-acl': 'public-read'})
gcs_file = gcs.open(image_gcs_path, 'w', content_type='image/jpeg')
gcs_file.write(image_contents)
gcs_file.close()
# thumb
thumb_gcs_path = CLOUD_STORAGE_BASE_PATH + '/thumbs/' + filename
thumb = images.Image(image_result.content)
thumb.resize(width=(thumb.width * THUMB_HEIGHT / thumb.height), height=THUMB_HEIGHT)
if crop_tuple != NO_CROP_TUPLE:
thumb.crop(*crop_tuple)
edited = True
thumb_contents = thumb.execute_transforms(output_encoding=images.JPEG, quality=40)
gcs_file = gcs.open(thumb_gcs_path, 'w', content_type='image/jpeg')
gcs_file.write(thumb_contents)
gcs_file.close()
return (CLOUD_STORAGE_ROOT_URL + image_gcs_path,
CLOUD_STORAGE_ROOT_URL + thumb_gcs_path)
|
apache-2.0
| 525,997,306,480,910,340
| 33.798883
| 98
| 0.67266
| false
| 3.227461
| false
| false
| false
|
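maybe_process_image in the file above caps the fetched artwork at 2000px tall (first) or 4000px wide, preserving aspect ratio, via the App Engine images API. The sketch below reproduces the same capping rule with Pillow; using Pillow is an assumption made purely so the rule can be run standalone, and the sample dimensions are made up.

# Sketch of the size-capping rule from maybe_process_image, written with
# Pillow instead of google.appengine.api.images (an assumption for this sketch).
from PIL import Image

MAX_WIDTH, MAX_HEIGHT = 4000, 2000

def cap_size(img):
    """Scale img down, preserving aspect ratio, so height <= 2000 or width <= 4000."""
    width, height = img.size
    if height > MAX_HEIGHT:
        width, height = width * MAX_HEIGHT // height, MAX_HEIGHT
    elif width > MAX_WIDTH:
        width, height = MAX_WIDTH, height * MAX_WIDTH // width
    if (width, height) != img.size:
        img = img.resize((width, height))
    return img

if __name__ == "__main__":
    sample = Image.new("RGB", (6000, 2500), "gray")
    # (4800, 2000): height is capped first, mirroring the if/elif in the original.
    print(cap_size(sample).size)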
realgam3/SubtitlesClient
|
SubtitlesClient.py
|
1
|
3702
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
########################################################
# Name: Subtitles Client
# Site: http://RealGame.co.il
__author__ = 'RealGame (Tomer Zait)'
__license__ = 'GPL v3'
__version__ = '1.0'
__email__ = 'realgam3@gmail.com'
########################################################
from os import path
from sys import argv
from docopt import docopt
from engines.engine import SubtitleSite, SUBTITLE_SITE_LIST, DEFAULTS
__doc__ = \
"""
Subtitles Client
Usage:
{prog} download <releases_path>... [--lang=<language> --engine=<subtitle_site>...]
{prog} exist <releases_path>... [--lang=<language> --engine=<subtitle_site>...]
{prog} test [<engines>...]
{prog} (-l | --list)
{prog} (-h | --help)
{prog} (-v | --version)
Options:
-l --list Show subtitles engine list.
-h --help Show this screen.
-v --version Show version.
--lang=<language> Subtitle language (alpha2) [default: {def_language}].
--engine=<subtitle_site> Subtitle site [default: {def_engine}].
""".format(prog=path.basename(argv[0]),
def_language=DEFAULTS['subtitle_language'],
def_engine=DEFAULTS['subtitle_engine'])
def download_subtitles(releases, engines=[DEFAULTS['subtitle_engine']], lang=DEFAULTS['subtitle_language']):
if releases:
for release in releases:
for engine in engines:
subtitle_release = SubtitleSite.get_file_properties(release)['release_name']
print "[{engine: ^15}] Trying To Download Subtitles For: '{release}'".format(engine=engine,
release=subtitle_release)
sub_obj = SubtitleSite.class_factory(engine)
subtitle_path = sub_obj.download_subtitle(release, lang)
if subtitle_path:
print "{0:17} Download Success: ({file_path}).\n".format("", file_path=subtitle_path)
else:
print "{0:17} Subtitles Not Found.\n".format("")
def is_subtitles_exist(releases, engines=[DEFAULTS['subtitle_engine']], lang=DEFAULTS['subtitle_language']):
if releases:
for release in releases:
for engine in engines:
subtitle_release = SubtitleSite.get_file_properties(release)['release_name']
sub_obj = SubtitleSite.class_factory(engine)
exist_flag = sub_obj.is_subtitle_exist(release, lang)
res = "Exist"
if not exist_flag:
res = "Does Not " + res
print "[{engine: ^15}] '{release}' - {res}.".format(engine=engine,
release=subtitle_release,
res=res)
def test_engines(engines):
if not engines:
engines = SUBTITLE_SITE_LIST.keys()
for engine_key in engines:
t = SubtitleSite.class_factory(engine_key)
t.test_engine()
def main():
args = docopt(__doc__, help=True, version='Subtitles Client %s' % __version__)
if args['download']:
download_subtitles(args['<releases_path>'], args['--engine'], args['--lang'])
elif args['exist']:
is_subtitles_exist(args['<releases_path>'], args['--engine'], args['--lang'])
elif args['test']:
test_engines(args['<engines>'])
elif args['--list']:
for sub_site in SUBTITLE_SITE_LIST.keys():
sub_dict = SUBTITLE_SITE_LIST.get(sub_site)
print sub_dict.get('class_name')
if __name__ == "__main__":
main()
|
gpl-3.0
| 1,652,222,305,396,797,700
| 36.77551
| 118
| 0.537007
| false
| 4.081588
| false
| false
| false
|
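The client above builds its entire CLI from the docstring via docopt. A quick standalone check of how such a usage string maps a command line onto a dict follows; the program name, release path and defaults below are made-up examples, not the real engine list.

# How the usage string maps argv onto a dict, using the docopt package directly.
from docopt import docopt

USAGE = """Subtitles Client
Usage:
  subs download <releases_path>... [--lang=<language> --engine=<subtitle_site>...]
  subs (-l | --list)

Options:
  --lang=<language>          Subtitle language (alpha2) [default: en].
  --engine=<subtitle_site>   Subtitle site [default: opensubtitles].
"""

if __name__ == "__main__":
    args = docopt(USAGE, argv=["download", "Some.Movie.2014.720p.mkv", "--lang=he"])
    print(args["<releases_path>"])   # ['Some.Movie.2014.720p.mkv']
    print(args["--lang"])            # 'he'
    print(args["--engine"])          # ['opensubtitles']: repeatable options collect into a list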
eReuse/DeviceHub
|
ereuse_devicehub/resources/account/settings.py
|
1
|
5490
|
from ereuse_devicehub.resources.account.role import Role
from ereuse_devicehub.resources.resource import ResourceSettings
from ereuse_devicehub.resources.schema import Thing
from ereuse_devicehub.security.perms import DB_PERMS
from ereuse_devicehub.validation.validation import ALLOWED_WRITE_ROLE
class Account(Thing):
"""
An account represents a physical person or an organization.
"""
email = {
'type': 'email',
'required': True,
'unique': True,
'sink': 5
}
password = {
'type': 'string',
# 'required': True, todo active OR password required
'minlength': 4,
'sink': 4,
'doc': 'Users can only see their own passwords.'
}
role = {
'type': 'string',
'allowed': set(Role.ROLES),
'default': Role.USER,
'doc': 'See the Roles section to get more info.',
ALLOWED_WRITE_ROLE: Role(Role.ADMIN)
}
token = {
'type': 'string',
'readonly': True,
}
name = {
'type': 'string',
'sink': 3,
'description': 'The name of an account, if it is a person or an organization.'
}
organization = {
'type': 'string',
'sink': 1,
'description': 'The name of the organization the account is in. Organizations can be inside others.'
}
active = {
'type': 'boolean',
'default': True,
'sink': -1,
'description': 'Activate the account so you can start using it.',
'doc': 'Inactive accounts cannot login, and they are created through regular events.'
}
blocked = {
'type': 'boolean',
'default': True,
'sink': -2,
'description': 'As a manager, you need to specifically accept the user by unblocking it\'s account.',
ALLOWED_WRITE_ROLE: Role(Role.ADMIN)
}
isOrganization = {
'type': 'boolean',
'sink': 2
}
databases = { # todo make admin worthy
'type': 'dict',
'valueschema': {
'type': 'string',
'allowed': list(DB_PERMS)
},
'required': True,
ALLOWED_WRITE_ROLE: Role(Role.ADMIN),
'teaser': False,
'sink': -4,
}
defaultDatabase = {
'type': 'string', # todo If this is not set, the first databased in 'databases' it should be used
ALLOWED_WRITE_ROLE: Role(Role.ADMIN),
'teaser': False,
'sink': -5
}
shared = {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'db': {
'type': 'string'
},
'@type': {
'type': 'string'
},
'label': {
'type': 'string'
},
'_id': {
'type': 'string'
},
'baseUrl': {
'type': 'url',
'doc': 'The scheme, domain, any path to reach the DeviceHub.'
}
}
},
'default': [],
'materialized': True,
'description': 'The groups (eg: lots, packages...) other people shared to this account.'
}
fingerprints = {
'type': 'list',
'readonly': True,
}
publicKey = {
'type': 'string',
'writeonly': True
}
class AccountSettings(ResourceSettings):
resource_methods = ['GET', 'POST']
item_methods = ['PATCH', 'DELETE', 'GET']
# the standard account entry point is defined as
# '/accounts/<ObjectId>'. We define an additional read-only entry
# point accessible at '/accounts/<username>'.
    # Note that this regex is weak; it will accept some strings that are not emails, which is fine; it is fast.
additional_lookup = {
'url': 'regex("[^@]+@[^@]+\.[^@]+")',
'field': 'email',
}
# 'public_methods': ['POST'], # Everyone can create an account, which will be blocked (not active)
datasource = {
'projection': {'token': 0}, # We exclude from showing tokens to everyone
'source': 'accounts'
}
# We also disable endpoint caching as we don't want client apps to
# cache account data.
cache_control = ''
cache_expires = 0
# Allow 'token' to be returned with POST responses
extra_response_fields = ResourceSettings.extra_response_fields + ['email', 'active', 'name',
'databases', 'defaultDatabase', 'organization',
'isOrganization']
# Finally, let's add the schema definition for this endpoint.
_schema = Account
allowed_write_roles = {Role.ADMIN} # Only admins or above can POST, PUT or DELETE
use_default_database = True # We have a common shared database with accounts
fa = 'fa-user-o'
unregistered_user = {
'email': Account.email,
'name': Account.name,
'organization': Account.organization,
'isOrganization': Account.isOrganization
}
unregistered_user_doc = 'It can be a reference to an account, or a basic account object. ' \
+ 'The object has to contain at least an e-mail. If the e-mail does ' \
+ 'not match to an existing one, an account is created. If the e-mail exists, ' \
+ 'that account is used, and the rest of the data (name, org...) is discarded.'
|
agpl-3.0
| 5,062,361,657,323,126,000
| 32.680982
| 117
| 0.532058
| false
| 4.184451
| false
| false
| false
|
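The Account class above is a declarative validation schema in the Cerberus style used by Eve; keys such as 'sink', 'doc' and ALLOWED_WRITE_ROLE are DeviceHub-specific extensions handled by its own Validator subclass. Below is a minimal sketch of validating a similar document with plain cerberus; the role names and the email regex are placeholders for illustration, not DeviceHub's real values.

# Minimal validation sketch using plain Cerberus rules only.
from cerberus import Validator

ACCOUNT_SCHEMA = {
    'email':    {'type': 'string', 'required': True, 'regex': r'[^@]+@[^@]+\.[^@]+'},
    'password': {'type': 'string', 'minlength': 4},
    'role':     {'type': 'string', 'allowed': ['user', 'admin'], 'default': 'user'},  # placeholder roles
    'active':   {'type': 'boolean', 'default': True},
}

if __name__ == "__main__":
    v = Validator(ACCOUNT_SCHEMA)
    print(v.validate({'email': 'user@example.com', 'password': 'hunter2'}))  # True
    print(v.validate({'email': 'not-an-email'}))                             # False
    print(v.errors)                                                          # which rule failed, per field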
codedsk/hubcheck
|
hubcheck/pageobjects/widgets/groups_wiki_edit_form.py
|
1
|
3453
|
from hubcheck.pageobjects.widgets.groups_wiki_new_form import \
GroupsWikiNewForm1, GroupsWikiNewForm1_Locators_Base, \
GroupsWikiNewForm2, GroupsWikiNewForm2_Locators_Base, \
GroupsWikiNewForm3, GroupsWikiNewForm3_Locators_Base
from hubcheck.pageobjects.basepageelement import Link
class GroupsWikiEditForm1(GroupsWikiNewForm1):
"""
GroupsWikiNewForm with TextArea widget for pagetext
"""
def __init__(self, owner, locatordict={}):
super(GroupsWikiEditForm1,self).__init__(owner,locatordict)
# load hub's classes
GroupsWikiEditForm_Locators = self.load_class('GroupsWikiEditForm_Locators')
# update this object's locator
self.locators.update(GroupsWikiEditForm_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.rename = Link(self,{'base':'rename'})
# update the component's locators with this objects overrides
self._updateLocators()
def goto_rename(self):
"""click the rename link"""
self.rename.click()
class GroupsWikiEditForm1_Locators_Base(object):
"""locators for GroupsWikiEditForm1 object"""
locators = {
'rename' : "xpath=//a[text()='here']",
}
class GroupsWikiEditForm2(GroupsWikiNewForm2):
"""
GroupsWikiEditForm that uses an IframeWrap widget for pagetext
"""
def __init__(self, owner, locatordict={}):
super(GroupsWikiEditForm2,self).__init__(owner,locatordict)
# load hub's classes
GroupsWikiEditForm_Locators = self.load_class('GroupsWikiEditForm_Locators')
# update this object's locator
self.locators.update(GroupsWikiEditForm_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.rename = Link(self,{'base':'rename'})
# update the component's locators with this objects overrides
self._updateLocators()
def goto_rename(self):
"""click the rename link"""
self.rename.click()
class GroupsWikiEditForm2_Locators_Base(object):
"""locators for GroupsWikiEditForm2 object"""
locators = {
'rename' : "xpath=//a[text()='here']",
}
class GroupsWikiEditForm3(GroupsWikiNewForm3):
"""GroupsWikiEditForm
TextArea widget for pagetext
Upload3 file upload widget with embedded iframes
"""
def __init__(self, owner, locatordict={}):
super(GroupsWikiEditForm3,self).__init__(owner,locatordict)
# load hub's classes
GroupsWikiEditForm_Locators = self.load_class('GroupsWikiEditForm_Locators')
# update this object's locator
self.locators.update(GroupsWikiEditForm_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.rename = Link(self,{'base':'rename'})
# update the component's locators with this objects overrides
self._updateLocators()
def goto_rename(self):
"""click the rename link"""
self.rename.click()
class GroupsWikiEditForm3_Locators_Base(object):
"""locators for GroupsWikiEditForm3 object"""
locators = {
'rename' : "xpath=//a[text()='here']",
}
|
mit
| 2,063,670,874,993,383,400
| 27.073171
| 84
| 0.660875
| false
| 4.015116
| false
| false
| false
|
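All three form classes above repeat the same pattern: start from the widget's base locators, layer hub-specific locators on top, then let the owning page object override individual entries. A stripped-down sketch of that layering, independent of hubcheck's class loader and Selenium plumbing (the class and locator names here are illustrative, not hubcheck's real API):

# Stripped-down sketch of the locator-override pattern used above.
class Widget:
    base_locators = {'rename': "xpath=//a[text()='here']"}

    def __init__(self, hub_locators=None, owner_overrides=None):
        self.locators = dict(self.base_locators)      # start from the defaults
        self.locators.update(hub_locators or {})      # hub-specific layer
        self.locators.update(owner_overrides or {})   # per-page overrides win last

    def locator(self, name):
        return self.locators[name]

if __name__ == "__main__":
    w = Widget(owner_overrides={'rename': "css=a.rename-link"})
    print(w.locator('rename'))   # css=a.rename-link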
couchbaselabs/devguide-examples
|
python/cas.py
|
1
|
1612
|
from __future__ import print_function
from couchbase.bucket import Bucket
from couchbase.bucket import LOCKMODE_WAIT
from threading import Thread
from couchbase.exceptions import KeyExistsError
cb = Bucket('couchbase://10.0.0.31/default', lockmode=LOCKMODE_WAIT)
cb.upsert('a_list', [])
print('Will attempt concurrent document mutations without CAS')
def add_item_to_list(client, new_item):
l = client.get('a_list').value
l.append(new_item)
client.replace('a_list', l)
threads = [Thread(target=add_item_to_list, args=(cb, "item_" + str(x)))
for x in range(0, 10)]
[t.start() for t in threads]
[t.join() for t in threads]
cur_list = cb.get('a_list').value
print('Current list has {0} elements'.format(len(cur_list)))
if len(cur_list) != 10:
print('Concurrent modifications removed some of our items!', cur_list)
# The same as above, but using CAS
def add_item_to_list_safe(client, new_item):
while True:
rv = client.get('a_list')
l = rv.value
l.append(new_item)
try:
            client.replace('a_list', l, cas=rv.cas)  # use the client passed in, not the module-level cb
return
except KeyExistsError:
print("Cas mismatch for item", new_item)
continue
# Reset the list again
cb.upsert('a_list', [])
print('Will attempt concurrent modifications using CAS')
threads = [Thread(target=add_item_to_list_safe, args=(cb, "item_" + str(x)))
for x in range(0, 10)]
[t.start() for t in threads]
[t.join() for t in threads]
cur_list = cb.get('a_list').value
print('Current list has {0} elements'.format(len(cur_list)))
assert len(cur_list) == 10
|
apache-2.0
| 2,503,954,480,258,937,300
| 26.322034
| 76
| 0.653226
| false
| 3.192079
| false
| false
| false
|
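The CAS-safe writer above is a standard optimistic-concurrency retry loop, and the same shape can be factored into a reusable helper. The sketch below uses the same couchbase 2.x calls as the file (get, replace, KeyExistsError); the helper name is an invention for illustration.

# The retry loop from add_item_to_list_safe, factored into a generic helper.
from couchbase.exceptions import KeyExistsError

def cas_mutate(client, key, mutate):
    """Apply mutate(old_value) -> new_value under CAS, retrying on conflicts."""
    while True:
        rv = client.get(key)
        try:
            client.replace(key, mutate(rv.value), cas=rv.cas)
            return
        except KeyExistsError:
            continue  # someone else won the race; re-read and try again

# Usage (assuming an existing Bucket named cb, as in the file above):
# cas_mutate(cb, 'a_list', lambda l: l + ['item_x'])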
tanonev/codewebs
|
src/dataBaseTools/PrecomputeNN.py
|
1
|
3695
|
import sys
import os.path
sys.path.append(os.path.abspath('../../'))
sys.path.append(os.path.abspath('../../site/cwsite'))
import src.util.DBSetup
from src.util.MLClass import MLClass
from src.util.FileSystem import FileSystem
from src.util.AstNetwork import AstNetwork
from src.util.Assignment import Assignment
from models.models import Octave
from operator import itemgetter
import logging
from sets import Set
class PrecomputeNN(object):
def getASTs(self, assn, label):
dataDir = FileSystem.getDataDir()
outputDir = os.path.join(dataDir, 'incorrects')
fileName = label + '_' + str(assn) + '.txt'
path = os.path.join(outputDir, fileName)
astList = []
astFile = open(path)
for line in astFile.readlines():
astList.append(int(line))
return Set(astList)
def getAllParts(self):
return [(4,1), (4,2), (4,3), (4,4), (4,5)]
def getNN(self, corrects, incorrects, astNetwork):
NNmap = {}
numASTs = len(corrects) + len(incorrects)
row = 0
astNetwork.matrixFile.seek(0)
while(True):
if row % 100 == 0:
logging.info(str(row) + ' of ' + str(numASTs))
line = astNetwork.matrixFile.readline()
if not line: break
rowValues = map(int, line.strip().split())
for col in range(row+1, len(rowValues)):
value = rowValues[col]
if value == -1:
continue
if col in corrects:
try:
if value < NNmap[row][1]:
NNmap[row] = (col, value)
except KeyError:
NNmap[row] = (col, value)
if row in corrects:
try:
if value < NNmap[col][1]:
NNmap[col] = (row, value)
except KeyError:
NNmap[col] = (row, value)
row += 1
return NNmap
def writeNN(self, path, NNmap):
fid = open(path,'wt')
NNmaptuples = sorted(NNmap.iteritems(), key = itemgetter(0))
for t in NNmaptuples:
fid.write(str(t[0]) + ', ' + str(t[1][0]) + ', ' + str(t[1][1]) + '\n')
fid.close()
def initializeLog(self):
logDir = os.path.join(FileSystem.getLogDir(),'PrecomputeNN')
if not os.path.exists(logDir):
os.makedirs(logDir)
logFileName = os.path.join(logDir,'log')
logging.basicConfig(filename = logFileName, format = '%(asctime)s %(message)s', \
datefmt = '%m/%d/%Y %I:%M:%S %p', level = logging.INFO)
def run(self):
self.initializeLog()
for (h,p) in self.getAllParts():
assn = Assignment(h,p)
logging.info('PrecomputeNN (hw,part): ' + str(assn))
corrects = self.getASTs(assn, 'corrects')
incorrects = self.getASTs(assn, 'incorrects')
distanceMatrix = FileSystem.loadDistanceMatrix(assn.getTuple(),False)
subIdMap = FileSystem.loadSubmissionIdMap(assn.getTuple())
astNetwork = AstNetwork(assn.getTuple(), distanceMatrix, subIdMap)
NNmap = self.getNN(corrects, incorrects, astNetwork)
outputDir = os.path.join(FileSystem.getDataDir(), 'nearestNeighbors')
if not os.path.exists(outputDir):
os.makedirs(outputDir)
outputPath = os.path.join(outputDir, 'NNmap_' + str(assn) + '.txt')
self.writeNN(outputPath, NNmap)
if __name__ == '__main__':
PrecomputeNN().run()
|
mit
| -6,443,573,087,008,330,000
| 35.584158
| 89
| 0.540731
| false
| 3.724798
| false
| false
| false
|
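getNN above streams the pairwise distance matrix row by row and, for every submission, keeps the closest "correct" neighbour seen so far. The same bookkeeping on a small in-memory matrix is shown below; the values and indices are made up for illustration, and -1 marks a missing distance, as in the original.

# Nearest-correct-neighbour bookkeeping from getNN, run on a tiny matrix.
def nearest_correct(matrix, corrects):
    nn = {}
    for row, values in enumerate(matrix):
        for col in range(row + 1, len(values)):      # upper triangle only, as in getNN
            value = values[col]
            if value == -1:
                continue                              # distance not computed
            if col in corrects and (row not in nn or value < nn[row][1]):
                nn[row] = (col, value)
            if row in corrects and (col not in nn or value < nn[col][1]):
                nn[col] = (row, value)
    return nn

if __name__ == "__main__":
    #            0   1   2   3
    matrix = [[  0,  5,  2, -1],   # submissions 0 and 3 are "correct"
              [  5,  0,  7,  3],
              [  2,  7,  0,  9],
              [ -1,  3,  9,  0]]
    # {1: (3, 3), 2: (0, 2)}: submission 1 is closest to correct 3, submission 2 to correct 0
    print(nearest_correct(matrix, corrects={0, 3}))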
zandao/stn3270
|
stn3270/field.py
|
1
|
1639
|
# -*- coding: utf-8 -*-
"""
******************
Super TN3270 Field
******************
Super TN3270 Field - stn3270.field - implements the field
manipulation on a 3270 virtual screen: read, write, verify
and find fields.
"""
class Field:
"""It's a representation of a 3270 field, with a *start of field* sequence,
its position (*row* and *column*), raw *text* and its ASCII *data* representation
:param start_of_field: a 3270 SF sequence
:param row: starting row of the field
:param col: starting column of the field
:param text: raw text of the field
:param filler: ASCII character used to fill empty editable field
:type start_of_field: string
:type row: int
:type col: int
:type text: string
:type filler: string
"""
def __init__(self, start_of_field, row=None, col=None, text="", filler="_"):
self.filler = filler
self.start_of_field = self._SF(start_of_field)
self.row = row
self.col = col
self.set_text(text)
self.is_visible = ("c0=cd" not in start_of_field)
self.is_editable = False
for sf in self.start_of_field:
self.is_editable = self.is_editable or sf in ("c0=c1","c0=cd")
def set_text(self, text):
"""Sets the text of the field and the filtered data (text without filler characters)
:param text: raw text of field
:type text: string
"""
self.text = text
self.length = len(text)
self.data = text.replace(self.filler," ").rstrip()
return self.data
def _SF(self, char):
return char[3:-1].split(',')
|
lgpl-2.1
| -6,518,581,165,051,224,000
| 31.78
| 92
| 0.597315
| false
| 3.602198
| false
| false
| false
|
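Field expects the start-of-field sequence in the textual "SF(...)" form that _SF parses (strip the "SF(" prefix and trailing ")", then split on commas). A short usage sketch follows; it assumes the module above is importable as stn3270.field, and the SF attribute values and screen text are made-up examples in that format.

# Usage sketch for the Field class above.
from stn3270.field import Field

if __name__ == "__main__":
    name = Field("SF(c0=c1)", row=3, col=12, text="JOHN____", filler="_")
    print(name.data)          # 'JOHN'  (filler characters stripped)
    print(name.is_editable)   # True   (the class treats c0=c1 as editable)
    print(name.is_visible)    # True   (c0=cd is what the class treats as hidden)

    label = Field("SF(c0=e0)", row=3, col=1, text="Name . . :")
    print(label.is_editable)  # False  (anything other than c0=c1/c0=cd is protected)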